]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.2.2-201201290115.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.2-201201290115.patch
CommitLineData
b6142ec2
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index 81c287f..d456d02 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index 2f684da..bf21f8d 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+ifeq ($(KBUILD_EXTMOD),)
243+gcc-plugins:
244+ $(Q)$(MAKE) $(build)=tools/gcc
245+else
246+gcc-plugins: ;
247+endif
248+else
249+gcc-plugins:
250+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252+else
253+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254+endif
255+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256+endif
257+endif
258+
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262@@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283-$(vmlinux-dirs): prepare scripts
284+$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288@@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296@@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304@@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308-modules_prepare: prepare scripts
309+modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313@@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317+ -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321@@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329@@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333-%.s: %.c prepare scripts FORCE
334+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335+%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339-%.o: %.c prepare scripts FORCE
340+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341+%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.s: %.S prepare scripts FORCE
346+%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348-%.o: %.S prepare scripts FORCE
349+%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353@@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357-%/: prepare scripts FORCE
358+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359+%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363-%.ko: prepare scripts FORCE
364+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365+%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370index da5449e..7418343 100644
371--- a/arch/alpha/include/asm/elf.h
372+++ b/arch/alpha/include/asm/elf.h
373@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377+#ifdef CONFIG_PAX_ASLR
378+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379+
380+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382+#endif
383+
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388index de98a73..bd4f1f8 100644
389--- a/arch/alpha/include/asm/pgtable.h
390+++ b/arch/alpha/include/asm/pgtable.h
391@@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395+
396+#ifdef CONFIG_PAX_PAGEEXEC
397+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400+#else
401+# define PAGE_SHARED_NOEXEC PAGE_SHARED
402+# define PAGE_COPY_NOEXEC PAGE_COPY
403+# define PAGE_READONLY_NOEXEC PAGE_READONLY
404+#endif
405+
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410index 2fd00b7..cfd5069 100644
411--- a/arch/alpha/kernel/module.c
412+++ b/arch/alpha/kernel/module.c
413@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417- gp = (u64)me->module_core + me->core_size - 0x8000;
418+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423index 01e8715..be0e80f 100644
424--- a/arch/alpha/kernel/osf_sys.c
425+++ b/arch/alpha/kernel/osf_sys.c
426@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430- if (!vma || addr + len <= vma->vm_start)
431+ if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439+#ifdef CONFIG_PAX_RANDMMAP
440+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441+#endif
442+
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451- len, limit);
452+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453+
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458index fadd5f8..904e73a 100644
459--- a/arch/alpha/mm/fault.c
460+++ b/arch/alpha/mm/fault.c
461@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465+#ifdef CONFIG_PAX_PAGEEXEC
466+/*
467+ * PaX: decide what to do with offenders (regs->pc = fault address)
468+ *
469+ * returns 1 when task should be killed
470+ * 2 when patched PLT trampoline was detected
471+ * 3 when unpatched PLT trampoline was detected
472+ */
473+static int pax_handle_fetch_fault(struct pt_regs *regs)
474+{
475+
476+#ifdef CONFIG_PAX_EMUPLT
477+ int err;
478+
479+ do { /* PaX: patched PLT emulation #1 */
480+ unsigned int ldah, ldq, jmp;
481+
482+ err = get_user(ldah, (unsigned int *)regs->pc);
483+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485+
486+ if (err)
487+ break;
488+
489+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491+ jmp == 0x6BFB0000U)
492+ {
493+ unsigned long r27, addr;
494+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496+
497+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498+ err = get_user(r27, (unsigned long *)addr);
499+ if (err)
500+ break;
501+
502+ regs->r27 = r27;
503+ regs->pc = r27;
504+ return 2;
505+ }
506+ } while (0);
507+
508+ do { /* PaX: patched PLT emulation #2 */
509+ unsigned int ldah, lda, br;
510+
511+ err = get_user(ldah, (unsigned int *)regs->pc);
512+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
513+ err |= get_user(br, (unsigned int *)(regs->pc+8));
514+
515+ if (err)
516+ break;
517+
518+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
520+ (br & 0xFFE00000U) == 0xC3E00000U)
521+ {
522+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525+
526+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528+ return 2;
529+ }
530+ } while (0);
531+
532+ do { /* PaX: unpatched PLT emulation */
533+ unsigned int br;
534+
535+ err = get_user(br, (unsigned int *)regs->pc);
536+
537+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538+ unsigned int br2, ldq, nop, jmp;
539+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540+
541+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542+ err = get_user(br2, (unsigned int *)addr);
543+ err |= get_user(ldq, (unsigned int *)(addr+4));
544+ err |= get_user(nop, (unsigned int *)(addr+8));
545+ err |= get_user(jmp, (unsigned int *)(addr+12));
546+ err |= get_user(resolver, (unsigned long *)(addr+16));
547+
548+ if (err)
549+ break;
550+
551+ if (br2 == 0xC3600000U &&
552+ ldq == 0xA77B000CU &&
553+ nop == 0x47FF041FU &&
554+ jmp == 0x6B7B0000U)
555+ {
556+ regs->r28 = regs->pc+4;
557+ regs->r27 = addr+16;
558+ regs->pc = resolver;
559+ return 3;
560+ }
561+ }
562+ } while (0);
563+#endif
564+
565+ return 1;
566+}
567+
568+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569+{
570+ unsigned long i;
571+
572+ printk(KERN_ERR "PAX: bytes at PC: ");
573+ for (i = 0; i < 5; i++) {
574+ unsigned int c;
575+ if (get_user(c, (unsigned int *)pc+i))
576+ printk(KERN_CONT "???????? ");
577+ else
578+ printk(KERN_CONT "%08x ", c);
579+ }
580+ printk("\n");
581+}
582+#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590- if (!(vma->vm_flags & VM_EXEC))
591+ if (!(vma->vm_flags & VM_EXEC)) {
592+
593+#ifdef CONFIG_PAX_PAGEEXEC
594+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595+ goto bad_area;
596+
597+ up_read(&mm->mmap_sem);
598+ switch (pax_handle_fetch_fault(regs)) {
599+
600+#ifdef CONFIG_PAX_EMUPLT
601+ case 2:
602+ case 3:
603+ return;
604+#endif
605+
606+ }
607+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608+ do_group_exit(SIGKILL);
609+#else
610 goto bad_area;
611+#endif
612+
613+ }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618index 86976d0..8a57797 100644
619--- a/arch/arm/include/asm/atomic.h
620+++ b/arch/arm/include/asm/atomic.h
621@@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625+#ifdef CONFIG_PAX_REFCOUNT
626+typedef struct {
627+ u64 __aligned(8) counter;
628+} atomic64_unchecked_t;
629+#else
630+typedef atomic64_t atomic64_unchecked_t;
631+#endif
632+
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637index 0e9ce8d..6ef1e03 100644
638--- a/arch/arm/include/asm/elf.h
639+++ b/arch/arm/include/asm/elf.h
640@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646+
647+#ifdef CONFIG_PAX_ASLR
648+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649+
650+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652+#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660-struct mm_struct;
661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662-#define arch_randomize_brk arch_randomize_brk
663-
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668index e51b1e8..32a3113 100644
669--- a/arch/arm/include/asm/kmap_types.h
670+++ b/arch/arm/include/asm/kmap_types.h
671@@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675+ KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680index b293616..96310e5 100644
681--- a/arch/arm/include/asm/uaccess.h
682+++ b/arch/arm/include/asm/uaccess.h
683@@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687+extern void check_object_size(const void *ptr, unsigned long n, bool to);
688+
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692@@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700+
701+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702+{
703+ if (!__builtin_constant_p(n))
704+ check_object_size(to, n, false);
705+ return ___copy_from_user(to, from, n);
706+}
707+
708+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709+{
710+ if (!__builtin_constant_p(n))
711+ check_object_size(from, n, true);
712+ return ___copy_to_user(to, from, n);
713+}
714+
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722+ if ((long)n < 0)
723+ return n;
724+
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732+ if ((long)n < 0)
733+ return n;
734+
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739index 5b0bce6..becd81c 100644
740--- a/arch/arm/kernel/armksyms.c
741+++ b/arch/arm/kernel/armksyms.c
742@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746-EXPORT_SYMBOL(__copy_from_user);
747-EXPORT_SYMBOL(__copy_to_user);
748+EXPORT_SYMBOL(___copy_from_user);
749+EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754index 3d0c6fb..3dcae52 100644
755--- a/arch/arm/kernel/process.c
756+++ b/arch/arm/kernel/process.c
757@@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761-#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769-unsigned long arch_randomize_brk(struct mm_struct *mm)
770-{
771- unsigned long range_end = mm->brk + 0x02000000;
772- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773-}
774-
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779index 99a5727..a3d5bb1 100644
780--- a/arch/arm/kernel/traps.c
781+++ b/arch/arm/kernel/traps.c
782@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786+extern void gr_handle_kernel_exploit(void);
787+
788 /*
789 * This function is protected against re-entrancy.
790 */
791@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795+
796+ gr_handle_kernel_exploit();
797+
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802index 66a477a..bee61d3 100644
803--- a/arch/arm/lib/copy_from_user.S
804+++ b/arch/arm/lib/copy_from_user.S
805@@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809- * size_t __copy_from_user(void *to, const void *from, size_t n)
810+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814@@ -84,11 +84,11 @@
815
816 .text
817
818-ENTRY(__copy_from_user)
819+ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823-ENDPROC(__copy_from_user)
824+ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829index d066df6..df28194 100644
830--- a/arch/arm/lib/copy_to_user.S
831+++ b/arch/arm/lib/copy_to_user.S
832@@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836- * size_t __copy_to_user(void *to, const void *from, size_t n)
837+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841@@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845-WEAK(__copy_to_user)
846+WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850-ENDPROC(__copy_to_user)
851+ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856index d0ece2a..5ae2f39 100644
857--- a/arch/arm/lib/uaccess.S
858+++ b/arch/arm/lib/uaccess.S
859@@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872-ENTRY(__copy_to_user)
873+ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881-ENDPROC(__copy_to_user)
882+ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898-ENTRY(__copy_from_user)
899+ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907-ENDPROC(__copy_from_user)
908+ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913index 025f742..8432b08 100644
914--- a/arch/arm/lib/uaccess_with_memcpy.c
915+++ b/arch/arm/lib/uaccess_with_memcpy.c
916@@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920-__copy_to_user(void __user *to, const void *from, unsigned long n)
921+___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926index 2b2d51c..0127490 100644
927--- a/arch/arm/mach-ux500/mbox-db5500.c
928+++ b/arch/arm/mach-ux500/mbox-db5500.c
929@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939index aa33949..b242a2f 100644
940--- a/arch/arm/mm/fault.c
941+++ b/arch/arm/mm/fault.c
942@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946+#ifdef CONFIG_PAX_PAGEEXEC
947+ if (fsr & FSR_LNX_PF) {
948+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949+ do_group_exit(SIGKILL);
950+ }
951+#endif
952+
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960+#ifdef CONFIG_PAX_PAGEEXEC
961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962+{
963+ long i;
964+
965+ printk(KERN_ERR "PAX: bytes at PC: ");
966+ for (i = 0; i < 20; i++) {
967+ unsigned char c;
968+ if (get_user(c, (__force unsigned char __user *)pc+i))
969+ printk(KERN_CONT "?? ");
970+ else
971+ printk(KERN_CONT "%02x ", c);
972+ }
973+ printk("\n");
974+
975+ printk(KERN_ERR "PAX: bytes at SP-4: ");
976+ for (i = -1; i < 20; i++) {
977+ unsigned long c;
978+ if (get_user(c, (__force unsigned long __user *)sp+i))
979+ printk(KERN_CONT "???????? ");
980+ else
981+ printk(KERN_CONT "%08lx ", c);
982+ }
983+ printk("\n");
984+}
985+#endif
986+
987 /*
988 * First Level Translation Fault Handler
989 *
990diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991index 44b628e..623ee2a 100644
992--- a/arch/arm/mm/mmap.c
993+++ b/arch/arm/mm/mmap.c
994@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998+#ifdef CONFIG_PAX_RANDMMAP
999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000+#endif
1001+
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009- if (TASK_SIZE - len >= addr &&
1010- (!vma || addr + len <= vma->vm_start))
1011+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015- start_addr = addr = mm->free_area_cache;
1016+ start_addr = addr = mm->free_area_cache;
1017 } else {
1018- start_addr = addr = TASK_UNMAPPED_BASE;
1019- mm->cached_hole_size = 0;
1020+ start_addr = addr = mm->mmap_base;
1021+ mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025@@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029- if (start_addr != TASK_UNMAPPED_BASE) {
1030- start_addr = addr = TASK_UNMAPPED_BASE;
1031+ if (start_addr != mm->mmap_base) {
1032+ start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038- if (!vma || addr + len <= vma->vm_start) {
1039+ if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044index 3b3159b..425ea94 100644
1045--- a/arch/avr32/include/asm/elf.h
1046+++ b/arch/avr32/include/asm/elf.h
1047@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054+#ifdef CONFIG_PAX_ASLR
1055+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056+
1057+#define PAX_DELTA_MMAP_LEN 15
1058+#define PAX_DELTA_STACK_LEN 15
1059+#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064index b7f5c68..556135c 100644
1065--- a/arch/avr32/include/asm/kmap_types.h
1066+++ b/arch/avr32/include/asm/kmap_types.h
1067@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071-D(14) KM_TYPE_NR
1072+D(14) KM_CLEARPAGE,
1073+D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078index f7040a1..db9f300 100644
1079--- a/arch/avr32/mm/fault.c
1080+++ b/arch/avr32/mm/fault.c
1081@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085+#ifdef CONFIG_PAX_PAGEEXEC
1086+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087+{
1088+ unsigned long i;
1089+
1090+ printk(KERN_ERR "PAX: bytes at PC: ");
1091+ for (i = 0; i < 20; i++) {
1092+ unsigned char c;
1093+ if (get_user(c, (unsigned char *)pc+i))
1094+ printk(KERN_CONT "???????? ");
1095+ else
1096+ printk(KERN_CONT "%02x ", c);
1097+ }
1098+ printk("\n");
1099+}
1100+#endif
1101+
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105@@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109+
1110+#ifdef CONFIG_PAX_PAGEEXEC
1111+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114+ do_group_exit(SIGKILL);
1115+ }
1116+ }
1117+#endif
1118+
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123index f8e16b2..c73ff79 100644
1124--- a/arch/frv/include/asm/kmap_types.h
1125+++ b/arch/frv/include/asm/kmap_types.h
1126@@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130+ KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135index 385fd30..6c3d97e 100644
1136--- a/arch/frv/mm/elf-fdpic.c
1137+++ b/arch/frv/mm/elf-fdpic.c
1138@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142- if (TASK_SIZE - len >= addr &&
1143- (!vma || addr + len <= vma->vm_start))
1144+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152- if (addr + len <= vma->vm_start)
1153+ if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161- if (addr + len <= vma->vm_start)
1162+ if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167index b5298eb..67c6e62 100644
1168--- a/arch/ia64/include/asm/elf.h
1169+++ b/arch/ia64/include/asm/elf.h
1170@@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174+#ifdef CONFIG_PAX_ASLR
1175+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176+
1177+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179+#endif
1180+
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185index 1a97af3..7529d31 100644
1186--- a/arch/ia64/include/asm/pgtable.h
1187+++ b/arch/ia64/include/asm/pgtable.h
1188@@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192-
1193+#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197@@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201+
1202+#ifdef CONFIG_PAX_PAGEEXEC
1203+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206+#else
1207+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209+# define PAGE_COPY_NOEXEC PAGE_COPY
1210+#endif
1211+
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216index b77768d..e0795eb 100644
1217--- a/arch/ia64/include/asm/spinlock.h
1218+++ b/arch/ia64/include/asm/spinlock.h
1219@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229index 449c8c0..432a3d2 100644
1230--- a/arch/ia64/include/asm/uaccess.h
1231+++ b/arch/ia64/include/asm/uaccess.h
1232@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251index 24603be..948052d 100644
1252--- a/arch/ia64/kernel/module.c
1253+++ b/arch/ia64/kernel/module.c
1254@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258- if (mod && mod->arch.init_unw_table &&
1259- module_region == mod->module_init) {
1260+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268+in_init_rx (const struct module *mod, uint64_t addr)
1269+{
1270+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271+}
1272+
1273+static inline int
1274+in_init_rw (const struct module *mod, uint64_t addr)
1275+{
1276+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277+}
1278+
1279+static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282- return addr - (uint64_t) mod->module_init < mod->init_size;
1283+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284+}
1285+
1286+static inline int
1287+in_core_rx (const struct module *mod, uint64_t addr)
1288+{
1289+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290+}
1291+
1292+static inline int
1293+in_core_rw (const struct module *mod, uint64_t addr)
1294+{
1295+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301- return addr - (uint64_t) mod->module_core < mod->core_size;
1302+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311+ if (in_init_rx(mod, val))
1312+ val -= (uint64_t) mod->module_init_rx;
1313+ else if (in_init_rw(mod, val))
1314+ val -= (uint64_t) mod->module_init_rw;
1315+ else if (in_core_rx(mod, val))
1316+ val -= (uint64_t) mod->module_core_rx;
1317+ else if (in_core_rw(mod, val))
1318+ val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326- if (mod->core_size > MAX_LTOFF)
1327+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332- gp = mod->core_size - MAX_LTOFF / 2;
1333+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335- gp = mod->core_size / 2;
1336- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343index 609d500..7dde2a8 100644
1344--- a/arch/ia64/kernel/sys_ia64.c
1345+++ b/arch/ia64/kernel/sys_ia64.c
1346@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350+
1351+#ifdef CONFIG_PAX_RANDMMAP
1352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1353+ addr = mm->free_area_cache;
1354+ else
1355+#endif
1356+
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364- if (start_addr != TASK_UNMAPPED_BASE) {
1365+ if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367- addr = TASK_UNMAPPED_BASE;
1368+ addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373- if (!vma || addr + len <= vma->vm_start) {
1374+ if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379index 53c0ba0..2accdde 100644
1380--- a/arch/ia64/kernel/vmlinux.lds.S
1381+++ b/arch/ia64/kernel/vmlinux.lds.S
1382@@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386- __phys_per_cpu_start = __per_cpu_load;
1387+ __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392index 20b3593..1ce77f0 100644
1393--- a/arch/ia64/mm/fault.c
1394+++ b/arch/ia64/mm/fault.c
1395@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399+#ifdef CONFIG_PAX_PAGEEXEC
1400+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401+{
1402+ unsigned long i;
1403+
1404+ printk(KERN_ERR "PAX: bytes at PC: ");
1405+ for (i = 0; i < 8; i++) {
1406+ unsigned int c;
1407+ if (get_user(c, (unsigned int *)pc+i))
1408+ printk(KERN_CONT "???????? ");
1409+ else
1410+ printk(KERN_CONT "%08x ", c);
1411+ }
1412+ printk("\n");
1413+}
1414+#endif
1415+
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423- if ((vma->vm_flags & mask) != mask)
1424+ if ((vma->vm_flags & mask) != mask) {
1425+
1426+#ifdef CONFIG_PAX_PAGEEXEC
1427+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429+ goto bad_area;
1430+
1431+ up_read(&mm->mmap_sem);
1432+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433+ do_group_exit(SIGKILL);
1434+ }
1435+#endif
1436+
1437 goto bad_area;
1438
1439+ }
1440+
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445index 5ca674b..e0e1b70 100644
1446--- a/arch/ia64/mm/hugetlbpage.c
1447+++ b/arch/ia64/mm/hugetlbpage.c
1448@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452- if (!vmm || (addr + len) <= vmm->vm_start)
1453+ if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458index 00cb0e2..2ad8024 100644
1459--- a/arch/ia64/mm/init.c
1460+++ b/arch/ia64/mm/init.c
1461@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465+
1466+#ifdef CONFIG_PAX_PAGEEXEC
1467+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468+ vma->vm_flags &= ~VM_EXEC;
1469+
1470+#ifdef CONFIG_PAX_MPROTECT
1471+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472+ vma->vm_flags &= ~VM_MAYEXEC;
1473+#endif
1474+
1475+ }
1476+#endif
1477+
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482index 82abd15..d95ae5d 100644
1483--- a/arch/m32r/lib/usercopy.c
1484+++ b/arch/m32r/lib/usercopy.c
1485@@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489+ if ((long)n < 0)
1490+ return n;
1491+
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499+ if ((long)n < 0)
1500+ return n;
1501+
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506index 455c0ac..ad65fbe 100644
1507--- a/arch/mips/include/asm/elf.h
1508+++ b/arch/mips/include/asm/elf.h
1509@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513+#ifdef CONFIG_PAX_ASLR
1514+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515+
1516+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518+#endif
1519+
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525-struct mm_struct;
1526-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527-#define arch_randomize_brk arch_randomize_brk
1528-
1529 #endif /* _ASM_ELF_H */
1530diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531index e59cd1a..8e329d6 100644
1532--- a/arch/mips/include/asm/page.h
1533+++ b/arch/mips/include/asm/page.h
1534@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544index 6018c80..7c37203 100644
1545--- a/arch/mips/include/asm/system.h
1546+++ b/arch/mips/include/asm/system.h
1547@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551-extern unsigned long arch_align_stack(unsigned long sp);
1552+#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556index 9fdd8bc..4bd7f1a 100644
1557--- a/arch/mips/kernel/binfmt_elfn32.c
1558+++ b/arch/mips/kernel/binfmt_elfn32.c
1559@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563+#ifdef CONFIG_PAX_ASLR
1564+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565+
1566+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568+#endif
1569+
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574index ff44823..97f8906 100644
1575--- a/arch/mips/kernel/binfmt_elfo32.c
1576+++ b/arch/mips/kernel/binfmt_elfo32.c
1577@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581+#ifdef CONFIG_PAX_ASLR
1582+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583+
1584+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586+#endif
1587+
1588 #include <asm/processor.h>
1589
1590 /*
1591diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592index c47f96e..661d418 100644
1593--- a/arch/mips/kernel/process.c
1594+++ b/arch/mips/kernel/process.c
1595@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599-
1600-/*
1601- * Don't forget that the stack pointer must be aligned on a 8 bytes
1602- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603- */
1604-unsigned long arch_align_stack(unsigned long sp)
1605-{
1606- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607- sp -= get_random_int() & ~PAGE_MASK;
1608-
1609- return sp & ALMASK;
1610-}
1611diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612index 937cf33..adb39bb 100644
1613--- a/arch/mips/mm/fault.c
1614+++ b/arch/mips/mm/fault.c
1615@@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619+#ifdef CONFIG_PAX_PAGEEXEC
1620+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621+{
1622+ unsigned long i;
1623+
1624+ printk(KERN_ERR "PAX: bytes at PC: ");
1625+ for (i = 0; i < 5; i++) {
1626+ unsigned int c;
1627+ if (get_user(c, (unsigned int *)pc+i))
1628+ printk(KERN_CONT "???????? ");
1629+ else
1630+ printk(KERN_CONT "%08x ", c);
1631+ }
1632+ printk("\n");
1633+}
1634+#endif
1635+
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640index 302d779..7d35bf8 100644
1641--- a/arch/mips/mm/mmap.c
1642+++ b/arch/mips/mm/mmap.c
1643@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647+
1648+#ifdef CONFIG_PAX_RANDMMAP
1649+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650+#endif
1651+
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659- if (TASK_SIZE - len >= addr &&
1660- (!vma || addr + len <= vma->vm_start))
1661+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1662 return addr;
1663 }
1664
1665@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669- if (!vma || addr + len <= vma->vm_start)
1670+ if (check_heap_stack_gap(vmm, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678- if (!vma || addr <= vma->vm_start) {
1679+ if (check_heap_stack_gap(vmm, addr - len, len))
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687- if (likely(!vma || addr + len <= vma->vm_start)) {
1688+ if (check_heap_stack_gap(vmm, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696-
1697-static inline unsigned long brk_rnd(void)
1698-{
1699- unsigned long rnd = get_random_int();
1700-
1701- rnd = rnd << PAGE_SHIFT;
1702- /* 8MB for 32bit, 256MB for 64bit */
1703- if (TASK_IS_32BIT_ADDR)
1704- rnd = rnd & 0x7ffffful;
1705- else
1706- rnd = rnd & 0xffffffful;
1707-
1708- return rnd;
1709-}
1710-
1711-unsigned long arch_randomize_brk(struct mm_struct *mm)
1712-{
1713- unsigned long base = mm->brk;
1714- unsigned long ret;
1715-
1716- ret = PAGE_ALIGN(base + brk_rnd());
1717-
1718- if (ret < mm->brk)
1719- return mm->brk;
1720-
1721- return ret;
1722-}
1723diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724index 19f6cb1..6c78cf2 100644
1725--- a/arch/parisc/include/asm/elf.h
1726+++ b/arch/parisc/include/asm/elf.h
1727@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731+#ifdef CONFIG_PAX_ASLR
1732+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733+
1734+#define PAX_DELTA_MMAP_LEN 16
1735+#define PAX_DELTA_STACK_LEN 16
1736+#endif
1737+
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742index 22dadeb..f6c2be4 100644
1743--- a/arch/parisc/include/asm/pgtable.h
1744+++ b/arch/parisc/include/asm/pgtable.h
1745@@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749+
1750+#ifdef CONFIG_PAX_PAGEEXEC
1751+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754+#else
1755+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756+# define PAGE_COPY_NOEXEC PAGE_COPY
1757+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758+#endif
1759+
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764index 5e34ccf..672bc9c 100644
1765--- a/arch/parisc/kernel/module.c
1766+++ b/arch/parisc/kernel/module.c
1767@@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771+static inline int in_init_rx(struct module *me, void *loc)
1772+{
1773+ return (loc >= me->module_init_rx &&
1774+ loc < (me->module_init_rx + me->init_size_rx));
1775+}
1776+
1777+static inline int in_init_rw(struct module *me, void *loc)
1778+{
1779+ return (loc >= me->module_init_rw &&
1780+ loc < (me->module_init_rw + me->init_size_rw));
1781+}
1782+
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785- return (loc >= me->module_init &&
1786- loc <= (me->module_init + me->init_size));
1787+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1788+}
1789+
1790+static inline int in_core_rx(struct module *me, void *loc)
1791+{
1792+ return (loc >= me->module_core_rx &&
1793+ loc < (me->module_core_rx + me->core_size_rx));
1794+}
1795+
1796+static inline int in_core_rw(struct module *me, void *loc)
1797+{
1798+ return (loc >= me->module_core_rw &&
1799+ loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804- return (loc >= me->module_core &&
1805- loc <= (me->module_core + me->core_size));
1806+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814- me->core_size = ALIGN(me->core_size, 16);
1815- me->arch.got_offset = me->core_size;
1816- me->core_size += gots * sizeof(struct got_entry);
1817+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818+ me->arch.got_offset = me->core_size_rw;
1819+ me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821- me->core_size = ALIGN(me->core_size, 16);
1822- me->arch.fdesc_offset = me->core_size;
1823- me->core_size += fdescs * sizeof(Elf_Fdesc);
1824+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825+ me->arch.fdesc_offset = me->core_size_rw;
1826+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834- got = me->module_core + me->arch.got_offset;
1835+ got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867index c9b9322..02d8940 100644
1868--- a/arch/parisc/kernel/sys_parisc.c
1869+++ b/arch/parisc/kernel/sys_parisc.c
1870@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874- if (!vma || addr + len <= vma->vm_start)
1875+ if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883- if (!vma || addr + len <= vma->vm_start)
1884+ if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892- addr = TASK_UNMAPPED_BASE;
1893+ addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898index f19e660..414fe24 100644
1899--- a/arch/parisc/kernel/traps.c
1900+++ b/arch/parisc/kernel/traps.c
1901@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905- if (vma && (regs->iaoq[0] >= vma->vm_start)
1906- && (vma->vm_flags & VM_EXEC)) {
1907-
1908+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913index 18162ce..94de376 100644
1914--- a/arch/parisc/mm/fault.c
1915+++ b/arch/parisc/mm/fault.c
1916@@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920+#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928- if (code == 6 || code == 16)
1929+ if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937+#ifdef CONFIG_PAX_PAGEEXEC
1938+/*
1939+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940+ *
1941+ * returns 1 when task should be killed
1942+ * 2 when rt_sigreturn trampoline was detected
1943+ * 3 when unpatched PLT trampoline was detected
1944+ */
1945+static int pax_handle_fetch_fault(struct pt_regs *regs)
1946+{
1947+
1948+#ifdef CONFIG_PAX_EMUPLT
1949+ int err;
1950+
1951+ do { /* PaX: unpatched PLT emulation */
1952+ unsigned int bl, depwi;
1953+
1954+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962+
1963+ err = get_user(ldw, (unsigned int *)addr);
1964+ err |= get_user(bv, (unsigned int *)(addr+4));
1965+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1966+
1967+ if (err)
1968+ break;
1969+
1970+ if (ldw == 0x0E801096U &&
1971+ bv == 0xEAC0C000U &&
1972+ ldw2 == 0x0E881095U)
1973+ {
1974+ unsigned int resolver, map;
1975+
1976+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978+ if (err)
1979+ break;
1980+
1981+ regs->gr[20] = instruction_pointer(regs)+8;
1982+ regs->gr[21] = map;
1983+ regs->gr[22] = resolver;
1984+ regs->iaoq[0] = resolver | 3UL;
1985+ regs->iaoq[1] = regs->iaoq[0] + 4;
1986+ return 3;
1987+ }
1988+ }
1989+ } while (0);
1990+#endif
1991+
1992+#ifdef CONFIG_PAX_EMUTRAMP
1993+
1994+#ifndef CONFIG_PAX_EMUSIGRT
1995+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996+ return 1;
1997+#endif
1998+
1999+ do { /* PaX: rt_sigreturn emulation */
2000+ unsigned int ldi1, ldi2, bel, nop;
2001+
2002+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006+
2007+ if (err)
2008+ break;
2009+
2010+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011+ ldi2 == 0x3414015AU &&
2012+ bel == 0xE4008200U &&
2013+ nop == 0x08000240U)
2014+ {
2015+ regs->gr[25] = (ldi1 & 2) >> 1;
2016+ regs->gr[20] = __NR_rt_sigreturn;
2017+ regs->gr[31] = regs->iaoq[1] + 16;
2018+ regs->sr[0] = regs->iasq[1];
2019+ regs->iaoq[0] = 0x100UL;
2020+ regs->iaoq[1] = regs->iaoq[0] + 4;
2021+ regs->iasq[0] = regs->sr[2];
2022+ regs->iasq[1] = regs->sr[2];
2023+ return 2;
2024+ }
2025+ } while (0);
2026+#endif
2027+
2028+ return 1;
2029+}
2030+
2031+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032+{
2033+ unsigned long i;
2034+
2035+ printk(KERN_ERR "PAX: bytes at PC: ");
2036+ for (i = 0; i < 5; i++) {
2037+ unsigned int c;
2038+ if (get_user(c, (unsigned int *)pc+i))
2039+ printk(KERN_CONT "???????? ");
2040+ else
2041+ printk(KERN_CONT "%08x ", c);
2042+ }
2043+ printk("\n");
2044+}
2045+#endif
2046+
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050@@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054- if ((vma->vm_flags & acc_type) != acc_type)
2055+ if ((vma->vm_flags & acc_type) != acc_type) {
2056+
2057+#ifdef CONFIG_PAX_PAGEEXEC
2058+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059+ (address & ~3UL) == instruction_pointer(regs))
2060+ {
2061+ up_read(&mm->mmap_sem);
2062+ switch (pax_handle_fetch_fault(regs)) {
2063+
2064+#ifdef CONFIG_PAX_EMUPLT
2065+ case 3:
2066+ return;
2067+#endif
2068+
2069+#ifdef CONFIG_PAX_EMUTRAMP
2070+ case 2:
2071+ return;
2072+#endif
2073+
2074+ }
2075+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076+ do_group_exit(SIGKILL);
2077+ }
2078+#endif
2079+
2080 goto bad_area;
2081+ }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086index 3bf9cca..e7457d0 100644
2087--- a/arch/powerpc/include/asm/elf.h
2088+++ b/arch/powerpc/include/asm/elf.h
2089@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093-extern unsigned long randomize_et_dyn(unsigned long base);
2094-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095+#define ELF_ET_DYN_BASE (0x20000000)
2096+
2097+#ifdef CONFIG_PAX_ASLR
2098+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099+
2100+#ifdef __powerpc64__
2101+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103+#else
2104+#define PAX_DELTA_MMAP_LEN 15
2105+#define PAX_DELTA_STACK_LEN 15
2106+#endif
2107+#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116-#define arch_randomize_brk arch_randomize_brk
2117-
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122index bca8fdc..61e9580 100644
2123--- a/arch/powerpc/include/asm/kmap_types.h
2124+++ b/arch/powerpc/include/asm/kmap_types.h
2125@@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129+ KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134index d4a7f64..451de1c 100644
2135--- a/arch/powerpc/include/asm/mman.h
2136+++ b/arch/powerpc/include/asm/mman.h
2137@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147index dd9c4fd..a2ced87 100644
2148--- a/arch/powerpc/include/asm/page.h
2149+++ b/arch/powerpc/include/asm/page.h
2150@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156+#define VM_DATA_DEFAULT_FLAGS32 \
2157+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166+#define ktla_ktva(addr) (addr)
2167+#define ktva_ktla(addr) (addr)
2168+
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173index fb40ede..d3ce956 100644
2174--- a/arch/powerpc/include/asm/page_64.h
2175+++ b/arch/powerpc/include/asm/page_64.h
2176@@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182+#define VM_STACK_DEFAULT_FLAGS32 \
2183+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189+#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193+#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198index 88b0bd9..e32bc67 100644
2199--- a/arch/powerpc/include/asm/pgtable.h
2200+++ b/arch/powerpc/include/asm/pgtable.h
2201@@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205+#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210index 4aad413..85d86bf 100644
2211--- a/arch/powerpc/include/asm/pte-hash32.h
2212+++ b/arch/powerpc/include/asm/pte-hash32.h
2213@@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217+#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222index 559da19..7e5835c 100644
2223--- a/arch/powerpc/include/asm/reg.h
2224+++ b/arch/powerpc/include/asm/reg.h
2225@@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234index e30a13d..2b7d994 100644
2235--- a/arch/powerpc/include/asm/system.h
2236+++ b/arch/powerpc/include/asm/system.h
2237@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241-extern unsigned long arch_align_stack(unsigned long sp);
2242+#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247index bd0fb84..a42a14b 100644
2248--- a/arch/powerpc/include/asm/uaccess.h
2249+++ b/arch/powerpc/include/asm/uaccess.h
2250@@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255+
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259@@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263-#ifndef __powerpc64__
2264-
2265-static inline unsigned long copy_from_user(void *to,
2266- const void __user *from, unsigned long n)
2267-{
2268- unsigned long over;
2269-
2270- if (access_ok(VERIFY_READ, from, n))
2271- return __copy_tofrom_user((__force void __user *)to, from, n);
2272- if ((unsigned long)from < TASK_SIZE) {
2273- over = (unsigned long)from + n - TASK_SIZE;
2274- return __copy_tofrom_user((__force void __user *)to, from,
2275- n - over) + over;
2276- }
2277- return n;
2278-}
2279-
2280-static inline unsigned long copy_to_user(void __user *to,
2281- const void *from, unsigned long n)
2282-{
2283- unsigned long over;
2284-
2285- if (access_ok(VERIFY_WRITE, to, n))
2286- return __copy_tofrom_user(to, (__force void __user *)from, n);
2287- if ((unsigned long)to < TASK_SIZE) {
2288- over = (unsigned long)to + n - TASK_SIZE;
2289- return __copy_tofrom_user(to, (__force void __user *)from,
2290- n - over) + over;
2291- }
2292- return n;
2293-}
2294-
2295-#else /* __powerpc64__ */
2296-
2297-#define __copy_in_user(to, from, size) \
2298- __copy_tofrom_user((to), (from), (size))
2299-
2300-extern unsigned long copy_from_user(void *to, const void __user *from,
2301- unsigned long n);
2302-extern unsigned long copy_to_user(void __user *to, const void *from,
2303- unsigned long n);
2304-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305- unsigned long n);
2306-
2307-#endif /* __powerpc64__ */
2308-
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316+
2317+ if (!__builtin_constant_p(n))
2318+ check_object_size(to, n, false);
2319+
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327+
2328+ if (!__builtin_constant_p(n))
2329+ check_object_size(from, n, true);
2330+
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338+#ifndef __powerpc64__
2339+
2340+static inline unsigned long __must_check copy_from_user(void *to,
2341+ const void __user *from, unsigned long n)
2342+{
2343+ unsigned long over;
2344+
2345+ if ((long)n < 0)
2346+ return n;
2347+
2348+ if (access_ok(VERIFY_READ, from, n)) {
2349+ if (!__builtin_constant_p(n))
2350+ check_object_size(to, n, false);
2351+ return __copy_tofrom_user((__force void __user *)to, from, n);
2352+ }
2353+ if ((unsigned long)from < TASK_SIZE) {
2354+ over = (unsigned long)from + n - TASK_SIZE;
2355+ if (!__builtin_constant_p(n - over))
2356+ check_object_size(to, n - over, false);
2357+ return __copy_tofrom_user((__force void __user *)to, from,
2358+ n - over) + over;
2359+ }
2360+ return n;
2361+}
2362+
2363+static inline unsigned long __must_check copy_to_user(void __user *to,
2364+ const void *from, unsigned long n)
2365+{
2366+ unsigned long over;
2367+
2368+ if ((long)n < 0)
2369+ return n;
2370+
2371+ if (access_ok(VERIFY_WRITE, to, n)) {
2372+ if (!__builtin_constant_p(n))
2373+ check_object_size(from, n, true);
2374+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2375+ }
2376+ if ((unsigned long)to < TASK_SIZE) {
2377+ over = (unsigned long)to + n - TASK_SIZE;
2378+ if (!__builtin_constant_p(n))
2379+ check_object_size(from, n - over, true);
2380+ return __copy_tofrom_user(to, (__force void __user *)from,
2381+ n - over) + over;
2382+ }
2383+ return n;
2384+}
2385+
2386+#else /* __powerpc64__ */
2387+
2388+#define __copy_in_user(to, from, size) \
2389+ __copy_tofrom_user((to), (from), (size))
2390+
2391+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392+{
2393+ if ((long)n < 0 || n > INT_MAX)
2394+ return n;
2395+
2396+ if (!__builtin_constant_p(n))
2397+ check_object_size(to, n, false);
2398+
2399+ if (likely(access_ok(VERIFY_READ, from, n)))
2400+ n = __copy_from_user(to, from, n);
2401+ else
2402+ memset(to, 0, n);
2403+ return n;
2404+}
2405+
2406+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407+{
2408+ if ((long)n < 0 || n > INT_MAX)
2409+ return n;
2410+
2411+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412+ if (!__builtin_constant_p(n))
2413+ check_object_size(from, n, true);
2414+ n = __copy_to_user(to, from, n);
2415+ }
2416+ return n;
2417+}
2418+
2419+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420+ unsigned long n);
2421+
2422+#endif /* __powerpc64__ */
2423+
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428index 429983c..7af363b 100644
2429--- a/arch/powerpc/kernel/exceptions-64e.S
2430+++ b/arch/powerpc/kernel/exceptions-64e.S
2431@@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435+ bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439@@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443-1: bl .save_nvgprs
2444- mr r5,r3
2445+1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450index cf9c69b..ebc9640 100644
2451--- a/arch/powerpc/kernel/exceptions-64s.S
2452+++ b/arch/powerpc/kernel/exceptions-64s.S
2453@@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457+ bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461- bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466index 0b6d796..d760ddb 100644
2467--- a/arch/powerpc/kernel/module_32.c
2468+++ b/arch/powerpc/kernel/module_32.c
2469@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473- printk("Module doesn't contain .plt or .init.plt sections.\n");
2474+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482- if (location >= mod->module_core
2483- && location < mod->module_core + mod->core_size)
2484+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487- else
2488+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491+ else {
2492+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493+ return ~0UL;
2494+ }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499index 6457574..08b28d3 100644
2500--- a/arch/powerpc/kernel/process.c
2501+++ b/arch/powerpc/kernel/process.c
2502@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521- printk(" (%pS)",
2522+ printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539-
2540-unsigned long arch_align_stack(unsigned long sp)
2541-{
2542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543- sp -= get_random_int() & ~PAGE_MASK;
2544- return sp & ~0xf;
2545-}
2546-
2547-static inline unsigned long brk_rnd(void)
2548-{
2549- unsigned long rnd = 0;
2550-
2551- /* 8MB for 32bit, 1GB for 64bit */
2552- if (is_32bit_task())
2553- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554- else
2555- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556-
2557- return rnd << PAGE_SHIFT;
2558-}
2559-
2560-unsigned long arch_randomize_brk(struct mm_struct *mm)
2561-{
2562- unsigned long base = mm->brk;
2563- unsigned long ret;
2564-
2565-#ifdef CONFIG_PPC_STD_MMU_64
2566- /*
2567- * If we are using 1TB segments and we are allowed to randomise
2568- * the heap, we can put it above 1TB so it is backed by a 1TB
2569- * segment. Otherwise the heap will be in the bottom 1TB
2570- * which always uses 256MB segments and this may result in a
2571- * performance penalty.
2572- */
2573- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575-#endif
2576-
2577- ret = PAGE_ALIGN(base + brk_rnd());
2578-
2579- if (ret < mm->brk)
2580- return mm->brk;
2581-
2582- return ret;
2583-}
2584-
2585-unsigned long randomize_et_dyn(unsigned long base)
2586-{
2587- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588-
2589- if (ret < base)
2590- return base;
2591-
2592- return ret;
2593-}
2594diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595index 836a5a1..27289a3 100644
2596--- a/arch/powerpc/kernel/signal_32.c
2597+++ b/arch/powerpc/kernel/signal_32.c
2598@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608index a50b5ec..547078a 100644
2609--- a/arch/powerpc/kernel/signal_64.c
2610+++ b/arch/powerpc/kernel/signal_64.c
2611@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621index 5459d14..10f8070 100644
2622--- a/arch/powerpc/kernel/traps.c
2623+++ b/arch/powerpc/kernel/traps.c
2624@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628+extern void gr_handle_kernel_exploit(void);
2629+
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637+ gr_handle_kernel_exploit();
2638+
2639 oops_exit();
2640 do_exit(err);
2641
2642diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643index 7d14bb6..1305601 100644
2644--- a/arch/powerpc/kernel/vdso.c
2645+++ b/arch/powerpc/kernel/vdso.c
2646@@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650+#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658- current->mm->context.vdso_base = 0;
2659+ current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667- 0, 0);
2668+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673index 5eea6f3..5d10396 100644
2674--- a/arch/powerpc/lib/usercopy_64.c
2675+++ b/arch/powerpc/lib/usercopy_64.c
2676@@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681-{
2682- if (likely(access_ok(VERIFY_READ, from, n)))
2683- n = __copy_from_user(to, from, n);
2684- else
2685- memset(to, 0, n);
2686- return n;
2687-}
2688-
2689-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690-{
2691- if (likely(access_ok(VERIFY_WRITE, to, n)))
2692- n = __copy_to_user(to, from, n);
2693- return n;
2694-}
2695-
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703-EXPORT_SYMBOL(copy_from_user);
2704-EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708index 5efe8c9..db9ceef 100644
2709--- a/arch/powerpc/mm/fault.c
2710+++ b/arch/powerpc/mm/fault.c
2711@@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715+#include <linux/slab.h>
2716+#include <linux/pagemap.h>
2717+#include <linux/compiler.h>
2718+#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722@@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726+#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734+#ifdef CONFIG_PAX_PAGEEXEC
2735+/*
2736+ * PaX: decide what to do with offenders (regs->nip = fault address)
2737+ *
2738+ * returns 1 when task should be killed
2739+ */
2740+static int pax_handle_fetch_fault(struct pt_regs *regs)
2741+{
2742+ return 1;
2743+}
2744+
2745+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746+{
2747+ unsigned long i;
2748+
2749+ printk(KERN_ERR "PAX: bytes at PC: ");
2750+ for (i = 0; i < 5; i++) {
2751+ unsigned int c;
2752+ if (get_user(c, (unsigned int __user *)pc+i))
2753+ printk(KERN_CONT "???????? ");
2754+ else
2755+ printk(KERN_CONT "%08x ", c);
2756+ }
2757+ printk("\n");
2758+}
2759+#endif
2760+
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768- error_code &= 0x48200000;
2769+ error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773@@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777- if (error_code & 0x10000000)
2778+ if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782@@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786- if (error_code & DSISR_PROTFAULT)
2787+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791@@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795+
2796+#ifdef CONFIG_PAX_PAGEEXEC
2797+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798+#ifdef CONFIG_PPC_STD_MMU
2799+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800+#else
2801+ if (is_exec && regs->nip == address) {
2802+#endif
2803+ switch (pax_handle_fetch_fault(regs)) {
2804+ }
2805+
2806+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807+ do_group_exit(SIGKILL);
2808+ }
2809+ }
2810+#endif
2811+
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816index 5a783d8..c23e14b 100644
2817--- a/arch/powerpc/mm/mmap_64.c
2818+++ b/arch/powerpc/mm/mmap_64.c
2819@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823+
2824+#ifdef CONFIG_PAX_RANDMMAP
2825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2826+ mm->mmap_base += mm->delta_mmap;
2827+#endif
2828+
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833+
2834+#ifdef CONFIG_PAX_RANDMMAP
2835+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2836+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837+#endif
2838+
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843index 73709f7..6b90313 100644
2844--- a/arch/powerpc/mm/slice.c
2845+++ b/arch/powerpc/mm/slice.c
2846@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850- return (!vma || (addr + len) <= vma->vm_start);
2851+ return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855@@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859- if (!vma || addr + len <= vma->vm_start) {
2860+ if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868- addr = mm->mmap_base;
2869- while (addr > len) {
2870+ if (mm->mmap_base < len)
2871+ addr = -ENOMEM;
2872+ else
2873+ addr = mm->mmap_base - len;
2874+
2875+ while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886- if (!vma || (addr + len) <= vma->vm_start) {
2887+ if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895- addr = vma->vm_start;
2896+ addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904+#ifdef CONFIG_PAX_RANDMMAP
2905+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906+ addr = 0;
2907+#endif
2908+
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913index 547f1a6..3fff354 100644
2914--- a/arch/s390/include/asm/elf.h
2915+++ b/arch/s390/include/asm/elf.h
2916@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920-extern unsigned long randomize_et_dyn(unsigned long base);
2921-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923+
2924+#ifdef CONFIG_PAX_ASLR
2925+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926+
2927+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2928+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929+#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933@@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938-#define arch_randomize_brk arch_randomize_brk
2939-
2940 #endif
2941diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942index ef573c1..75a1ce6 100644
2943--- a/arch/s390/include/asm/system.h
2944+++ b/arch/s390/include/asm/system.h
2945@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949-extern unsigned long arch_align_stack(unsigned long sp);
2950+#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955index 2b23885..e136e31 100644
2956--- a/arch/s390/include/asm/uaccess.h
2957+++ b/arch/s390/include/asm/uaccess.h
2958@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962+
2963+ if ((long)n < 0)
2964+ return n;
2965+
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973+ if ((long)n < 0)
2974+ return n;
2975+
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983+
2984+ if ((long)n < 0)
2985+ return n;
2986+
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991index dfcb343..eda788a 100644
2992--- a/arch/s390/kernel/module.c
2993+++ b/arch/s390/kernel/module.c
2994@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998- me->core_size = ALIGN(me->core_size, 4);
2999- me->arch.got_offset = me->core_size;
3000- me->core_size += me->arch.got_size;
3001- me->arch.plt_offset = me->core_size;
3002- me->core_size += me->arch.plt_size;
3003+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004+ me->arch.got_offset = me->core_size_rw;
3005+ me->core_size_rw += me->arch.got_size;
3006+ me->arch.plt_offset = me->core_size_rx;
3007+ me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015- gotent = me->module_core + me->arch.got_offset +
3016+ gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024- (val + (Elf_Addr) me->module_core - loc) >> 1;
3025+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033- ip = me->module_core + me->arch.plt_offset +
3034+ ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042- val = (Elf_Addr) me->module_core +
3043+ val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051- ((Elf_Addr) me->module_core + me->arch.got_offset);
3052+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066index 9451b21..ed8956f 100644
3067--- a/arch/s390/kernel/process.c
3068+++ b/arch/s390/kernel/process.c
3069@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073-
3074-unsigned long arch_align_stack(unsigned long sp)
3075-{
3076- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077- sp -= get_random_int() & ~PAGE_MASK;
3078- return sp & ~0xf;
3079-}
3080-
3081-static inline unsigned long brk_rnd(void)
3082-{
3083- /* 8MB for 32bit, 1GB for 64bit */
3084- if (is_32bit_task())
3085- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086- else
3087- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088-}
3089-
3090-unsigned long arch_randomize_brk(struct mm_struct *mm)
3091-{
3092- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093-
3094- if (ret < mm->brk)
3095- return mm->brk;
3096- return ret;
3097-}
3098-
3099-unsigned long randomize_et_dyn(unsigned long base)
3100-{
3101- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102-
3103- if (!(current->flags & PF_RANDOMIZE))
3104- return base;
3105- if (ret < base)
3106- return base;
3107- return ret;
3108-}
3109diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110index f09c748..cf9ec1d 100644
3111--- a/arch/s390/mm/mmap.c
3112+++ b/arch/s390/mm/mmap.c
3113@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117+
3118+#ifdef CONFIG_PAX_RANDMMAP
3119+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3120+ mm->mmap_base += mm->delta_mmap;
3121+#endif
3122+
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127+
3128+#ifdef CONFIG_PAX_RANDMMAP
3129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3130+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131+#endif
3132+
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140+
3141+#ifdef CONFIG_PAX_RANDMMAP
3142+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3143+ mm->mmap_base += mm->delta_mmap;
3144+#endif
3145+
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150+
3151+#ifdef CONFIG_PAX_RANDMMAP
3152+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3153+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154+#endif
3155+
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160index 589d5c7..669e274 100644
3161--- a/arch/score/include/asm/system.h
3162+++ b/arch/score/include/asm/system.h
3163@@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167-extern unsigned long arch_align_stack(unsigned long sp);
3168+#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173index 25d0803..d6c8e36 100644
3174--- a/arch/score/kernel/process.c
3175+++ b/arch/score/kernel/process.c
3176@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180-
3181-unsigned long arch_align_stack(unsigned long sp)
3182-{
3183- return sp;
3184-}
3185diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186index afeb710..d1d1289 100644
3187--- a/arch/sh/mm/mmap.c
3188+++ b/arch/sh/mm/mmap.c
3189@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193- if (TASK_SIZE - len >= addr &&
3194- (!vma || addr + len <= vma->vm_start))
3195+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199@@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203- if (likely(!vma || addr + len <= vma->vm_start)) {
3204+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212- if (TASK_SIZE - len >= addr &&
3213- (!vma || addr + len <= vma->vm_start))
3214+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222- if (!vma || addr <= vma->vm_start) {
3223+ if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231- addr = mm->mmap_base-len;
3232- if (do_colour_align)
3233- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234+ addr = mm->mmap_base - len;
3235
3236 do {
3237+ if (do_colour_align)
3238+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245- if (likely(!vma || addr+len <= vma->vm_start)) {
3246+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254- addr = vma->vm_start-len;
3255- if (do_colour_align)
3256- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257- } while (likely(len < vma->vm_start));
3258+ addr = skip_heap_stack_gap(vma, len);
3259+ } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264index ad1fb5d..fc5315b 100644
3265--- a/arch/sparc/Makefile
3266+++ b/arch/sparc/Makefile
3267@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277index 9f421df..b81fc12 100644
3278--- a/arch/sparc/include/asm/atomic_64.h
3279+++ b/arch/sparc/include/asm/atomic_64.h
3280@@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285+{
3286+ return v->counter;
3287+}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290+{
3291+ return v->counter;
3292+}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296+{
3297+ v->counter = i;
3298+}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301+{
3302+ v->counter = i;
3303+}
3304
3305 extern void atomic_add(int, atomic_t *);
3306+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326+{
3327+ return atomic_add_ret_unchecked(1, v);
3328+}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331+{
3332+ return atomic64_add_ret_unchecked(1, v);
3333+}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340+{
3341+ return atomic_add_ret_unchecked(i, v);
3342+}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345+{
3346+ return atomic64_add_ret_unchecked(i, v);
3347+}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356+{
3357+ return atomic_inc_return_unchecked(v) == 0;
3358+}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367+{
3368+ atomic_add_unchecked(1, v);
3369+}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372+{
3373+ atomic64_add_unchecked(1, v);
3374+}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378+{
3379+ atomic_sub_unchecked(1, v);
3380+}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383+{
3384+ atomic64_sub_unchecked(1, v);
3385+}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392+{
3393+ return cmpxchg(&v->counter, old, new);
3394+}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397+{
3398+ return xchg(&v->counter, new);
3399+}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403- int c, old;
3404+ int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407- if (unlikely(c == (u)))
3408+ if (unlikely(c == u))
3409 break;
3410- old = atomic_cmpxchg((v), c, c + (a));
3411+
3412+ asm volatile("addcc %2, %0, %0\n"
3413+
3414+#ifdef CONFIG_PAX_REFCOUNT
3415+ "tvs %%icc, 6\n"
3416+#endif
3417+
3418+ : "=r" (new)
3419+ : "0" (c), "ir" (a)
3420+ : "cc");
3421+
3422+ old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431+{
3432+ return xchg(&v->counter, new);
3433+}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437- long c, old;
3438+ long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441- if (unlikely(c == (u)))
3442+ if (unlikely(c == u))
3443 break;
3444- old = atomic64_cmpxchg((v), c, c + (a));
3445+
3446+ asm volatile("addcc %2, %0, %0\n"
3447+
3448+#ifdef CONFIG_PAX_REFCOUNT
3449+ "tvs %%xcc, 6\n"
3450+#endif
3451+
3452+ : "=r" (new)
3453+ : "0" (c), "ir" (a)
3454+ : "cc");
3455+
3456+ old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461- return c != (u);
3462+ return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467index 69358b5..17b4745 100644
3468--- a/arch/sparc/include/asm/cache.h
3469+++ b/arch/sparc/include/asm/cache.h
3470@@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474-#define L1_CACHE_BYTES 32
3475+#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480index 4269ca6..e3da77f 100644
3481--- a/arch/sparc/include/asm/elf_32.h
3482+++ b/arch/sparc/include/asm/elf_32.h
3483@@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487+#ifdef CONFIG_PAX_ASLR
3488+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489+
3490+#define PAX_DELTA_MMAP_LEN 16
3491+#define PAX_DELTA_STACK_LEN 16
3492+#endif
3493+
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498index 7df8b7f..4946269 100644
3499--- a/arch/sparc/include/asm/elf_64.h
3500+++ b/arch/sparc/include/asm/elf_64.h
3501@@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505+#ifdef CONFIG_PAX_ASLR
3506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507+
3508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510+#endif
3511+
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516index a790cc6..091ed94 100644
3517--- a/arch/sparc/include/asm/pgtable_32.h
3518+++ b/arch/sparc/include/asm/pgtable_32.h
3519@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523+
3524+#ifdef CONFIG_PAX_PAGEEXEC
3525+BTFIXUPDEF_INT(page_shared_noexec)
3526+BTFIXUPDEF_INT(page_copy_noexec)
3527+BTFIXUPDEF_INT(page_readonly_noexec)
3528+#endif
3529+
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537+#ifdef CONFIG_PAX_PAGEEXEC
3538+extern pgprot_t PAGE_SHARED_NOEXEC;
3539+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541+#else
3542+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543+# define PAGE_COPY_NOEXEC PAGE_COPY
3544+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545+#endif
3546+
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551index f6ae2b2..b03ffc7 100644
3552--- a/arch/sparc/include/asm/pgtsrmmu.h
3553+++ b/arch/sparc/include/asm/pgtsrmmu.h
3554@@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558+
3559+#ifdef CONFIG_PAX_PAGEEXEC
3560+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563+#endif
3564+
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569index 9689176..63c18ea 100644
3570--- a/arch/sparc/include/asm/spinlock_64.h
3571+++ b/arch/sparc/include/asm/spinlock_64.h
3572@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576-static void inline arch_read_lock(arch_rwlock_t *lock)
3577+static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584-"4: add %0, 1, %1\n"
3585+"4: addcc %0, 1, %1\n"
3586+
3587+#ifdef CONFIG_PAX_REFCOUNT
3588+" tvs %%icc, 6\n"
3589+#endif
3590+
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598- : "memory");
3599+ : "memory", "cc");
3600 }
3601
3602-static int inline arch_read_trylock(arch_rwlock_t *lock)
3603+static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611-" add %0, 1, %1\n"
3612+" addcc %0, 1, %1\n"
3613+
3614+#ifdef CONFIG_PAX_REFCOUNT
3615+" tvs %%icc, 6\n"
3616+#endif
3617+
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625-static void inline arch_read_unlock(arch_rwlock_t *lock)
3626+static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632-" sub %0, 1, %1\n"
3633+" subcc %0, 1, %1\n"
3634+
3635+#ifdef CONFIG_PAX_REFCOUNT
3636+" tvs %%icc, 6\n"
3637+#endif
3638+
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646-static void inline arch_write_lock(arch_rwlock_t *lock)
3647+static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655-static void inline arch_write_unlock(arch_rwlock_t *lock)
3656+static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664-static int inline arch_write_trylock(arch_rwlock_t *lock)
3665+static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670index fa57532..e1a4c53 100644
3671--- a/arch/sparc/include/asm/thread_info_32.h
3672+++ b/arch/sparc/include/asm/thread_info_32.h
3673@@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677+
3678+ unsigned long lowest_stack;
3679 };
3680
3681 /*
3682diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683index 60d86be..952dea1 100644
3684--- a/arch/sparc/include/asm/thread_info_64.h
3685+++ b/arch/sparc/include/asm/thread_info_64.h
3686@@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690+ unsigned long lowest_stack;
3691+
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696index e88fbe5..96b0ce5 100644
3697--- a/arch/sparc/include/asm/uaccess.h
3698+++ b/arch/sparc/include/asm/uaccess.h
3699@@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702+
3703+#ifdef __KERNEL__
3704+#ifndef __ASSEMBLY__
3705+#include <linux/types.h>
3706+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707+#endif
3708+#endif
3709+
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714index 8303ac4..07f333d 100644
3715--- a/arch/sparc/include/asm/uaccess_32.h
3716+++ b/arch/sparc/include/asm/uaccess_32.h
3717@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721- if (n && __access_ok((unsigned long) to, n))
3722+ if ((long)n < 0)
3723+ return n;
3724+
3725+ if (n && __access_ok((unsigned long) to, n)) {
3726+ if (!__builtin_constant_p(n))
3727+ check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729- else
3730+ } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736+ if ((long)n < 0)
3737+ return n;
3738+
3739+ if (!__builtin_constant_p(n))
3740+ check_object_size(from, n, true);
3741+
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747- if (n && __access_ok((unsigned long) from, n))
3748+ if ((long)n < 0)
3749+ return n;
3750+
3751+ if (n && __access_ok((unsigned long) from, n)) {
3752+ if (!__builtin_constant_p(n))
3753+ check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755- else
3756+ } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762+ if ((long)n < 0)
3763+ return n;
3764+
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769index 3e1449f..5293a0e 100644
3770--- a/arch/sparc/include/asm/uaccess_64.h
3771+++ b/arch/sparc/include/asm/uaccess_64.h
3772@@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776+#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784- unsigned long ret = ___copy_from_user(to, from, size);
3785+ unsigned long ret;
3786
3787+ if ((long)size < 0 || size > INT_MAX)
3788+ return size;
3789+
3790+ if (!__builtin_constant_p(size))
3791+ check_object_size(to, size, false);
3792+
3793+ ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801- unsigned long ret = ___copy_to_user(to, from, size);
3802+ unsigned long ret;
3803
3804+ if ((long)size < 0 || size > INT_MAX)
3805+ return size;
3806+
3807+ if (!__builtin_constant_p(size))
3808+ check_object_size(from, size, true);
3809+
3810+ ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815index cb85458..e063f17 100644
3816--- a/arch/sparc/kernel/Makefile
3817+++ b/arch/sparc/kernel/Makefile
3818@@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822-ccflags-y := -Werror
3823+#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828index f793742..4d880af 100644
3829--- a/arch/sparc/kernel/process_32.c
3830+++ b/arch/sparc/kernel/process_32.c
3831@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835- printk("%pS\n", (void *) rw->ins[7]);
3836+ printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844- printk("PC: <%pS>\n", (void *) r->pc);
3845+ printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861- printk("%pS ] ", (void *) pc);
3862+ printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867index 3739a06..48b2ff0 100644
3868--- a/arch/sparc/kernel/process_64.c
3869+++ b/arch/sparc/kernel/process_64.c
3870@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882- printk("TPC: <%pS>\n", (void *) regs->tpc);
3883+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906index 42b282f..28ce9f2 100644
3907--- a/arch/sparc/kernel/sys_sparc_32.c
3908+++ b/arch/sparc/kernel/sys_sparc_32.c
3909@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913- addr = TASK_UNMAPPED_BASE;
3914+ addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922- if (!vmm || addr + len <= vmm->vm_start)
3923+ if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928index 441521a..b767073 100644
3929--- a/arch/sparc/kernel/sys_sparc_64.c
3930+++ b/arch/sparc/kernel/sys_sparc_64.c
3931@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935- if ((flags & MAP_SHARED) &&
3936+ if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944+#ifdef CONFIG_PAX_RANDMMAP
3945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946+#endif
3947+
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955- if (task_size - len >= addr &&
3956- (!vma || addr + len <= vma->vm_start))
3957+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962- start_addr = addr = mm->free_area_cache;
3963+ start_addr = addr = mm->free_area_cache;
3964 } else {
3965- start_addr = addr = TASK_UNMAPPED_BASE;
3966+ start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970@@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974- if (start_addr != TASK_UNMAPPED_BASE) {
3975- start_addr = addr = TASK_UNMAPPED_BASE;
3976+ if (start_addr != mm->mmap_base) {
3977+ start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983- if (likely(!vma || addr + len <= vma->vm_start)) {
3984+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992- if ((flags & MAP_SHARED) &&
3993+ if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001- if (task_size - len >= addr &&
4002- (!vma || addr + len <= vma->vm_start))
4003+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011- if (!vma || addr <= vma->vm_start) {
4012+ if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020- addr = mm->mmap_base-len;
4021- if (do_color_align)
4022- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023+ addr = mm->mmap_base - len;
4024
4025 do {
4026+ if (do_color_align)
4027+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034- if (likely(!vma || addr+len <= vma->vm_start)) {
4035+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043- addr = vma->vm_start-len;
4044- if (do_color_align)
4045- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046- } while (likely(len < vma->vm_start));
4047+ addr = skip_heap_stack_gap(vma, len);
4048+ } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069+
4070+#ifdef CONFIG_PAX_RANDMMAP
4071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4072+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073+#endif
4074+
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079index 591f20c..0f1b925 100644
4080--- a/arch/sparc/kernel/traps_32.c
4081+++ b/arch/sparc/kernel/traps_32.c
4082@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086+extern void gr_handle_kernel_exploit(void);
4087+
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103- if(regs->psr & PSR_PS)
4104+ if(regs->psr & PSR_PS) {
4105+ gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107+ }
4108 do_exit(SIGSEGV);
4109 }
4110
4111diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112index 0cbdaa4..438e4c9 100644
4113--- a/arch/sparc/kernel/traps_64.c
4114+++ b/arch/sparc/kernel/traps_64.c
4115@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128+
4129+#ifdef CONFIG_PAX_REFCOUNT
4130+ if (lvl == 6)
4131+ pax_report_refcount_overflow(regs);
4132+#endif
4133+
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141-
4142+
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147+#ifdef CONFIG_PAX_REFCOUNT
4148+ if (lvl == 6)
4149+ pax_report_refcount_overflow(regs);
4150+#endif
4151+
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159- printk("TPC<%pS>\n", (void *) regs->tpc);
4160+ printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210- printk(" [%016lx] %pS\n", pc, (void *) pc);
4211+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217- printk(" [%016lx] %pS\n", pc, (void *) pc);
4218+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226+extern void gr_handle_kernel_exploit(void);
4227+
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244- if (regs->tstate & TSTATE_PRIV)
4245+ if (regs->tstate & TSTATE_PRIV) {
4246+ gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248+ }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253index 76e4ac1..78f8bb1 100644
4254--- a/arch/sparc/kernel/unaligned_64.c
4255+++ b/arch/sparc/kernel/unaligned_64.c
4256@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266index a3fc437..fea9957 100644
4267--- a/arch/sparc/lib/Makefile
4268+++ b/arch/sparc/lib/Makefile
4269@@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273-ccflags-y := -Werror
4274+#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279index 59186e0..f747d7a 100644
4280--- a/arch/sparc/lib/atomic_64.S
4281+++ b/arch/sparc/lib/atomic_64.S
4282@@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286- add %g1, %o0, %g7
4287+ addcc %g1, %o0, %g7
4288+
4289+#ifdef CONFIG_PAX_REFCOUNT
4290+ tvs %icc, 6
4291+#endif
4292+
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300+ .globl atomic_add_unchecked
4301+ .type atomic_add_unchecked,#function
4302+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303+ BACKOFF_SETUP(%o2)
4304+1: lduw [%o1], %g1
4305+ add %g1, %o0, %g7
4306+ cas [%o1], %g1, %g7
4307+ cmp %g1, %g7
4308+ bne,pn %icc, 2f
4309+ nop
4310+ retl
4311+ nop
4312+2: BACKOFF_SPIN(%o2, %o3, 1b)
4313+ .size atomic_add_unchecked, .-atomic_add_unchecked
4314+
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320- sub %g1, %o0, %g7
4321+ subcc %g1, %o0, %g7
4322+
4323+#ifdef CONFIG_PAX_REFCOUNT
4324+ tvs %icc, 6
4325+#endif
4326+
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334+ .globl atomic_sub_unchecked
4335+ .type atomic_sub_unchecked,#function
4336+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337+ BACKOFF_SETUP(%o2)
4338+1: lduw [%o1], %g1
4339+ sub %g1, %o0, %g7
4340+ cas [%o1], %g1, %g7
4341+ cmp %g1, %g7
4342+ bne,pn %icc, 2f
4343+ nop
4344+ retl
4345+ nop
4346+2: BACKOFF_SPIN(%o2, %o3, 1b)
4347+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348+
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354- add %g1, %o0, %g7
4355+ addcc %g1, %o0, %g7
4356+
4357+#ifdef CONFIG_PAX_REFCOUNT
4358+ tvs %icc, 6
4359+#endif
4360+
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368+ .globl atomic_add_ret_unchecked
4369+ .type atomic_add_ret_unchecked,#function
4370+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371+ BACKOFF_SETUP(%o2)
4372+1: lduw [%o1], %g1
4373+ addcc %g1, %o0, %g7
4374+ cas [%o1], %g1, %g7
4375+ cmp %g1, %g7
4376+ bne,pn %icc, 2f
4377+ add %g7, %o0, %g7
4378+ sra %g7, 0, %o0
4379+ retl
4380+ nop
4381+2: BACKOFF_SPIN(%o2, %o3, 1b)
4382+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383+
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389- sub %g1, %o0, %g7
4390+ subcc %g1, %o0, %g7
4391+
4392+#ifdef CONFIG_PAX_REFCOUNT
4393+ tvs %icc, 6
4394+#endif
4395+
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403- add %g1, %o0, %g7
4404+ addcc %g1, %o0, %g7
4405+
4406+#ifdef CONFIG_PAX_REFCOUNT
4407+ tvs %xcc, 6
4408+#endif
4409+
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417+ .globl atomic64_add_unchecked
4418+ .type atomic64_add_unchecked,#function
4419+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420+ BACKOFF_SETUP(%o2)
4421+1: ldx [%o1], %g1
4422+ addcc %g1, %o0, %g7
4423+ casx [%o1], %g1, %g7
4424+ cmp %g1, %g7
4425+ bne,pn %xcc, 2f
4426+ nop
4427+ retl
4428+ nop
4429+2: BACKOFF_SPIN(%o2, %o3, 1b)
4430+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431+
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437- sub %g1, %o0, %g7
4438+ subcc %g1, %o0, %g7
4439+
4440+#ifdef CONFIG_PAX_REFCOUNT
4441+ tvs %xcc, 6
4442+#endif
4443+
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451+ .globl atomic64_sub_unchecked
4452+ .type atomic64_sub_unchecked,#function
4453+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454+ BACKOFF_SETUP(%o2)
4455+1: ldx [%o1], %g1
4456+ subcc %g1, %o0, %g7
4457+ casx [%o1], %g1, %g7
4458+ cmp %g1, %g7
4459+ bne,pn %xcc, 2f
4460+ nop
4461+ retl
4462+ nop
4463+2: BACKOFF_SPIN(%o2, %o3, 1b)
4464+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465+
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471- add %g1, %o0, %g7
4472+ addcc %g1, %o0, %g7
4473+
4474+#ifdef CONFIG_PAX_REFCOUNT
4475+ tvs %xcc, 6
4476+#endif
4477+
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485+ .globl atomic64_add_ret_unchecked
4486+ .type atomic64_add_ret_unchecked,#function
4487+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488+ BACKOFF_SETUP(%o2)
4489+1: ldx [%o1], %g1
4490+ addcc %g1, %o0, %g7
4491+ casx [%o1], %g1, %g7
4492+ cmp %g1, %g7
4493+ bne,pn %xcc, 2f
4494+ add %g7, %o0, %g7
4495+ mov %g7, %o0
4496+ retl
4497+ nop
4498+2: BACKOFF_SPIN(%o2, %o3, 1b)
4499+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500+
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506- sub %g1, %o0, %g7
4507+ subcc %g1, %o0, %g7
4508+
4509+#ifdef CONFIG_PAX_REFCOUNT
4510+ tvs %xcc, 6
4511+#endif
4512+
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517index 1b30bb3..b4a16c7 100644
4518--- a/arch/sparc/lib/ksyms.c
4519+++ b/arch/sparc/lib/ksyms.c
4520@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524+EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528+EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531+EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535+EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540index 301421c..e2535d1 100644
4541--- a/arch/sparc/mm/Makefile
4542+++ b/arch/sparc/mm/Makefile
4543@@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547-ccflags-y := -Werror
4548+#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553index 8023fd7..c8e89e9 100644
4554--- a/arch/sparc/mm/fault_32.c
4555+++ b/arch/sparc/mm/fault_32.c
4556@@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560+#include <linux/slab.h>
4561+#include <linux/pagemap.h>
4562+#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570+#ifdef CONFIG_PAX_PAGEEXEC
4571+#ifdef CONFIG_PAX_DLRESOLVE
4572+static void pax_emuplt_close(struct vm_area_struct *vma)
4573+{
4574+ vma->vm_mm->call_dl_resolve = 0UL;
4575+}
4576+
4577+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578+{
4579+ unsigned int *kaddr;
4580+
4581+ vmf->page = alloc_page(GFP_HIGHUSER);
4582+ if (!vmf->page)
4583+ return VM_FAULT_OOM;
4584+
4585+ kaddr = kmap(vmf->page);
4586+ memset(kaddr, 0, PAGE_SIZE);
4587+ kaddr[0] = 0x9DE3BFA8U; /* save */
4588+ flush_dcache_page(vmf->page);
4589+ kunmap(vmf->page);
4590+ return VM_FAULT_MAJOR;
4591+}
4592+
4593+static const struct vm_operations_struct pax_vm_ops = {
4594+ .close = pax_emuplt_close,
4595+ .fault = pax_emuplt_fault
4596+};
4597+
4598+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599+{
4600+ int ret;
4601+
4602+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4603+ vma->vm_mm = current->mm;
4604+ vma->vm_start = addr;
4605+ vma->vm_end = addr + PAGE_SIZE;
4606+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608+ vma->vm_ops = &pax_vm_ops;
4609+
4610+ ret = insert_vm_struct(current->mm, vma);
4611+ if (ret)
4612+ return ret;
4613+
4614+ ++current->mm->total_vm;
4615+ return 0;
4616+}
4617+#endif
4618+
4619+/*
4620+ * PaX: decide what to do with offenders (regs->pc = fault address)
4621+ *
4622+ * returns 1 when task should be killed
4623+ * 2 when patched PLT trampoline was detected
4624+ * 3 when unpatched PLT trampoline was detected
4625+ */
4626+static int pax_handle_fetch_fault(struct pt_regs *regs)
4627+{
4628+
4629+#ifdef CONFIG_PAX_EMUPLT
4630+ int err;
4631+
4632+ do { /* PaX: patched PLT emulation #1 */
4633+ unsigned int sethi1, sethi2, jmpl;
4634+
4635+ err = get_user(sethi1, (unsigned int *)regs->pc);
4636+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638+
4639+ if (err)
4640+ break;
4641+
4642+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645+ {
4646+ unsigned int addr;
4647+
4648+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649+ addr = regs->u_regs[UREG_G1];
4650+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651+ regs->pc = addr;
4652+ regs->npc = addr+4;
4653+ return 2;
4654+ }
4655+ } while (0);
4656+
4657+ { /* PaX: patched PLT emulation #2 */
4658+ unsigned int ba;
4659+
4660+ err = get_user(ba, (unsigned int *)regs->pc);
4661+
4662+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663+ unsigned int addr;
4664+
4665+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666+ regs->pc = addr;
4667+ regs->npc = addr+4;
4668+ return 2;
4669+ }
4670+ }
4671+
4672+ do { /* PaX: patched PLT emulation #3 */
4673+ unsigned int sethi, jmpl, nop;
4674+
4675+ err = get_user(sethi, (unsigned int *)regs->pc);
4676+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678+
4679+ if (err)
4680+ break;
4681+
4682+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684+ nop == 0x01000000U)
4685+ {
4686+ unsigned int addr;
4687+
4688+ addr = (sethi & 0x003FFFFFU) << 10;
4689+ regs->u_regs[UREG_G1] = addr;
4690+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691+ regs->pc = addr;
4692+ regs->npc = addr+4;
4693+ return 2;
4694+ }
4695+ } while (0);
4696+
4697+ do { /* PaX: unpatched PLT emulation step 1 */
4698+ unsigned int sethi, ba, nop;
4699+
4700+ err = get_user(sethi, (unsigned int *)regs->pc);
4701+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703+
4704+ if (err)
4705+ break;
4706+
4707+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709+ nop == 0x01000000U)
4710+ {
4711+ unsigned int addr, save, call;
4712+
4713+ if ((ba & 0xFFC00000U) == 0x30800000U)
4714+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715+ else
4716+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717+
4718+ err = get_user(save, (unsigned int *)addr);
4719+ err |= get_user(call, (unsigned int *)(addr+4));
4720+ err |= get_user(nop, (unsigned int *)(addr+8));
4721+ if (err)
4722+ break;
4723+
4724+#ifdef CONFIG_PAX_DLRESOLVE
4725+ if (save == 0x9DE3BFA8U &&
4726+ (call & 0xC0000000U) == 0x40000000U &&
4727+ nop == 0x01000000U)
4728+ {
4729+ struct vm_area_struct *vma;
4730+ unsigned long call_dl_resolve;
4731+
4732+ down_read(&current->mm->mmap_sem);
4733+ call_dl_resolve = current->mm->call_dl_resolve;
4734+ up_read(&current->mm->mmap_sem);
4735+ if (likely(call_dl_resolve))
4736+ goto emulate;
4737+
4738+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739+
4740+ down_write(&current->mm->mmap_sem);
4741+ if (current->mm->call_dl_resolve) {
4742+ call_dl_resolve = current->mm->call_dl_resolve;
4743+ up_write(&current->mm->mmap_sem);
4744+ if (vma)
4745+ kmem_cache_free(vm_area_cachep, vma);
4746+ goto emulate;
4747+ }
4748+
4749+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751+ up_write(&current->mm->mmap_sem);
4752+ if (vma)
4753+ kmem_cache_free(vm_area_cachep, vma);
4754+ return 1;
4755+ }
4756+
4757+ if (pax_insert_vma(vma, call_dl_resolve)) {
4758+ up_write(&current->mm->mmap_sem);
4759+ kmem_cache_free(vm_area_cachep, vma);
4760+ return 1;
4761+ }
4762+
4763+ current->mm->call_dl_resolve = call_dl_resolve;
4764+ up_write(&current->mm->mmap_sem);
4765+
4766+emulate:
4767+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768+ regs->pc = call_dl_resolve;
4769+ regs->npc = addr+4;
4770+ return 3;
4771+ }
4772+#endif
4773+
4774+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775+ if ((save & 0xFFC00000U) == 0x05000000U &&
4776+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4777+ nop == 0x01000000U)
4778+ {
4779+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780+ regs->u_regs[UREG_G2] = addr + 4;
4781+ addr = (save & 0x003FFFFFU) << 10;
4782+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783+ regs->pc = addr;
4784+ regs->npc = addr+4;
4785+ return 3;
4786+ }
4787+ }
4788+ } while (0);
4789+
4790+ do { /* PaX: unpatched PLT emulation step 2 */
4791+ unsigned int save, call, nop;
4792+
4793+ err = get_user(save, (unsigned int *)(regs->pc-4));
4794+ err |= get_user(call, (unsigned int *)regs->pc);
4795+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796+ if (err)
4797+ break;
4798+
4799+ if (save == 0x9DE3BFA8U &&
4800+ (call & 0xC0000000U) == 0x40000000U &&
4801+ nop == 0x01000000U)
4802+ {
4803+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804+
4805+ regs->u_regs[UREG_RETPC] = regs->pc;
4806+ regs->pc = dl_resolve;
4807+ regs->npc = dl_resolve+4;
4808+ return 3;
4809+ }
4810+ } while (0);
4811+#endif
4812+
4813+ return 1;
4814+}
4815+
4816+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817+{
4818+ unsigned long i;
4819+
4820+ printk(KERN_ERR "PAX: bytes at PC: ");
4821+ for (i = 0; i < 8; i++) {
4822+ unsigned int c;
4823+ if (get_user(c, (unsigned int *)pc+i))
4824+ printk(KERN_CONT "???????? ");
4825+ else
4826+ printk(KERN_CONT "%08x ", c);
4827+ }
4828+ printk("\n");
4829+}
4830+#endif
4831+
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835@@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839+
4840+#ifdef CONFIG_PAX_PAGEEXEC
4841+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842+ up_read(&mm->mmap_sem);
4843+ switch (pax_handle_fetch_fault(regs)) {
4844+
4845+#ifdef CONFIG_PAX_EMUPLT
4846+ case 2:
4847+ case 3:
4848+ return;
4849+#endif
4850+
4851+ }
4852+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853+ do_group_exit(SIGKILL);
4854+ }
4855+#endif
4856+
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861index 504c062..6fcb9c6 100644
4862--- a/arch/sparc/mm/fault_64.c
4863+++ b/arch/sparc/mm/fault_64.c
4864@@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868+#include <linux/slab.h>
4869+#include <linux/pagemap.h>
4870+#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887+#ifdef CONFIG_PAX_PAGEEXEC
4888+#ifdef CONFIG_PAX_DLRESOLVE
4889+static void pax_emuplt_close(struct vm_area_struct *vma)
4890+{
4891+ vma->vm_mm->call_dl_resolve = 0UL;
4892+}
4893+
4894+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895+{
4896+ unsigned int *kaddr;
4897+
4898+ vmf->page = alloc_page(GFP_HIGHUSER);
4899+ if (!vmf->page)
4900+ return VM_FAULT_OOM;
4901+
4902+ kaddr = kmap(vmf->page);
4903+ memset(kaddr, 0, PAGE_SIZE);
4904+ kaddr[0] = 0x9DE3BFA8U; /* save */
4905+ flush_dcache_page(vmf->page);
4906+ kunmap(vmf->page);
4907+ return VM_FAULT_MAJOR;
4908+}
4909+
4910+static const struct vm_operations_struct pax_vm_ops = {
4911+ .close = pax_emuplt_close,
4912+ .fault = pax_emuplt_fault
4913+};
4914+
4915+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916+{
4917+ int ret;
4918+
4919+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4920+ vma->vm_mm = current->mm;
4921+ vma->vm_start = addr;
4922+ vma->vm_end = addr + PAGE_SIZE;
4923+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925+ vma->vm_ops = &pax_vm_ops;
4926+
4927+ ret = insert_vm_struct(current->mm, vma);
4928+ if (ret)
4929+ return ret;
4930+
4931+ ++current->mm->total_vm;
4932+ return 0;
4933+}
4934+#endif
4935+
4936+/*
4937+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4938+ *
4939+ * returns 1 when task should be killed
4940+ * 2 when patched PLT trampoline was detected
4941+ * 3 when unpatched PLT trampoline was detected
4942+ */
4943+static int pax_handle_fetch_fault(struct pt_regs *regs)
4944+{
4945+
4946+#ifdef CONFIG_PAX_EMUPLT
4947+ int err;
4948+
4949+ do { /* PaX: patched PLT emulation #1 */
4950+ unsigned int sethi1, sethi2, jmpl;
4951+
4952+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4953+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955+
4956+ if (err)
4957+ break;
4958+
4959+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962+ {
4963+ unsigned long addr;
4964+
4965+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966+ addr = regs->u_regs[UREG_G1];
4967+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968+
4969+ if (test_thread_flag(TIF_32BIT))
4970+ addr &= 0xFFFFFFFFUL;
4971+
4972+ regs->tpc = addr;
4973+ regs->tnpc = addr+4;
4974+ return 2;
4975+ }
4976+ } while (0);
4977+
4978+ { /* PaX: patched PLT emulation #2 */
4979+ unsigned int ba;
4980+
4981+ err = get_user(ba, (unsigned int *)regs->tpc);
4982+
4983+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984+ unsigned long addr;
4985+
4986+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987+
4988+ if (test_thread_flag(TIF_32BIT))
4989+ addr &= 0xFFFFFFFFUL;
4990+
4991+ regs->tpc = addr;
4992+ regs->tnpc = addr+4;
4993+ return 2;
4994+ }
4995+ }
4996+
4997+ do { /* PaX: patched PLT emulation #3 */
4998+ unsigned int sethi, jmpl, nop;
4999+
5000+ err = get_user(sethi, (unsigned int *)regs->tpc);
5001+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003+
5004+ if (err)
5005+ break;
5006+
5007+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009+ nop == 0x01000000U)
5010+ {
5011+ unsigned long addr;
5012+
5013+ addr = (sethi & 0x003FFFFFU) << 10;
5014+ regs->u_regs[UREG_G1] = addr;
5015+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016+
5017+ if (test_thread_flag(TIF_32BIT))
5018+ addr &= 0xFFFFFFFFUL;
5019+
5020+ regs->tpc = addr;
5021+ regs->tnpc = addr+4;
5022+ return 2;
5023+ }
5024+ } while (0);
5025+
5026+ do { /* PaX: patched PLT emulation #4 */
5027+ unsigned int sethi, mov1, call, mov2;
5028+
5029+ err = get_user(sethi, (unsigned int *)regs->tpc);
5030+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033+
5034+ if (err)
5035+ break;
5036+
5037+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038+ mov1 == 0x8210000FU &&
5039+ (call & 0xC0000000U) == 0x40000000U &&
5040+ mov2 == 0x9E100001U)
5041+ {
5042+ unsigned long addr;
5043+
5044+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046+
5047+ if (test_thread_flag(TIF_32BIT))
5048+ addr &= 0xFFFFFFFFUL;
5049+
5050+ regs->tpc = addr;
5051+ regs->tnpc = addr+4;
5052+ return 2;
5053+ }
5054+ } while (0);
5055+
5056+ do { /* PaX: patched PLT emulation #5 */
5057+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058+
5059+ err = get_user(sethi, (unsigned int *)regs->tpc);
5060+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067+
5068+ if (err)
5069+ break;
5070+
5071+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5075+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076+ sllx == 0x83287020U &&
5077+ jmpl == 0x81C04005U &&
5078+ nop == 0x01000000U)
5079+ {
5080+ unsigned long addr;
5081+
5082+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083+ regs->u_regs[UREG_G1] <<= 32;
5084+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086+ regs->tpc = addr;
5087+ regs->tnpc = addr+4;
5088+ return 2;
5089+ }
5090+ } while (0);
5091+
5092+ do { /* PaX: patched PLT emulation #6 */
5093+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094+
5095+ err = get_user(sethi, (unsigned int *)regs->tpc);
5096+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102+
5103+ if (err)
5104+ break;
5105+
5106+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109+ sllx == 0x83287020U &&
5110+ (or & 0xFFFFE000U) == 0x8A116000U &&
5111+ jmpl == 0x81C04005U &&
5112+ nop == 0x01000000U)
5113+ {
5114+ unsigned long addr;
5115+
5116+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117+ regs->u_regs[UREG_G1] <<= 32;
5118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120+ regs->tpc = addr;
5121+ regs->tnpc = addr+4;
5122+ return 2;
5123+ }
5124+ } while (0);
5125+
5126+ do { /* PaX: unpatched PLT emulation step 1 */
5127+ unsigned int sethi, ba, nop;
5128+
5129+ err = get_user(sethi, (unsigned int *)regs->tpc);
5130+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132+
5133+ if (err)
5134+ break;
5135+
5136+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138+ nop == 0x01000000U)
5139+ {
5140+ unsigned long addr;
5141+ unsigned int save, call;
5142+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143+
5144+ if ((ba & 0xFFC00000U) == 0x30800000U)
5145+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146+ else
5147+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148+
5149+ if (test_thread_flag(TIF_32BIT))
5150+ addr &= 0xFFFFFFFFUL;
5151+
5152+ err = get_user(save, (unsigned int *)addr);
5153+ err |= get_user(call, (unsigned int *)(addr+4));
5154+ err |= get_user(nop, (unsigned int *)(addr+8));
5155+ if (err)
5156+ break;
5157+
5158+#ifdef CONFIG_PAX_DLRESOLVE
5159+ if (save == 0x9DE3BFA8U &&
5160+ (call & 0xC0000000U) == 0x40000000U &&
5161+ nop == 0x01000000U)
5162+ {
5163+ struct vm_area_struct *vma;
5164+ unsigned long call_dl_resolve;
5165+
5166+ down_read(&current->mm->mmap_sem);
5167+ call_dl_resolve = current->mm->call_dl_resolve;
5168+ up_read(&current->mm->mmap_sem);
5169+ if (likely(call_dl_resolve))
5170+ goto emulate;
5171+
5172+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173+
5174+ down_write(&current->mm->mmap_sem);
5175+ if (current->mm->call_dl_resolve) {
5176+ call_dl_resolve = current->mm->call_dl_resolve;
5177+ up_write(&current->mm->mmap_sem);
5178+ if (vma)
5179+ kmem_cache_free(vm_area_cachep, vma);
5180+ goto emulate;
5181+ }
5182+
5183+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185+ up_write(&current->mm->mmap_sem);
5186+ if (vma)
5187+ kmem_cache_free(vm_area_cachep, vma);
5188+ return 1;
5189+ }
5190+
5191+ if (pax_insert_vma(vma, call_dl_resolve)) {
5192+ up_write(&current->mm->mmap_sem);
5193+ kmem_cache_free(vm_area_cachep, vma);
5194+ return 1;
5195+ }
5196+
5197+ current->mm->call_dl_resolve = call_dl_resolve;
5198+ up_write(&current->mm->mmap_sem);
5199+
5200+emulate:
5201+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202+ regs->tpc = call_dl_resolve;
5203+ regs->tnpc = addr+4;
5204+ return 3;
5205+ }
5206+#endif
5207+
5208+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209+ if ((save & 0xFFC00000U) == 0x05000000U &&
5210+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5211+ nop == 0x01000000U)
5212+ {
5213+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214+ regs->u_regs[UREG_G2] = addr + 4;
5215+ addr = (save & 0x003FFFFFU) << 10;
5216+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217+
5218+ if (test_thread_flag(TIF_32BIT))
5219+ addr &= 0xFFFFFFFFUL;
5220+
5221+ regs->tpc = addr;
5222+ regs->tnpc = addr+4;
5223+ return 3;
5224+ }
5225+
5226+ /* PaX: 64-bit PLT stub */
5227+ err = get_user(sethi1, (unsigned int *)addr);
5228+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5229+ err |= get_user(or1, (unsigned int *)(addr+8));
5230+ err |= get_user(or2, (unsigned int *)(addr+12));
5231+ err |= get_user(sllx, (unsigned int *)(addr+16));
5232+ err |= get_user(add, (unsigned int *)(addr+20));
5233+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5234+ err |= get_user(nop, (unsigned int *)(addr+28));
5235+ if (err)
5236+ break;
5237+
5238+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5241+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242+ sllx == 0x89293020U &&
5243+ add == 0x8A010005U &&
5244+ jmpl == 0x89C14000U &&
5245+ nop == 0x01000000U)
5246+ {
5247+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249+ regs->u_regs[UREG_G4] <<= 32;
5250+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252+ regs->u_regs[UREG_G4] = addr + 24;
5253+ addr = regs->u_regs[UREG_G5];
5254+ regs->tpc = addr;
5255+ regs->tnpc = addr+4;
5256+ return 3;
5257+ }
5258+ }
5259+ } while (0);
5260+
5261+#ifdef CONFIG_PAX_DLRESOLVE
5262+ do { /* PaX: unpatched PLT emulation step 2 */
5263+ unsigned int save, call, nop;
5264+
5265+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5266+ err |= get_user(call, (unsigned int *)regs->tpc);
5267+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268+ if (err)
5269+ break;
5270+
5271+ if (save == 0x9DE3BFA8U &&
5272+ (call & 0xC0000000U) == 0x40000000U &&
5273+ nop == 0x01000000U)
5274+ {
5275+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276+
5277+ if (test_thread_flag(TIF_32BIT))
5278+ dl_resolve &= 0xFFFFFFFFUL;
5279+
5280+ regs->u_regs[UREG_RETPC] = regs->tpc;
5281+ regs->tpc = dl_resolve;
5282+ regs->tnpc = dl_resolve+4;
5283+ return 3;
5284+ }
5285+ } while (0);
5286+#endif
5287+
5288+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289+ unsigned int sethi, ba, nop;
5290+
5291+ err = get_user(sethi, (unsigned int *)regs->tpc);
5292+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294+
5295+ if (err)
5296+ break;
5297+
5298+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299+ (ba & 0xFFF00000U) == 0x30600000U &&
5300+ nop == 0x01000000U)
5301+ {
5302+ unsigned long addr;
5303+
5304+ addr = (sethi & 0x003FFFFFU) << 10;
5305+ regs->u_regs[UREG_G1] = addr;
5306+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307+
5308+ if (test_thread_flag(TIF_32BIT))
5309+ addr &= 0xFFFFFFFFUL;
5310+
5311+ regs->tpc = addr;
5312+ regs->tnpc = addr+4;
5313+ return 2;
5314+ }
5315+ } while (0);
5316+
5317+#endif
5318+
5319+ return 1;
5320+}
5321+
5322+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323+{
5324+ unsigned long i;
5325+
5326+ printk(KERN_ERR "PAX: bytes at PC: ");
5327+ for (i = 0; i < 8; i++) {
5328+ unsigned int c;
5329+ if (get_user(c, (unsigned int *)pc+i))
5330+ printk(KERN_CONT "???????? ");
5331+ else
5332+ printk(KERN_CONT "%08x ", c);
5333+ }
5334+ printk("\n");
5335+}
5336+#endif
5337+
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345+#ifdef CONFIG_PAX_PAGEEXEC
5346+ /* PaX: detect ITLB misses on non-exec pages */
5347+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349+ {
5350+ if (address != regs->tpc)
5351+ goto good_area;
5352+
5353+ up_read(&mm->mmap_sem);
5354+ switch (pax_handle_fetch_fault(regs)) {
5355+
5356+#ifdef CONFIG_PAX_EMUPLT
5357+ case 2:
5358+ case 3:
5359+ return;
5360+#endif
5361+
5362+ }
5363+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364+ do_group_exit(SIGKILL);
5365+ }
5366+#endif
5367+
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372index 07e1453..0a7d9e9 100644
5373--- a/arch/sparc/mm/hugetlbpage.c
5374+++ b/arch/sparc/mm/hugetlbpage.c
5375@@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379- if (likely(!vma || addr + len <= vma->vm_start)) {
5380+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388- if (!vma || addr <= vma->vm_start) {
5389+ if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397- addr = (mm->mmap_base-len) & HPAGE_MASK;
5398+ addr = mm->mmap_base - len;
5399
5400 do {
5401+ addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408- if (likely(!vma || addr+len <= vma->vm_start)) {
5409+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417- addr = (vma->vm_start-len) & HPAGE_MASK;
5418- } while (likely(len < vma->vm_start));
5419+ addr = skip_heap_stack_gap(vma, len);
5420+ } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428- if (task_size - len >= addr &&
5429- (!vma || addr + len <= vma->vm_start))
5430+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435index 7b00de6..78239f4 100644
5436--- a/arch/sparc/mm/init_32.c
5437+++ b/arch/sparc/mm/init_32.c
5438@@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444+
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448@@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452- protection_map[1] = PAGE_READONLY;
5453- protection_map[2] = PAGE_COPY;
5454- protection_map[3] = PAGE_COPY;
5455+ protection_map[1] = PAGE_READONLY_NOEXEC;
5456+ protection_map[2] = PAGE_COPY_NOEXEC;
5457+ protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463- protection_map[9] = PAGE_READONLY;
5464- protection_map[10] = PAGE_SHARED;
5465- protection_map[11] = PAGE_SHARED;
5466+ protection_map[9] = PAGE_READONLY_NOEXEC;
5467+ protection_map[10] = PAGE_SHARED_NOEXEC;
5468+ protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473index cbef74e..c38fead 100644
5474--- a/arch/sparc/mm/srmmu.c
5475+++ b/arch/sparc/mm/srmmu.c
5476@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480+
5481+#ifdef CONFIG_PAX_PAGEEXEC
5482+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485+#endif
5486+
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490diff --git a/arch/um/Makefile b/arch/um/Makefile
5491index 7730af6..cce5b19 100644
5492--- a/arch/um/Makefile
5493+++ b/arch/um/Makefile
5494@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498+ifdef CONSTIFY_PLUGIN
5499+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500+endif
5501+
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506index 6c03acd..a5e0215 100644
5507--- a/arch/um/include/asm/kmap_types.h
5508+++ b/arch/um/include/asm/kmap_types.h
5509@@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513+ KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518index 7cfc3ce..cbd1a58 100644
5519--- a/arch/um/include/asm/page.h
5520+++ b/arch/um/include/asm/page.h
5521@@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525+#define ktla_ktva(addr) (addr)
5526+#define ktva_ktla(addr) (addr)
5527+
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532index c533835..84db18e 100644
5533--- a/arch/um/kernel/process.c
5534+++ b/arch/um/kernel/process.c
5535@@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539-/*
5540- * Only x86 and x86_64 have an arch_align_stack().
5541- * All other arches have "#define arch_align_stack(x) (x)"
5542- * in their asm/system.h
5543- * As this is included in UML from asm-um/system-generic.h,
5544- * we can use it to behave as the subarch does.
5545- */
5546-#ifndef arch_align_stack
5547-unsigned long arch_align_stack(unsigned long sp)
5548-{
5549- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550- sp -= get_random_int() % 8192;
5551- return sp & ~0xf;
5552-}
5553-#endif
5554-
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559index efb4294..61bc18c 100644
5560--- a/arch/x86/Kconfig
5561+++ b/arch/x86/Kconfig
5562@@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566- depends on X86_32 && !CC_STACKPROTECTOR
5567+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571@@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575- depends on !X86_NUMAQ
5576+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580@@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584- depends on !X86_NUMAQ
5585+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593- default 0x78000000 if VMSPLIT_2G_OPT
5594+ default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598@@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602+ depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610+ range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618+ range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626- def_bool y
5627+ def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635index e3ca7e0..b30b28a 100644
5636--- a/arch/x86/Kconfig.cpu
5637+++ b/arch/x86/Kconfig.cpu
5638@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642- depends on M586MMX || M586TSC || M586 || M486 || M386
5643+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647@@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666index bf56e17..05f9891 100644
5667--- a/arch/x86/Kconfig.debug
5668+++ b/arch/x86/Kconfig.debug
5669@@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673- depends on DEBUG_KERNEL
5674+ depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682- depends on MODULES
5683+ depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688index b02e509..2631e48 100644
5689--- a/arch/x86/Makefile
5690+++ b/arch/x86/Makefile
5691@@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695+ biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699@@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703+
5704+define OLD_LD
5705+
5706+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707+*** Please upgrade your binutils to 2.18 or newer
5708+endef
5709+
5710+archprepare:
5711+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713index 95365a8..52f857b 100644
5714--- a/arch/x86/boot/Makefile
5715+++ b/arch/x86/boot/Makefile
5716@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720+ifdef CONSTIFY_PLUGIN
5721+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722+endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727index 878e4b9..20537ab 100644
5728--- a/arch/x86/boot/bitops.h
5729+++ b/arch/x86/boot/bitops.h
5730@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749index c7093bd..d4247ffe0 100644
5750--- a/arch/x86/boot/boot.h
5751+++ b/arch/x86/boot/boot.h
5752@@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756- asm("movw %%ds,%0" : "=rm" (seg));
5757+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765- asm("repe; cmpsb; setnz %0"
5766+ asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771index 09664ef..edc5d03 100644
5772--- a/arch/x86/boot/compressed/Makefile
5773+++ b/arch/x86/boot/compressed/Makefile
5774@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778+ifdef CONSTIFY_PLUGIN
5779+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780+endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785index 67a655a..b924059 100644
5786--- a/arch/x86/boot/compressed/head_32.S
5787+++ b/arch/x86/boot/compressed/head_32.S
5788@@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792- movl $LOAD_PHYSICAL_ADDR, %ebx
5793+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797@@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801- subl $LOAD_PHYSICAL_ADDR, %ebx
5802+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806@@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810- testl %ecx, %ecx
5811- jz 2f
5812+ jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817index 35af09d..99c9676 100644
5818--- a/arch/x86/boot/compressed/head_64.S
5819+++ b/arch/x86/boot/compressed/head_64.S
5820@@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824- movl $LOAD_PHYSICAL_ADDR, %ebx
5825+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829@@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833- movq $LOAD_PHYSICAL_ADDR, %rbp
5834+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839index 3a19d04..7c1d55a 100644
5840--- a/arch/x86/boot/compressed/misc.c
5841+++ b/arch/x86/boot/compressed/misc.c
5842@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861index 89bbf4e..869908e 100644
5862--- a/arch/x86/boot/compressed/relocs.c
5863+++ b/arch/x86/boot/compressed/relocs.c
5864@@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868+#include "../../../../include/generated/autoconf.h"
5869+
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872+static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880+static void read_phdrs(FILE *fp)
5881+{
5882+ unsigned int i;
5883+
5884+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885+ if (!phdr) {
5886+ die("Unable to allocate %d program headers\n",
5887+ ehdr.e_phnum);
5888+ }
5889+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890+ die("Seek to %d failed: %s\n",
5891+ ehdr.e_phoff, strerror(errno));
5892+ }
5893+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894+ die("Cannot read ELF program headers: %s\n",
5895+ strerror(errno));
5896+ }
5897+ for(i = 0; i < ehdr.e_phnum; i++) {
5898+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906+ }
5907+
5908+}
5909+
5910 static void read_shdrs(FILE *fp)
5911 {
5912- int i;
5913+ unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921- int i;
5922+ unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930- int i,j;
5931+ unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939- int i,j;
5940+ unsigned int i,j;
5941+ uint32_t base;
5942+
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950+ base = 0;
5951+ for (j = 0; j < ehdr.e_phnum; j++) {
5952+ if (phdr[j].p_type != PT_LOAD )
5953+ continue;
5954+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955+ continue;
5956+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957+ break;
5958+ }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961- rel->r_offset = elf32_to_cpu(rel->r_offset);
5962+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970- int i;
5971+ unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978- int j;
5979+ unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987- int i, printed = 0;
5988+ unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995- int j;
5996+ unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004- int i;
6005+ unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011- int j;
6012+ unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022+ continue;
6023+
6024+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027+ continue;
6028+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029+ continue;
6030+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031+ continue;
6032+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033+ continue;
6034+#endif
6035+
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043- int i;
6044+ unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052+ read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057index 4d3ff03..e4972ff 100644
6058--- a/arch/x86/boot/cpucheck.c
6059+++ b/arch/x86/boot/cpucheck.c
6060@@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064- asm("movl %%cr0,%0" : "=r" (cr0));
6065+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073- asm("pushfl ; "
6074+ asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078@@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082- asm("cpuid"
6083+ asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087@@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091- asm("cpuid"
6092+ asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096@@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100- asm("cpuid"
6101+ asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105@@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109- asm("cpuid"
6110+ asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144- asm("cpuid"
6145+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147+ asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156index bdb4d45..0476680 100644
6157--- a/arch/x86/boot/header.S
6158+++ b/arch/x86/boot/header.S
6159@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169index db75d07..8e6d0af 100644
6170--- a/arch/x86/boot/memory.c
6171+++ b/arch/x86/boot/memory.c
6172@@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176- int count = 0;
6177+ unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182index 11e8c6e..fdbb1ed 100644
6183--- a/arch/x86/boot/video-vesa.c
6184+++ b/arch/x86/boot/video-vesa.c
6185@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189+ boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194index 43eda28..5ab5fdb 100644
6195--- a/arch/x86/boot/video.c
6196+++ b/arch/x86/boot/video.c
6197@@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201- int i, len = 0;
6202+ unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207index 5b577d5..3c1fed4 100644
6208--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210@@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214+#include <asm/alternative-asm.h>
6215+
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223+#define ret pax_force_retaddr 0, 1; ret
6224+
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229index be6d9e3..21fbbca 100644
6230--- a/arch/x86/crypto/aesni-intel_asm.S
6231+++ b/arch/x86/crypto/aesni-intel_asm.S
6232@@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236+#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244+ pax_force_retaddr 0, 1
6245 ret
6246+ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254+ pax_force_retaddr 0, 1
6255 ret
6256+ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264+ pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272+ pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280+ pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288+ pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296+ pax_force_retaddr 0, 1
6297 ret
6298+ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306+ pax_force_retaddr 0, 1
6307 ret
6308+ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312@@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316+ pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320@@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324+ pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332+ pax_force_retaddr 0, 1
6333 ret
6334+ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338@@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342+ pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346@@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350+ pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358+ pax_force_retaddr 0, 1
6359 ret
6360+ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368+ pax_force_retaddr 0, 1
6369 ret
6370+ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378+ pax_force_retaddr 0, 1
6379 ret
6380+ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388+ pax_force_retaddr 0, 1
6389 ret
6390+ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398+ pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402@@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406+ pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414+ pax_force_retaddr 0, 1
6415 ret
6416+ENDPROC(aesni_ctr_enc)
6417 #endif
6418diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419index 391d245..67f35c2 100644
6420--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422@@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426+#include <asm/alternative-asm.h>
6427+
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431@@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435+ pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439+ pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443@@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447+ pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455+ pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459@@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463+ pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471+ pax_force_retaddr 0, 1
6472 ret;
6473
6474diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475index 6214a9b..1f4fc9a 100644
6476--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478@@ -1,3 +1,5 @@
6479+#include <asm/alternative-asm.h>
6480+
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488+ pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496+ pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504+ pax_force_retaddr
6505 ret
6506diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507index b2c2f57..8470cab 100644
6508--- a/arch/x86/crypto/sha1_ssse3_asm.S
6509+++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510@@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514+#include <asm/alternative-asm.h>
6515+
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519@@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523+ pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528index 5b012a2..36d5364 100644
6529--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531@@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535+#include <asm/alternative-asm.h>
6536+
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544+ pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548@@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552+ pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560+ pax_force_retaddr 0, 1
6561 ret;
6562
6563diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564index 7bcf3fc..f53832f 100644
6565--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567@@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571+#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575@@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579+ pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583@@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587+ pax_force_retaddr 0, 1
6588 ret
6589diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590index fd84387..0b4af7d 100644
6591--- a/arch/x86/ia32/ia32_aout.c
6592+++ b/arch/x86/ia32/ia32_aout.c
6593@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597+ memset(&dump, 0, sizeof(dump));
6598+
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603index 6557769..ef6ae89 100644
6604--- a/arch/x86/ia32/ia32_signal.c
6605+++ b/arch/x86/ia32/ia32_signal.c
6606@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619- void **fpstate)
6620+ void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628- *fpstate = (struct _fpstate_ia32 *) sp;
6629+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637- sp = ((sp + 4) & -16ul) - 4;
6638+ sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655- 0,
6656+ 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664+ else if (current->mm->context.vdso)
6665+ /* Return stub is in 32bit vsyscall page */
6666+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669- rt_sigreturn);
6670+ restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683index a6253ec..4ad2120 100644
6684--- a/arch/x86/ia32/ia32entry.S
6685+++ b/arch/x86/ia32/ia32entry.S
6686@@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690+#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692+#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700+ .macro pax_enter_kernel_user
6701+ pax_set_fptr_mask
6702+#ifdef CONFIG_PAX_MEMORY_UDEREF
6703+ call pax_enter_kernel_user
6704+#endif
6705+ .endm
6706+
6707+ .macro pax_exit_kernel_user
6708+#ifdef CONFIG_PAX_MEMORY_UDEREF
6709+ call pax_exit_kernel_user
6710+#endif
6711+#ifdef CONFIG_PAX_RANDKSTACK
6712+ pushq %rax
6713+ pushq %r11
6714+ call pax_randomize_kstack
6715+ popq %r11
6716+ popq %rax
6717+#endif
6718+ .endm
6719+
6720+.macro pax_erase_kstack
6721+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722+ call pax_erase_kstack
6723+#endif
6724+.endm
6725+
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733- addq $(KERNEL_STACK_OFFSET),%rsp
6734- /*
6735- * No need to follow this irqs on/off section: the syscall
6736- * disabled irqs, here we enable it straight after entry:
6737- */
6738- ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747- CFI_REGISTER rip,r10
6748+ orl $X86_EFLAGS_IF,(%rsp)
6749+ GET_THREAD_INFO(%r11)
6750+ movl TI_sysenter_return(%r11), %r11d
6751+ CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755- pushq_cfi %r10
6756+ pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761+ pax_enter_kernel_user
6762+ /*
6763+ * No need to follow this irqs on/off section: the syscall
6764+ * disabled irqs, here we enable it straight after entry:
6765+ */
6766+ ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769+
6770+#ifdef CONFIG_PAX_MEMORY_UDEREF
6771+ mov $PAX_USER_SHADOW_BASE,%r11
6772+ add %r11,%rbp
6773+#endif
6774+
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779- GET_THREAD_INFO(%r10)
6780- orl $TS_COMPAT,TI_status(%r10)
6781- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782+ GET_THREAD_INFO(%r11)
6783+ orl $TS_COMPAT,TI_status(%r11)
6784+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788@@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792- GET_THREAD_INFO(%r10)
6793+ GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800- andl $~TS_COMPAT,TI_status(%r10)
6801+ pax_exit_kernel_user
6802+ pax_erase_kstack
6803+ andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807@@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811+
6812+ pax_erase_kstack
6813+
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817@@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826@@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830- GET_THREAD_INFO(%r10)
6831+ GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836- testl %edi,TI_flags(%r10)
6837+ testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841@@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850@@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854+
6855+ pax_erase_kstack
6856+
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865+ CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872+ SAVE_ARGS 8*6,0,0
6873+ pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887+
6888+#ifdef CONFIG_PAX_MEMORY_UDEREF
6889+ mov $PAX_USER_SHADOW_BASE,%r11
6890+ add %r11,%r8
6891+#endif
6892+
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897- GET_THREAD_INFO(%r10)
6898- orl $TS_COMPAT,TI_status(%r10)
6899- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900+ GET_THREAD_INFO(%r11)
6901+ orl $TS_COMPAT,TI_status(%r11)
6902+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906@@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910- GET_THREAD_INFO(%r10)
6911+ GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918- andl $~TS_COMPAT,TI_status(%r10)
6919+ pax_exit_kernel_user
6920+ pax_erase_kstack
6921+ andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925@@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934@@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938+
6939+ pax_erase_kstack
6940+
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948- /*
6949- * No need to follow this irqs on/off section: the syscall
6950- * disabled irqs and here we enable it straight after entry:
6951- */
6952- ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959- GET_THREAD_INFO(%r10)
6960- orl $TS_COMPAT,TI_status(%r10)
6961- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962+ pax_enter_kernel_user
6963+ /*
6964+ * No need to follow this irqs on/off section: the syscall
6965+ * disabled irqs and here we enable it straight after entry:
6966+ */
6967+ ENABLE_INTERRUPTS(CLBR_NONE)
6968+ GET_THREAD_INFO(%r11)
6969+ orl $TS_COMPAT,TI_status(%r11)
6970+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974@@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978+
6979+ pax_erase_kstack
6980+
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984@@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988+ pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993index f6f5c53..b358b28 100644
6994--- a/arch/x86/ia32/sys_ia32.c
6995+++ b/arch/x86/ia32/sys_ia32.c
6996@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000- typeof(ubuf->st_uid) uid = 0;
7001- typeof(ubuf->st_gid) gid = 0;
7002+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011- set ? (sigset_t __user *)&s : NULL,
7012- oset ? (sigset_t __user *)&s : NULL,
7013+ set ? (sigset_t __force_user *)&s : NULL,
7014+ oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064index 091508b..e245ff2 100644
7065--- a/arch/x86/include/asm/alternative-asm.h
7066+++ b/arch/x86/include/asm/alternative-asm.h
7067@@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071-1: lock
7072+672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075- .long 1b - .
7076+ .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080@@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085+ .macro pax_force_retaddr_bts rip=0
7086+ btsq $63,\rip(%rsp)
7087+ .endm
7088+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089+ .macro pax_force_retaddr rip=0, reload=0
7090+ btsq $63,\rip(%rsp)
7091+ .endm
7092+ .macro pax_force_fptr ptr
7093+ btsq $63,\ptr
7094+ .endm
7095+ .macro pax_set_fptr_mask
7096+ .endm
7097+#endif
7098+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099+ .macro pax_force_retaddr rip=0, reload=0
7100+ .if \reload
7101+ pax_set_fptr_mask
7102+ .endif
7103+ orq %r10,\rip(%rsp)
7104+ .endm
7105+ .macro pax_force_fptr ptr
7106+ orq %r10,\ptr
7107+ .endm
7108+ .macro pax_set_fptr_mask
7109+ movabs $0x8000000000000000,%r10
7110+ .endm
7111+#endif
7112+#else
7113+ .macro pax_force_retaddr rip=0, reload=0
7114+ .endm
7115+ .macro pax_force_fptr ptr
7116+ .endm
7117+ .macro pax_force_retaddr_bts rip=0
7118+ .endm
7119+ .macro pax_set_fptr_mask
7120+ .endm
7121+#endif
7122+
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127index 37ad100..7d47faa 100644
7128--- a/arch/x86/include/asm/alternative.h
7129+++ b/arch/x86/include/asm/alternative.h
7130@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134- ".section .altinstr_replacement, \"ax\"\n" \
7135+ ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140index 1a6c09a..fec2432 100644
7141--- a/arch/x86/include/asm/apic.h
7142+++ b/arch/x86/include/asm/apic.h
7143@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147-extern unsigned int apic_verbosity;
7148+extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153index 20370c6..a2eb9b0 100644
7154--- a/arch/x86/include/asm/apm.h
7155+++ b/arch/x86/include/asm/apm.h
7156@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160- "lcall *%%cs:apm_bios_entry\n\t"
7161+ "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169- "lcall *%%cs:apm_bios_entry\n\t"
7170+ "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175index 58cb6d4..ca9010d 100644
7176--- a/arch/x86/include/asm/atomic.h
7177+++ b/arch/x86/include/asm/atomic.h
7178@@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182- return (*(volatile int *)&(v)->counter);
7183+ return (*(volatile const int *)&(v)->counter);
7184+}
7185+
7186+/**
7187+ * atomic_read_unchecked - read atomic variable
7188+ * @v: pointer of type atomic_unchecked_t
7189+ *
7190+ * Atomically reads the value of @v.
7191+ */
7192+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193+{
7194+ return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202+ * atomic_set_unchecked - set atomic variable
7203+ * @v: pointer of type atomic_unchecked_t
7204+ * @i: required value
7205+ *
7206+ * Atomically sets the value of @v to @i.
7207+ */
7208+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209+{
7210+ v->counter = i;
7211+}
7212+
7213+/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221- asm volatile(LOCK_PREFIX "addl %1,%0"
7222+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223+
7224+#ifdef CONFIG_PAX_REFCOUNT
7225+ "jno 0f\n"
7226+ LOCK_PREFIX "subl %1,%0\n"
7227+ "int $4\n0:\n"
7228+ _ASM_EXTABLE(0b, 0b)
7229+#endif
7230+
7231+ : "+m" (v->counter)
7232+ : "ir" (i));
7233+}
7234+
7235+/**
7236+ * atomic_add_unchecked - add integer to atomic variable
7237+ * @i: integer value to add
7238+ * @v: pointer of type atomic_unchecked_t
7239+ *
7240+ * Atomically adds @i to @v.
7241+ */
7242+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243+{
7244+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252- asm volatile(LOCK_PREFIX "subl %1,%0"
7253+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254+
7255+#ifdef CONFIG_PAX_REFCOUNT
7256+ "jno 0f\n"
7257+ LOCK_PREFIX "addl %1,%0\n"
7258+ "int $4\n0:\n"
7259+ _ASM_EXTABLE(0b, 0b)
7260+#endif
7261+
7262+ : "+m" (v->counter)
7263+ : "ir" (i));
7264+}
7265+
7266+/**
7267+ * atomic_sub_unchecked - subtract integer from atomic variable
7268+ * @i: integer value to subtract
7269+ * @v: pointer of type atomic_unchecked_t
7270+ *
7271+ * Atomically subtracts @i from @v.
7272+ */
7273+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274+{
7275+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285+
7286+#ifdef CONFIG_PAX_REFCOUNT
7287+ "jno 0f\n"
7288+ LOCK_PREFIX "addl %2,%0\n"
7289+ "int $4\n0:\n"
7290+ _ASM_EXTABLE(0b, 0b)
7291+#endif
7292+
7293+ "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301- asm volatile(LOCK_PREFIX "incl %0"
7302+ asm volatile(LOCK_PREFIX "incl %0\n"
7303+
7304+#ifdef CONFIG_PAX_REFCOUNT
7305+ "jno 0f\n"
7306+ LOCK_PREFIX "decl %0\n"
7307+ "int $4\n0:\n"
7308+ _ASM_EXTABLE(0b, 0b)
7309+#endif
7310+
7311+ : "+m" (v->counter));
7312+}
7313+
7314+/**
7315+ * atomic_inc_unchecked - increment atomic variable
7316+ * @v: pointer of type atomic_unchecked_t
7317+ *
7318+ * Atomically increments @v by 1.
7319+ */
7320+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321+{
7322+ asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330- asm volatile(LOCK_PREFIX "decl %0"
7331+ asm volatile(LOCK_PREFIX "decl %0\n"
7332+
7333+#ifdef CONFIG_PAX_REFCOUNT
7334+ "jno 0f\n"
7335+ LOCK_PREFIX "incl %0\n"
7336+ "int $4\n0:\n"
7337+ _ASM_EXTABLE(0b, 0b)
7338+#endif
7339+
7340+ : "+m" (v->counter));
7341+}
7342+
7343+/**
7344+ * atomic_dec_unchecked - decrement atomic variable
7345+ * @v: pointer of type atomic_unchecked_t
7346+ *
7347+ * Atomically decrements @v by 1.
7348+ */
7349+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350+{
7351+ asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360+ asm volatile(LOCK_PREFIX "decl %0\n"
7361+
7362+#ifdef CONFIG_PAX_REFCOUNT
7363+ "jno 0f\n"
7364+ LOCK_PREFIX "incl %0\n"
7365+ "int $4\n0:\n"
7366+ _ASM_EXTABLE(0b, 0b)
7367+#endif
7368+
7369+ "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378+ asm volatile(LOCK_PREFIX "incl %0\n"
7379+
7380+#ifdef CONFIG_PAX_REFCOUNT
7381+ "jno 0f\n"
7382+ LOCK_PREFIX "decl %0\n"
7383+ "int $4\n0:\n"
7384+ _ASM_EXTABLE(0b, 0b)
7385+#endif
7386+
7387+ "sete %1\n"
7388+ : "+m" (v->counter), "=qm" (c)
7389+ : : "memory");
7390+ return c != 0;
7391+}
7392+
7393+/**
7394+ * atomic_inc_and_test_unchecked - increment and test
7395+ * @v: pointer of type atomic_unchecked_t
7396+ *
7397+ * Atomically increments @v by 1
7398+ * and returns true if the result is zero, or false for all
7399+ * other cases.
7400+ */
7401+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402+{
7403+ unsigned char c;
7404+
7405+ asm volatile(LOCK_PREFIX "incl %0\n"
7406+ "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416+
7417+#ifdef CONFIG_PAX_REFCOUNT
7418+ "jno 0f\n"
7419+ LOCK_PREFIX "subl %2,%0\n"
7420+ "int $4\n0:\n"
7421+ _ASM_EXTABLE(0b, 0b)
7422+#endif
7423+
7424+ "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432- return i + xadd(&v->counter, i);
7433+ return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441+ * atomic_add_return_unchecked - add integer and return
7442+ * @i: integer value to add
7443+ * @v: pointer of type atomic_unchecked_t
7444+ *
7445+ * Atomically adds @i to @v and returns @i + @v
7446+ */
7447+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448+{
7449+#ifdef CONFIG_M386
7450+ int __i;
7451+ unsigned long flags;
7452+ if (unlikely(boot_cpu_data.x86 <= 3))
7453+ goto no_xadd;
7454+#endif
7455+ /* Modern 486+ processor */
7456+ return i + xadd(&v->counter, i);
7457+
7458+#ifdef CONFIG_M386
7459+no_xadd: /* Legacy 386 processor */
7460+ raw_local_irq_save(flags);
7461+ __i = atomic_read_unchecked(v);
7462+ atomic_set_unchecked(v, i + __i);
7463+ raw_local_irq_restore(flags);
7464+ return i + __i;
7465+#endif
7466+}
7467+
7468+/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477+{
7478+ return atomic_add_return_unchecked(1, v);
7479+}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488+{
7489+ return cmpxchg(&v->counter, old, new);
7490+}
7491+
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498+{
7499+ return xchg(&v->counter, new);
7500+}
7501+
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509- int c, old;
7510+ int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513- if (unlikely(c == (u)))
7514+ if (unlikely(c == u))
7515 break;
7516- old = atomic_cmpxchg((v), c, c + (a));
7517+
7518+ asm volatile("addl %2,%0\n"
7519+
7520+#ifdef CONFIG_PAX_REFCOUNT
7521+ "jno 0f\n"
7522+ "subl %2,%0\n"
7523+ "int $4\n0:\n"
7524+ _ASM_EXTABLE(0b, 0b)
7525+#endif
7526+
7527+ : "=r" (new)
7528+ : "0" (c), "ir" (a));
7529+
7530+ old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538+/**
7539+ * atomic_inc_not_zero_hint - increment if not null
7540+ * @v: pointer of type atomic_t
7541+ * @hint: probable value of the atomic before the increment
7542+ *
7543+ * This version of atomic_inc_not_zero() gives a hint of probable
7544+ * value of the atomic. This helps processor to not read the memory
7545+ * before doing the atomic read/modify/write cycle, lowering
7546+ * number of bus transactions on some arches.
7547+ *
7548+ * Returns: 0 if increment was not done, 1 otherwise.
7549+ */
7550+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552+{
7553+ int val, c = hint, new;
7554+
7555+ /* sanity test, should be removed by compiler if hint is a constant */
7556+ if (!hint)
7557+ return __atomic_add_unless(v, 1, 0);
7558+
7559+ do {
7560+ asm volatile("incl %0\n"
7561+
7562+#ifdef CONFIG_PAX_REFCOUNT
7563+ "jno 0f\n"
7564+ "decl %0\n"
7565+ "int $4\n0:\n"
7566+ _ASM_EXTABLE(0b, 0b)
7567+#endif
7568+
7569+ : "=r" (new)
7570+ : "0" (c));
7571+
7572+ val = atomic_cmpxchg(v, c, new);
7573+ if (val == c)
7574+ return 1;
7575+ c = val;
7576+ } while (c);
7577+
7578+ return 0;
7579+}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584index 24098aa..1e37723 100644
7585--- a/arch/x86/include/asm/atomic64_32.h
7586+++ b/arch/x86/include/asm/atomic64_32.h
7587@@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591+#ifdef CONFIG_PAX_REFCOUNT
7592+typedef struct {
7593+ u64 __aligned(8) counter;
7594+} atomic64_unchecked_t;
7595+#else
7596+typedef atomic64_t atomic64_unchecked_t;
7597+#endif
7598+
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607+ * @p: pointer to type atomic64_unchecked_t
7608+ * @o: expected value
7609+ * @n: new value
7610+ *
7611+ * Atomically sets @v to @n if it was equal to @o and returns
7612+ * the old value.
7613+ */
7614+
7615+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616+{
7617+ return cmpxchg64(&v->counter, o, n);
7618+}
7619+
7620+/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628+ * atomic64_set_unchecked - set atomic64 variable
7629+ * @v: pointer to type atomic64_unchecked_t
7630+ * @n: value to assign
7631+ *
7632+ * Atomically sets the value of @v to @n.
7633+ */
7634+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635+{
7636+ unsigned high = (unsigned)(i >> 32);
7637+ unsigned low = (unsigned)i;
7638+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7639+ : "+b" (low), "+c" (high)
7640+ : "S" (v)
7641+ : "eax", "edx", "memory"
7642+ );
7643+}
7644+
7645+/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653+ * atomic64_read_unchecked - read atomic64 variable
7654+ * @v: pointer to type atomic64_unchecked_t
7655+ *
7656+ * Atomically reads the value of @v and returns it.
7657+ */
7658+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659+{
7660+ long long r;
7661+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662+ : "=A" (r), "+c" (v)
7663+ : : "memory"
7664+ );
7665+ return r;
7666+ }
7667+
7668+/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676+/**
7677+ * atomic64_add_return_unchecked - add and return
7678+ * @i: integer value to add
7679+ * @v: pointer to type atomic64_unchecked_t
7680+ *
7681+ * Atomically adds @i to @v and returns @i + *@v
7682+ */
7683+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684+{
7685+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686+ : "+A" (i), "+c" (v)
7687+ : : "memory"
7688+ );
7689+ return i;
7690+}
7691+
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700+{
7701+ long long a;
7702+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703+ : "=A" (a)
7704+ : "S" (v)
7705+ : "memory", "ecx"
7706+ );
7707+ return a;
7708+}
7709+
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717+ * atomic64_add_unchecked - add integer to atomic64 variable
7718+ * @i: integer value to add
7719+ * @v: pointer to type atomic64_unchecked_t
7720+ *
7721+ * Atomically adds @i to @v.
7722+ */
7723+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724+{
7725+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726+ : "+A" (i), "+c" (v)
7727+ : : "memory"
7728+ );
7729+ return i;
7730+}
7731+
7732+/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737index 0e1cbfc..5623683 100644
7738--- a/arch/x86/include/asm/atomic64_64.h
7739+++ b/arch/x86/include/asm/atomic64_64.h
7740@@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744- return (*(volatile long *)&(v)->counter);
7745+ return (*(volatile const long *)&(v)->counter);
7746+}
7747+
7748+/**
7749+ * atomic64_read_unchecked - read atomic64 variable
7750+ * @v: pointer of type atomic64_unchecked_t
7751+ *
7752+ * Atomically reads the value of @v.
7753+ * Doesn't imply a read memory barrier.
7754+ */
7755+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756+{
7757+ return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765+ * atomic64_set_unchecked - set atomic64 variable
7766+ * @v: pointer to type atomic64_unchecked_t
7767+ * @i: required value
7768+ *
7769+ * Atomically sets the value of @v to @i.
7770+ */
7771+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772+{
7773+ v->counter = i;
7774+}
7775+
7776+/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785+
7786+#ifdef CONFIG_PAX_REFCOUNT
7787+ "jno 0f\n"
7788+ LOCK_PREFIX "subq %1,%0\n"
7789+ "int $4\n0:\n"
7790+ _ASM_EXTABLE(0b, 0b)
7791+#endif
7792+
7793+ : "=m" (v->counter)
7794+ : "er" (i), "m" (v->counter));
7795+}
7796+
7797+/**
7798+ * atomic64_add_unchecked - add integer to atomic64 variable
7799+ * @i: integer value to add
7800+ * @v: pointer to type atomic64_unchecked_t
7801+ *
7802+ * Atomically adds @i to @v.
7803+ */
7804+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805+{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813- asm volatile(LOCK_PREFIX "subq %1,%0"
7814+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815+
7816+#ifdef CONFIG_PAX_REFCOUNT
7817+ "jno 0f\n"
7818+ LOCK_PREFIX "addq %1,%0\n"
7819+ "int $4\n0:\n"
7820+ _ASM_EXTABLE(0b, 0b)
7821+#endif
7822+
7823+ : "=m" (v->counter)
7824+ : "er" (i), "m" (v->counter));
7825+}
7826+
7827+/**
7828+ * atomic64_sub_unchecked - subtract the atomic64 variable
7829+ * @i: integer value to subtract
7830+ * @v: pointer to type atomic64_unchecked_t
7831+ *
7832+ * Atomically subtracts @i from @v.
7833+ */
7834+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835+{
7836+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846+
7847+#ifdef CONFIG_PAX_REFCOUNT
7848+ "jno 0f\n"
7849+ LOCK_PREFIX "addq %2,%0\n"
7850+ "int $4\n0:\n"
7851+ _ASM_EXTABLE(0b, 0b)
7852+#endif
7853+
7854+ "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862+ asm volatile(LOCK_PREFIX "incq %0\n"
7863+
7864+#ifdef CONFIG_PAX_REFCOUNT
7865+ "jno 0f\n"
7866+ LOCK_PREFIX "decq %0\n"
7867+ "int $4\n0:\n"
7868+ _ASM_EXTABLE(0b, 0b)
7869+#endif
7870+
7871+ : "=m" (v->counter)
7872+ : "m" (v->counter));
7873+}
7874+
7875+/**
7876+ * atomic64_inc_unchecked - increment atomic64 variable
7877+ * @v: pointer to type atomic64_unchecked_t
7878+ *
7879+ * Atomically increments @v by 1.
7880+ */
7881+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882+{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890- asm volatile(LOCK_PREFIX "decq %0"
7891+ asm volatile(LOCK_PREFIX "decq %0\n"
7892+
7893+#ifdef CONFIG_PAX_REFCOUNT
7894+ "jno 0f\n"
7895+ LOCK_PREFIX "incq %0\n"
7896+ "int $4\n0:\n"
7897+ _ASM_EXTABLE(0b, 0b)
7898+#endif
7899+
7900+ : "=m" (v->counter)
7901+ : "m" (v->counter));
7902+}
7903+
7904+/**
7905+ * atomic64_dec_unchecked - decrement atomic64 variable
7906+ * @v: pointer to type atomic64_t
7907+ *
7908+ * Atomically decrements @v by 1.
7909+ */
7910+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911+{
7912+ asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921+ asm volatile(LOCK_PREFIX "decq %0\n"
7922+
7923+#ifdef CONFIG_PAX_REFCOUNT
7924+ "jno 0f\n"
7925+ LOCK_PREFIX "incq %0\n"
7926+ "int $4\n0:\n"
7927+ _ASM_EXTABLE(0b, 0b)
7928+#endif
7929+
7930+ "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939+ asm volatile(LOCK_PREFIX "incq %0\n"
7940+
7941+#ifdef CONFIG_PAX_REFCOUNT
7942+ "jno 0f\n"
7943+ LOCK_PREFIX "decq %0\n"
7944+ "int $4\n0:\n"
7945+ _ASM_EXTABLE(0b, 0b)
7946+#endif
7947+
7948+ "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958+
7959+#ifdef CONFIG_PAX_REFCOUNT
7960+ "jno 0f\n"
7961+ LOCK_PREFIX "subq %2,%0\n"
7962+ "int $4\n0:\n"
7963+ _ASM_EXTABLE(0b, 0b)
7964+#endif
7965+
7966+ "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974+ return i + xadd_check_overflow(&v->counter, i);
7975+}
7976+
7977+/**
7978+ * atomic64_add_return_unchecked - add and return
7979+ * @i: integer value to add
7980+ * @v: pointer to type atomic64_unchecked_t
7981+ *
7982+ * Atomically adds @i to @v and returns @i + @v
7983+ */
7984+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985+{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994+{
7995+ return atomic64_add_return_unchecked(1, v);
7996+}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005+{
8006+ return cmpxchg(&v->counter, old, new);
8007+}
8008+
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016- long c, old;
8017+ long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020- if (unlikely(c == (u)))
8021+ if (unlikely(c == u))
8022 break;
8023- old = atomic64_cmpxchg((v), c, c + (a));
8024+
8025+ asm volatile("add %2,%0\n"
8026+
8027+#ifdef CONFIG_PAX_REFCOUNT
8028+ "jno 0f\n"
8029+ "sub %2,%0\n"
8030+ "int $4\n0:\n"
8031+ _ASM_EXTABLE(0b, 0b)
8032+#endif
8033+
8034+ : "=r" (new)
8035+ : "0" (c), "ir" (a));
8036+
8037+ old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042- return c != (u);
8043+ return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048index 1775d6e..b65017f 100644
8049--- a/arch/x86/include/asm/bitops.h
8050+++ b/arch/x86/include/asm/bitops.h
8051@@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061index 5e1a2ee..c9f9533 100644
8062--- a/arch/x86/include/asm/boot.h
8063+++ b/arch/x86/include/asm/boot.h
8064@@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073+#ifndef __ASSEMBLY__
8074+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076+#endif
8077+
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082index 48f99f1..d78ebf9 100644
8083--- a/arch/x86/include/asm/cache.h
8084+++ b/arch/x86/include/asm/cache.h
8085@@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093+#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102index 4e12668..501d239 100644
8103--- a/arch/x86/include/asm/cacheflush.h
8104+++ b/arch/x86/include/asm/cacheflush.h
8105@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109- return -1;
8110+ return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115index 46fc474..b02b0f9 100644
8116--- a/arch/x86/include/asm/checksum_32.h
8117+++ b/arch/x86/include/asm/checksum_32.h
8118@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123+ int len, __wsum sum,
8124+ int *src_err_ptr, int *dst_err_ptr);
8125+
8126+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127+ int len, __wsum sum,
8128+ int *src_err_ptr, int *dst_err_ptr);
8129+
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137- return csum_partial_copy_generic((__force void *)src, dst,
8138+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146- return csum_partial_copy_generic(src, (__force void *)dst,
8147+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152index 5d3acdf..6447a02 100644
8153--- a/arch/x86/include/asm/cmpxchg.h
8154+++ b/arch/x86/include/asm/cmpxchg.h
8155@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159+extern void __xadd_check_overflow_wrong_size(void)
8160+ __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168+#define __xadd_check_overflow(ptr, inc, lock) \
8169+ ({ \
8170+ __typeof__ (*(ptr)) __ret = (inc); \
8171+ switch (sizeof(*(ptr))) { \
8172+ case __X86_CASE_L: \
8173+ asm volatile (lock "xaddl %0, %1\n" \
8174+ "jno 0f\n" \
8175+ "mov %0,%1\n" \
8176+ "int $4\n0:\n" \
8177+ _ASM_EXTABLE(0b, 0b) \
8178+ : "+r" (__ret), "+m" (*(ptr)) \
8179+ : : "memory", "cc"); \
8180+ break; \
8181+ case __X86_CASE_Q: \
8182+ asm volatile (lock "xaddq %q0, %1\n" \
8183+ "jno 0f\n" \
8184+ "mov %0,%1\n" \
8185+ "int $4\n0:\n" \
8186+ _ASM_EXTABLE(0b, 0b) \
8187+ : "+r" (__ret), "+m" (*(ptr)) \
8188+ : : "memory", "cc"); \
8189+ break; \
8190+ default: \
8191+ __xadd_check_overflow_wrong_size(); \
8192+ } \
8193+ __ret; \
8194+ })
8195+
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204+
8205 #endif /* ASM_X86_CMPXCHG_H */
8206diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207index f3444f7..051a196 100644
8208--- a/arch/x86/include/asm/cpufeature.h
8209+++ b/arch/x86/include/asm/cpufeature.h
8210@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214- ".section .altinstr_replacement,\"ax\"\n"
8215+ ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220index 41935fa..3b40db8 100644
8221--- a/arch/x86/include/asm/desc.h
8222+++ b/arch/x86/include/asm/desc.h
8223@@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227+#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235+ desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243-extern gate_desc idt_table[];
8244-
8245-struct gdt_page {
8246- struct desc_struct gdt[GDT_ENTRIES];
8247-} __attribute__((aligned(PAGE_SIZE)));
8248-
8249-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250+extern gate_desc idt_table[256];
8251
8252+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255- return per_cpu(gdt_page, cpu).gdt;
8256+ return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264- gate->a = (seg << 16) | (base & 0xffff);
8265- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266+ gate->gate.offset_low = base;
8267+ gate->gate.seg = seg;
8268+ gate->gate.reserved = 0;
8269+ gate->gate.type = type;
8270+ gate->gate.s = 0;
8271+ gate->gate.dpl = dpl;
8272+ gate->gate.p = 1;
8273+ gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281+ pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283+ pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288+ pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290+ pax_close_kernel();
8291 }
8292
8293 static inline void
8294@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298+ pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300+ pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308+ pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310+ pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318+ pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321+ pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329-static inline void _set_gate(int gate, unsigned type, void *addr,
8330+static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338-static inline void set_intr_gate(unsigned int n, void *addr)
8339+static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347-static inline void set_system_intr_gate(unsigned int n, void *addr)
8348+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354-static inline void set_system_trap_gate(unsigned int n, void *addr)
8355+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361-static inline void set_trap_gate(unsigned int n, void *addr)
8362+static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388+#ifdef CONFIG_X86_32
8389+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390+{
8391+ struct desc_struct d;
8392+
8393+ if (likely(limit))
8394+ limit = (limit - 1UL) >> PAGE_SHIFT;
8395+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397+}
8398+#endif
8399+
8400 #endif /* _ASM_X86_DESC_H */
8401diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402index 278441f..b95a174 100644
8403--- a/arch/x86/include/asm/desc_defs.h
8404+++ b/arch/x86/include/asm/desc_defs.h
8405@@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409+ struct {
8410+ u16 offset_low;
8411+ u16 seg;
8412+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413+ unsigned offset_high: 16;
8414+ } gate;
8415 };
8416 } __attribute__((packed));
8417
8418diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419index 908b969..a1f4eb4 100644
8420--- a/arch/x86/include/asm/e820.h
8421+++ b/arch/x86/include/asm/e820.h
8422@@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426-#define BIOS_BEGIN 0x000a0000
8427+#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432index 5f962df..7289f09 100644
8433--- a/arch/x86/include/asm/elf.h
8434+++ b/arch/x86/include/asm/elf.h
8435@@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439+#ifdef CONFIG_PAX_SEGMEXEC
8440+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441+#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443+#endif
8444+
8445+#ifdef CONFIG_PAX_ASLR
8446+#ifdef CONFIG_X86_32
8447+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448+
8449+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451+#else
8452+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453+
8454+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456+#endif
8457+#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461@@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465- if (vdso_enabled) \
8466- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467- (unsigned long)current->mm->context.vdso); \
8468+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472@@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486-#define arch_randomize_brk arch_randomize_brk
8487-
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492index cc70c1c..d96d011 100644
8493--- a/arch/x86/include/asm/emergency-restart.h
8494+++ b/arch/x86/include/asm/emergency-restart.h
8495@@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499-extern void machine_emergency_restart(void);
8500+extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504index d09bb03..4ea4194 100644
8505--- a/arch/x86/include/asm/futex.h
8506+++ b/arch/x86/include/asm/futex.h
8507@@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511+ typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523+ typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527@@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531- "+m" (*uaddr), "=&r" (tem) \
8532+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566index eb92a6e..b98b2f4 100644
8567--- a/arch/x86/include/asm/hw_irq.h
8568+++ b/arch/x86/include/asm/hw_irq.h
8569@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573-extern atomic_t irq_err_count;
8574-extern atomic_t irq_mis_count;
8575+extern atomic_unchecked_t irq_err_count;
8576+extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581index c9e09ea..73888df 100644
8582--- a/arch/x86/include/asm/i387.h
8583+++ b/arch/x86/include/asm/i387.h
8584@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591+#endif
8592+
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603+#endif
8604+
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612- in L1 during context switch. The best choices are unfortunately
8613- different for UP and SMP */
8614-#ifdef CONFIG_SMP
8615-#define safe_address (__per_cpu_offset[0])
8616-#else
8617-#define safe_address (kstat_cpu(0).cpustat.user)
8618-#endif
8619+ in L1 during context switch. */
8620+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628- __save_init_fpu(me->task);
8629+ __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634index d8e8eef..99f81ae 100644
8635--- a/arch/x86/include/asm/io.h
8636+++ b/arch/x86/include/asm/io.h
8637@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643+{
8644+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645+}
8646+
8647+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648+{
8649+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650+}
8651+
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656index bba3cf8..06bc8da 100644
8657--- a/arch/x86/include/asm/irqflags.h
8658+++ b/arch/x86/include/asm/irqflags.h
8659@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667+
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672index 5478825..839e88c 100644
8673--- a/arch/x86/include/asm/kprobes.h
8674+++ b/arch/x86/include/asm/kprobes.h
8675@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679-#define MAX_STACK_SIZE 64
8680-#define MIN_STACK_SIZE(ADDR) \
8681- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682- THREAD_SIZE - (unsigned long)(ADDR))) \
8683- ? (MAX_STACK_SIZE) \
8684- : (((unsigned long)current_thread_info()) + \
8685- THREAD_SIZE - (unsigned long)(ADDR)))
8686+#define MAX_STACK_SIZE 64UL
8687+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692index b4973f4..7c4d3fc 100644
8693--- a/arch/x86/include/asm/kvm_host.h
8694+++ b/arch/x86/include/asm/kvm_host.h
8695@@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699- atomic_t invlpg_counter;
8700+ atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704@@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708-};
8709+} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714index 9cdae5d..300d20f 100644
8715--- a/arch/x86/include/asm/local.h
8716+++ b/arch/x86/include/asm/local.h
8717@@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721- asm volatile(_ASM_INC "%0"
8722+ asm volatile(_ASM_INC "%0\n"
8723+
8724+#ifdef CONFIG_PAX_REFCOUNT
8725+ "jno 0f\n"
8726+ _ASM_DEC "%0\n"
8727+ "int $4\n0:\n"
8728+ _ASM_EXTABLE(0b, 0b)
8729+#endif
8730+
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736- asm volatile(_ASM_DEC "%0"
8737+ asm volatile(_ASM_DEC "%0\n"
8738+
8739+#ifdef CONFIG_PAX_REFCOUNT
8740+ "jno 0f\n"
8741+ _ASM_INC "%0\n"
8742+ "int $4\n0:\n"
8743+ _ASM_EXTABLE(0b, 0b)
8744+#endif
8745+
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751- asm volatile(_ASM_ADD "%1,%0"
8752+ asm volatile(_ASM_ADD "%1,%0\n"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ _ASM_SUB "%1,%0\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767- asm volatile(_ASM_SUB "%1,%0"
8768+ asm volatile(_ASM_SUB "%1,%0\n"
8769+
8770+#ifdef CONFIG_PAX_REFCOUNT
8771+ "jno 0f\n"
8772+ _ASM_ADD "%1,%0\n"
8773+ "int $4\n0:\n"
8774+ _ASM_EXTABLE(0b, 0b)
8775+#endif
8776+
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784- asm volatile(_ASM_SUB "%2,%0; sete %1"
8785+ asm volatile(_ASM_SUB "%2,%0\n"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ _ASM_ADD "%2,%0\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794+ "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802- asm volatile(_ASM_DEC "%0; sete %1"
8803+ asm volatile(_ASM_DEC "%0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ _ASM_INC "%0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812+ "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820- asm volatile(_ASM_INC "%0; sete %1"
8821+ asm volatile(_ASM_INC "%0\n"
8822+
8823+#ifdef CONFIG_PAX_REFCOUNT
8824+ "jno 0f\n"
8825+ _ASM_DEC "%0\n"
8826+ "int $4\n0:\n"
8827+ _ASM_EXTABLE(0b, 0b)
8828+#endif
8829+
8830+ "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838- asm volatile(_ASM_ADD "%2,%0; sets %1"
8839+ asm volatile(_ASM_ADD "%2,%0\n"
8840+
8841+#ifdef CONFIG_PAX_REFCOUNT
8842+ "jno 0f\n"
8843+ _ASM_SUB "%2,%0\n"
8844+ "int $4\n0:\n"
8845+ _ASM_EXTABLE(0b, 0b)
8846+#endif
8847+
8848+ "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856- asm volatile(_ASM_XADD "%0, %1;"
8857+ asm volatile(_ASM_XADD "%0, %1\n"
8858+
8859+#ifdef CONFIG_PAX_REFCOUNT
8860+ "jno 0f\n"
8861+ _ASM_MOV "%0,%1\n"
8862+ "int $4\n0:\n"
8863+ _ASM_EXTABLE(0b, 0b)
8864+#endif
8865+
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870index 593e51d..fa69c9a 100644
8871--- a/arch/x86/include/asm/mman.h
8872+++ b/arch/x86/include/asm/mman.h
8873@@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877+#ifdef __KERNEL__
8878+#ifndef __ASSEMBLY__
8879+#ifdef CONFIG_X86_32
8880+#define arch_mmap_check i386_mmap_check
8881+int i386_mmap_check(unsigned long addr, unsigned long len,
8882+ unsigned long flags);
8883+#endif
8884+#endif
8885+#endif
8886+
8887 #endif /* _ASM_X86_MMAN_H */
8888diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889index 5f55e69..e20bfb1 100644
8890--- a/arch/x86/include/asm/mmu.h
8891+++ b/arch/x86/include/asm/mmu.h
8892@@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896- void *ldt;
8897+ struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901@@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905- void *vdso;
8906+ unsigned long vdso;
8907+
8908+#ifdef CONFIG_X86_32
8909+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910+ unsigned long user_cs_base;
8911+ unsigned long user_cs_limit;
8912+
8913+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914+ cpumask_t cpu_user_cs_mask;
8915+#endif
8916+
8917+#endif
8918+#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923index 6902152..399f3a2 100644
8924--- a/arch/x86/include/asm/mmu_context.h
8925+++ b/arch/x86/include/asm/mmu_context.h
8926@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930+
8931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932+ unsigned int i;
8933+ pgd_t *pgd;
8934+
8935+ pax_open_kernel();
8936+ pgd = get_cpu_pgd(smp_processor_id());
8937+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938+ set_pgd_batched(pgd+i, native_make_pgd(0));
8939+ pax_close_kernel();
8940+#endif
8941+
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950+ int tlbstate = TLBSTATE_OK;
8951+#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956+ tlbstate = percpu_read(cpu_tlbstate.state);
8957+#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964+#ifdef CONFIG_PAX_PER_CPU_PGD
8965+ pax_open_kernel();
8966+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968+ pax_close_kernel();
8969+ load_cr3(get_cpu_pgd(cpu));
8970+#else
8971 load_cr3(next->pgd);
8972+#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980- }
8981+
8982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983+ if (!(__supported_pte_mask & _PAGE_NX)) {
8984+ smp_mb__before_clear_bit();
8985+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986+ smp_mb__after_clear_bit();
8987+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8988+ }
8989+#endif
8990+
8991+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993+ prev->context.user_cs_limit != next->context.user_cs_limit))
8994+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996+ else if (unlikely(tlbstate != TLBSTATE_OK))
8997+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998+#endif
8999+#endif
9000+
9001+ }
9002 else {
9003+
9004+#ifdef CONFIG_PAX_PER_CPU_PGD
9005+ pax_open_kernel();
9006+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008+ pax_close_kernel();
9009+ load_cr3(get_cpu_pgd(cpu));
9010+#endif
9011+
9012+#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020+
9021+#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023+#endif
9024+
9025 load_LDT_nolock(&next->context);
9026+
9027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028+ if (!(__supported_pte_mask & _PAGE_NX))
9029+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9030+#endif
9031+
9032+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033+#ifdef CONFIG_PAX_PAGEEXEC
9034+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035+#endif
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+
9039 }
9040+#endif
9041 }
9042-#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047index 9eae775..c914fea 100644
9048--- a/arch/x86/include/asm/module.h
9049+++ b/arch/x86/include/asm/module.h
9050@@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054+#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058@@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062-#ifdef CONFIG_X86_32
9063-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068+#else
9069+#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072+#ifdef CONFIG_PAX_MEMORY_UDEREF
9073+#define MODULE_PAX_UDEREF "UDEREF "
9074+#else
9075+#define MODULE_PAX_UDEREF ""
9076+#endif
9077+
9078+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079+
9080 #endif /* _ASM_X86_MODULE_H */
9081diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082index 7639dbf..e08a58c 100644
9083--- a/arch/x86/include/asm/page_64_types.h
9084+++ b/arch/x86/include/asm/page_64_types.h
9085@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089-extern unsigned long phys_base;
9090+extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095index a7d2db9..edb023e 100644
9096--- a/arch/x86/include/asm/paravirt.h
9097+++ b/arch/x86/include/asm/paravirt.h
9098@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103+{
9104+ pgdval_t val = native_pgd_val(pgd);
9105+
9106+ if (sizeof(pgdval_t) > sizeof(long))
9107+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108+ val, (u64)val >> 32);
9109+ else
9110+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111+ val);
9112+}
9113+
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121+#ifdef CONFIG_PAX_KERNEXEC
9122+static inline unsigned long pax_open_kernel(void)
9123+{
9124+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125+}
9126+
9127+static inline unsigned long pax_close_kernel(void)
9128+{
9129+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130+}
9131+#else
9132+static inline unsigned long pax_open_kernel(void) { return 0; }
9133+static inline unsigned long pax_close_kernel(void) { return 0; }
9134+#endif
9135+
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139@@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143-#define PARA_INDIRECT(addr) *%cs:addr
9144+#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152+
9153+#define GET_CR0_INTO_RDI \
9154+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155+ mov %rax,%rdi
9156+
9157+#define SET_RDI_INTO_CR0 \
9158+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159+
9160+#define GET_CR3_INTO_RDI \
9161+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162+ mov %rax,%rdi
9163+
9164+#define SET_RDI_INTO_CR3 \
9165+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166+
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171index 8e8b9a4..f07d725 100644
9172--- a/arch/x86/include/asm/paravirt_types.h
9173+++ b/arch/x86/include/asm/paravirt_types.h
9174@@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178-};
9179+} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186-};
9187+} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193-};
9194+} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202-};
9203+} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207@@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211-};
9212+} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228+
9229+#ifdef CONFIG_PAX_KERNEXEC
9230+ unsigned long (*pax_open_kernel)(void);
9231+ unsigned long (*pax_close_kernel)(void);
9232+#endif
9233+
9234 };
9235
9236 struct arch_spinlock;
9237@@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241-};
9242+} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247index b4389a4..b7ff22c 100644
9248--- a/arch/x86/include/asm/pgalloc.h
9249+++ b/arch/x86/include/asm/pgalloc.h
9250@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255+}
9256+
9257+static inline void pmd_populate_user(struct mm_struct *mm,
9258+ pmd_t *pmd, pte_t *pte)
9259+{
9260+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265index 98391db..8f6984e 100644
9266--- a/arch/x86/include/asm/pgtable-2level.h
9267+++ b/arch/x86/include/asm/pgtable-2level.h
9268@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272+ pax_open_kernel();
9273 *pmdp = pmd;
9274+ pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279index effff47..f9e4035 100644
9280--- a/arch/x86/include/asm/pgtable-3level.h
9281+++ b/arch/x86/include/asm/pgtable-3level.h
9282@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286+ pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288+ pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293+ pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295+ pax_close_kernel();
9296 }
9297
9298 /*
9299diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300index 18601c8..3d716d1 100644
9301--- a/arch/x86/include/asm/pgtable.h
9302+++ b/arch/x86/include/asm/pgtable.h
9303@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315+#define pax_open_kernel() native_pax_open_kernel()
9316+#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321+
9322+#ifdef CONFIG_PAX_KERNEXEC
9323+static inline unsigned long native_pax_open_kernel(void)
9324+{
9325+ unsigned long cr0;
9326+
9327+ preempt_disable();
9328+ barrier();
9329+ cr0 = read_cr0() ^ X86_CR0_WP;
9330+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331+ write_cr0(cr0);
9332+ return cr0 ^ X86_CR0_WP;
9333+}
9334+
9335+static inline unsigned long native_pax_close_kernel(void)
9336+{
9337+ unsigned long cr0;
9338+
9339+ cr0 = read_cr0() ^ X86_CR0_WP;
9340+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341+ write_cr0(cr0);
9342+ barrier();
9343+ preempt_enable_no_resched();
9344+ return cr0 ^ X86_CR0_WP;
9345+}
9346+#else
9347+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349+#endif
9350+
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355+static inline int pte_user(pte_t pte)
9356+{
9357+ return pte_val(pte) & _PAGE_USER;
9358+}
9359+
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367+static inline pte_t pte_mkread(pte_t pte)
9368+{
9369+ return __pte(pte_val(pte) | _PAGE_USER);
9370+}
9371+
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374- return pte_clear_flags(pte, _PAGE_NX);
9375+#ifdef CONFIG_X86_PAE
9376+ if (__supported_pte_mask & _PAGE_NX)
9377+ return pte_clear_flags(pte, _PAGE_NX);
9378+ else
9379+#endif
9380+ return pte_set_flags(pte, _PAGE_USER);
9381+}
9382+
9383+static inline pte_t pte_exprotect(pte_t pte)
9384+{
9385+#ifdef CONFIG_X86_PAE
9386+ if (__supported_pte_mask & _PAGE_NX)
9387+ return pte_set_flags(pte, _PAGE_NX);
9388+ else
9389+#endif
9390+ return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398+
9399+#ifdef CONFIG_PAX_PER_CPU_PGD
9400+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402+{
9403+ return cpu_pgd[cpu];
9404+}
9405+#endif
9406+
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425+
9426+#ifdef CONFIG_PAX_PER_CPU_PGD
9427+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428+#endif
9429+
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437+#ifdef CONFIG_X86_32
9438+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439+#else
9440+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442+
9443+#ifdef CONFIG_PAX_MEMORY_UDEREF
9444+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445+#else
9446+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447+#endif
9448+
9449+#endif
9450+
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461- memcpy(dst, src, count * sizeof(pgd_t));
9462+ pax_open_kernel();
9463+ while (count--)
9464+ *dst++ = *src++;
9465+ pax_close_kernel();
9466 }
9467
9468+#ifdef CONFIG_PAX_PER_CPU_PGD
9469+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470+#endif
9471+
9472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474+#else
9475+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476+#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481index 0c92113..34a77c6 100644
9482--- a/arch/x86/include/asm/pgtable_32.h
9483+++ b/arch/x86/include/asm/pgtable_32.h
9484@@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488-extern pgd_t swapper_pg_dir[1024];
9489-extern pgd_t initial_page_table[1024];
9490-
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499+extern pgd_t initial_page_table[PTRS_PER_PGD];
9500+#ifdef CONFIG_X86_PAE
9501+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502+#endif
9503+
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511+ pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513+ pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517@@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521+#define HAVE_ARCH_UNMAPPED_AREA
9522+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523+
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528index ed5903b..c7fe163 100644
9529--- a/arch/x86/include/asm/pgtable_32_types.h
9530+++ b/arch/x86/include/asm/pgtable_32_types.h
9531@@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535-# define PMD_SIZE (1UL << PMD_SHIFT)
9536+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544+#ifdef CONFIG_PAX_KERNEXEC
9545+#ifndef __ASSEMBLY__
9546+extern unsigned char MODULES_EXEC_VADDR[];
9547+extern unsigned char MODULES_EXEC_END[];
9548+#endif
9549+#include <asm/boot.h>
9550+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552+#else
9553+#define ktla_ktva(addr) (addr)
9554+#define ktva_ktla(addr) (addr)
9555+#endif
9556+
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561index 975f709..107976d 100644
9562--- a/arch/x86/include/asm/pgtable_64.h
9563+++ b/arch/x86/include/asm/pgtable_64.h
9564@@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568+extern pud_t level3_vmalloc_start_pgt[512];
9569+extern pud_t level3_vmalloc_end_pgt[512];
9570+extern pud_t level3_vmemmap_pgt[512];
9571+extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574-extern pmd_t level2_ident_pgt[512];
9575-extern pgd_t init_level4_pgt[];
9576+extern pmd_t level2_ident_pgt[512*2];
9577+extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585+ pax_open_kernel();
9586 *pmdp = pmd;
9587+ pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595+ pax_open_kernel();
9596+ *pgdp = pgd;
9597+ pax_close_kernel();
9598+}
9599+
9600+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601+{
9602 *pgdp = pgd;
9603 }
9604
9605diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606index 766ea16..5b96cb3 100644
9607--- a/arch/x86/include/asm/pgtable_64_types.h
9608+++ b/arch/x86/include/asm/pgtable_64_types.h
9609@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613+#define MODULES_EXEC_VADDR MODULES_VADDR
9614+#define MODULES_EXEC_END MODULES_END
9615+
9616+#define ktla_ktva(addr) (addr)
9617+#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621index 013286a..8b42f4f 100644
9622--- a/arch/x86/include/asm/pgtable_types.h
9623+++ b/arch/x86/include/asm/pgtable_types.h
9624@@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641@@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649@@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653-#else
9654+#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656+#else
9657+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661@@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667+
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671@@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680@@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695+#endif
9696
9697+#if PAGETABLE_LEVELS == 3
9698+#include <asm-generic/pgtable-nopud.h>
9699+#endif
9700+
9701+#if PAGETABLE_LEVELS == 2
9702+#include <asm-generic/pgtable-nopmd.h>
9703+#endif
9704+
9705+#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713-#include <asm-generic/pgtable-nopud.h>
9714-
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722-#include <asm-generic/pgtable-nopmd.h>
9723-
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731-extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736index b650435..eefa566 100644
9737--- a/arch/x86/include/asm/processor.h
9738+++ b/arch/x86/include/asm/processor.h
9739@@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744+extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752+
9753+#ifdef CONFIG_PAX_SEGMEXEC
9754+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756+#else
9757 #define STACK_TOP TASK_SIZE
9758-#define STACK_TOP_MAX STACK_TOP
9759+#endif
9760+
9761+#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782-#define KSTK_TOP(info) \
9783-({ \
9784- unsigned long *__ptr = (unsigned long *)(info); \
9785- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786-})
9787+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811- 0xc0000000 : 0xFFFFe000)
9812+ 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834+#ifdef CONFIG_PAX_SEGMEXEC
9835+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836+#endif
9837+
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842index 3566454..4bdfb8c 100644
9843--- a/arch/x86/include/asm/ptrace.h
9844+++ b/arch/x86/include/asm/ptrace.h
9845@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849- * user_mode_vm(regs) determines whether a register set came from user mode.
9850+ * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856+ * be used.
9857 */
9858-static inline int user_mode(struct pt_regs *regs)
9859+static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864- return !!(regs->cs & 3);
9865+ return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869-static inline int user_mode_vm(struct pt_regs *regs)
9870+static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876- return user_mode(regs);
9877+ return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885+ unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891- return regs->cs == __USER_CS;
9892+ return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901index 92f29706..a79cbbb 100644
9902--- a/arch/x86/include/asm/reboot.h
9903+++ b/arch/x86/include/asm/reboot.h
9904@@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908- void (*restart)(char *cmd);
9909- void (*halt)(void);
9910- void (*power_off)(void);
9911+ void (* __noreturn restart)(char *cmd);
9912+ void (* __noreturn halt)(void);
9913+ void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916- void (*emergency_restart)(void);
9917-};
9918+ void (* __noreturn emergency_restart)(void);
9919+} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925-void machine_real_restart(unsigned int type);
9926+void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931index 2dbe4a7..ce1db00 100644
9932--- a/arch/x86/include/asm/rwsem.h
9933+++ b/arch/x86/include/asm/rwsem.h
9934@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938+
9939+#ifdef CONFIG_PAX_REFCOUNT
9940+ "jno 0f\n"
9941+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9942+ "int $4\n0:\n"
9943+ _ASM_EXTABLE(0b, 0b)
9944+#endif
9945+
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953+
9954+#ifdef CONFIG_PAX_REFCOUNT
9955+ "jno 0f\n"
9956+ "sub %3,%2\n"
9957+ "int $4\n0:\n"
9958+ _ASM_EXTABLE(0b, 0b)
9959+#endif
9960+
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968+
9969+#ifdef CONFIG_PAX_REFCOUNT
9970+ "jno 0f\n"
9971+ "mov %1,(%2)\n"
9972+ "int $4\n0:\n"
9973+ _ASM_EXTABLE(0b, 0b)
9974+#endif
9975+
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983+
9984+#ifdef CONFIG_PAX_REFCOUNT
9985+ "jno 0f\n"
9986+ "mov %1,(%2)\n"
9987+ "int $4\n0:\n"
9988+ _ASM_EXTABLE(0b, 0b)
9989+#endif
9990+
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998+
9999+#ifdef CONFIG_PAX_REFCOUNT
10000+ "jno 0f\n"
10001+ "mov %1,(%2)\n"
10002+ "int $4\n0:\n"
10003+ _ASM_EXTABLE(0b, 0b)
10004+#endif
10005+
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013+
10014+#ifdef CONFIG_PAX_REFCOUNT
10015+ "jno 0f\n"
10016+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017+ "int $4\n0:\n"
10018+ _ASM_EXTABLE(0b, 0b)
10019+#endif
10020+
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030+
10031+#ifdef CONFIG_PAX_REFCOUNT
10032+ "jno 0f\n"
10033+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034+ "int $4\n0:\n"
10035+ _ASM_EXTABLE(0b, 0b)
10036+#endif
10037+
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045- return delta + xadd(&sem->count, delta);
10046+ return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051index 5e64171..f58957e 100644
10052--- a/arch/x86/include/asm/segment.h
10053+++ b/arch/x86/include/asm/segment.h
10054@@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058- * 29 - unused
10059- * 30 - unused
10060+ * 29 - PCI BIOS CS
10061+ * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068+
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072@@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077+
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081@@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087+
10088+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090+
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094@@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103@@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108+
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112@@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121index 73b11bc..d4a3b63 100644
10122--- a/arch/x86/include/asm/smp.h
10123+++ b/arch/x86/include/asm/smp.h
10124@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128-DECLARE_PER_CPU(int, cpu_number);
10129+DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133@@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137-};
10138+} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146-#define raw_smp_processor_id() (percpu_read(cpu_number))
10147-
10148-#define stack_smp_processor_id() \
10149-({ \
10150- struct thread_info *ti; \
10151- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152- ti->cpu; \
10153-})
10154+#define raw_smp_processor_id() (percpu_read(cpu_number))
10155+#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160index 972c260..43ab1fd 100644
10161--- a/arch/x86/include/asm/spinlock.h
10162+++ b/arch/x86/include/asm/spinlock.h
10163@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167+
10168+#ifdef CONFIG_PAX_REFCOUNT
10169+ "jno 0f\n"
10170+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171+ "int $4\n0:\n"
10172+ _ASM_EXTABLE(0b, 0b)
10173+#endif
10174+
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182+
10183+#ifdef CONFIG_PAX_REFCOUNT
10184+ "jno 0f\n"
10185+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186+ "int $4\n0:\n"
10187+ _ASM_EXTABLE(0b, 0b)
10188+#endif
10189+
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199+
10200+#ifdef CONFIG_PAX_REFCOUNT
10201+ "jno 0f\n"
10202+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203+ "int $4\n0:\n"
10204+ _ASM_EXTABLE(0b, 0b)
10205+#endif
10206+
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214+
10215+#ifdef CONFIG_PAX_REFCOUNT
10216+ "jno 0f\n"
10217+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218+ "int $4\n0:\n"
10219+ _ASM_EXTABLE(0b, 0b)
10220+#endif
10221+
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226index 1575177..cb23f52 100644
10227--- a/arch/x86/include/asm/stackprotector.h
10228+++ b/arch/x86/include/asm/stackprotector.h
10229@@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242-#ifdef CONFIG_X86_32
10243+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248index 70bbe39..4ae2bd4 100644
10249--- a/arch/x86/include/asm/stacktrace.h
10250+++ b/arch/x86/include/asm/stacktrace.h
10251@@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255-struct thread_info;
10256+struct task_struct;
10257 struct stacktrace_ops;
10258
10259-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260- unsigned long *stack,
10261- unsigned long bp,
10262- const struct stacktrace_ops *ops,
10263- void *data,
10264- unsigned long *end,
10265- int *graph);
10266+typedef unsigned long walk_stack_t(struct task_struct *task,
10267+ void *stack_start,
10268+ unsigned long *stack,
10269+ unsigned long bp,
10270+ const struct stacktrace_ops *ops,
10271+ void *data,
10272+ unsigned long *end,
10273+ int *graph);
10274
10275-extern unsigned long
10276-print_context_stack(struct thread_info *tinfo,
10277- unsigned long *stack, unsigned long bp,
10278- const struct stacktrace_ops *ops, void *data,
10279- unsigned long *end, int *graph);
10280-
10281-extern unsigned long
10282-print_context_stack_bp(struct thread_info *tinfo,
10283- unsigned long *stack, unsigned long bp,
10284- const struct stacktrace_ops *ops, void *data,
10285- unsigned long *end, int *graph);
10286+extern walk_stack_t print_context_stack;
10287+extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291@@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295- walk_stack_t walk_stack;
10296+ walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301index cb23852..2dde194 100644
10302--- a/arch/x86/include/asm/sys_ia32.h
10303+++ b/arch/x86/include/asm/sys_ia32.h
10304@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314index 2d2f01c..f985723 100644
10315--- a/arch/x86/include/asm/system.h
10316+++ b/arch/x86/include/asm/system.h
10317@@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326@@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331+ [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339- return __limit + 1;
10340+ return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344@@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348-extern unsigned long arch_align_stack(unsigned long sp);
10349+#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355-void stop_this_cpu(void *dummy);
10356+void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361index a1fe5c1..ee326d8 100644
10362--- a/arch/x86/include/asm/thread_info.h
10363+++ b/arch/x86/include/asm/thread_info.h
10364@@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368+#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372@@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376- struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380@@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384-#ifdef CONFIG_X86_32
10385- unsigned long previous_esp; /* ESP of the previous stack in
10386- case of nested (IRQ) stacks
10387- */
10388- __u8 supervisor_stack[0];
10389-#endif
10390+ unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394-#define INIT_THREAD_INFO(tsk) \
10395+#define INIT_THREAD_INFO \
10396 { \
10397- .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401@@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405-#define init_thread_info (init_thread_union.thread_info)
10406+#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410@@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414-#ifdef CONFIG_X86_32
10415-
10416-#define STACK_WARN (THREAD_SIZE/8)
10417-/*
10418- * macros/functions for gaining access to the thread information structure
10419- *
10420- * preempt_count needs to be 1 initially, until the scheduler is functional.
10421- */
10422-#ifndef __ASSEMBLY__
10423-
10424-
10425-/* how to get the current stack pointer from C */
10426-register unsigned long current_stack_pointer asm("esp") __used;
10427-
10428-/* how to get the thread information struct from C */
10429-static inline struct thread_info *current_thread_info(void)
10430-{
10431- return (struct thread_info *)
10432- (current_stack_pointer & ~(THREAD_SIZE - 1));
10433-}
10434-
10435-#else /* !__ASSEMBLY__ */
10436-
10437+#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440- movl $-THREAD_SIZE, reg; \
10441- andl %esp, reg
10442+ mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445-#define GET_THREAD_INFO_WITH_ESP(reg) \
10446- andl $-THREAD_SIZE, reg
10447+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448+#else
10449+/* how to get the thread information struct from C */
10450+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451+
10452+static __always_inline struct thread_info *current_thread_info(void)
10453+{
10454+ return percpu_read_stable(current_tinfo);
10455+}
10456+#endif
10457+
10458+#ifdef CONFIG_X86_32
10459+
10460+#define STACK_WARN (THREAD_SIZE/8)
10461+/*
10462+ * macros/functions for gaining access to the thread information structure
10463+ *
10464+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10465+ */
10466+#ifndef __ASSEMBLY__
10467+
10468+/* how to get the current stack pointer from C */
10469+register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475-#include <asm/percpu.h>
10476-#define KERNEL_STACK_OFFSET (5*8)
10477-
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485-static inline struct thread_info *current_thread_info(void)
10486-{
10487- struct thread_info *ti;
10488- ti = (void *)(percpu_read_stable(kernel_stack) +
10489- KERNEL_STACK_OFFSET - THREAD_SIZE);
10490- return ti;
10491-}
10492-
10493-#else /* !__ASSEMBLY__ */
10494-
10495-/* how to get the thread information struct from ASM */
10496-#define GET_THREAD_INFO(reg) \
10497- movq PER_CPU_VAR(kernel_stack),reg ; \
10498- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499-
10500+/* how to get the current stack pointer from C */
10501+register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509+
10510+#define __HAVE_THREAD_FUNCTIONS
10511+#define task_thread_info(task) (&(task)->tinfo)
10512+#define task_stack_page(task) ((task)->stack)
10513+#define setup_thread_stack(p, org) do {} while (0)
10514+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515+
10516+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517+extern struct task_struct *alloc_task_struct_node(int node);
10518+extern void free_task_struct(struct task_struct *);
10519+
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523index 36361bf..324f262 100644
10524--- a/arch/x86/include/asm/uaccess.h
10525+++ b/arch/x86/include/asm/uaccess.h
10526@@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530+#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538+
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542@@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547+void __set_fs(mm_segment_t x);
10548+void set_fs(mm_segment_t x);
10549+#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551+#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555@@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561+#define access_ok(type, addr, size) \
10562+({ \
10563+ long __size = size; \
10564+ unsigned long __addr = (unsigned long)addr; \
10565+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10566+ unsigned long __end_ao = __addr + __size - 1; \
10567+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569+ while(__addr_ao <= __end_ao) { \
10570+ char __c_ao; \
10571+ __addr_ao += PAGE_SIZE; \
10572+ if (__size > PAGE_SIZE) \
10573+ cond_resched(); \
10574+ if (__get_user(__c_ao, (char __user *)__addr)) \
10575+ break; \
10576+ if (type != VERIFY_WRITE) { \
10577+ __addr = __addr_ao; \
10578+ continue; \
10579+ } \
10580+ if (__put_user(__c_ao, (char __user *)__addr)) \
10581+ break; \
10582+ __addr = __addr_ao; \
10583+ } \
10584+ } \
10585+ __ret_ao; \
10586+})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594-
10595+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596+#define __copyuser_seg "gs;"
10597+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599+#else
10600+#define __copyuser_seg
10601+#define __COPYUSER_SET_ES
10602+#define __COPYUSER_RESTORE_ES
10603+#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607- asm volatile("1: movl %%eax,0(%2)\n" \
10608- "2: movl %%edx,4(%2)\n" \
10609+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618- asm volatile("1: movl %%eax,0(%1)\n" \
10619- "2: movl %%edx,4(%1)\n" \
10620+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629- __pu_val = x; \
10630+ __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634@@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643@@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647- : "=r" (err), ltype(x) \
10648+ : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652@@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661@@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666+ (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672-#define __m(x) (*(struct __large_struct __user *)(x))
10673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674+#define ____m(x) \
10675+({ \
10676+ unsigned long ____x = (unsigned long)(x); \
10677+ if (____x < PAX_USER_SHADOW_BASE) \
10678+ ____x += PAX_USER_SHADOW_BASE; \
10679+ (void __user *)____x; \
10680+})
10681+#else
10682+#define ____m(x) (x)
10683+#endif
10684+#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715+#define __get_user(x, ptr) get_user((x), (ptr))
10716+#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719+#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728+#define __put_user(x, ptr) put_user((x), (ptr))
10729+#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732+#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741+ (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746index 566e803..b9521e9 100644
10747--- a/arch/x86/include/asm/uaccess_32.h
10748+++ b/arch/x86/include/asm/uaccess_32.h
10749@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753+ if ((long)n < 0)
10754+ return n;
10755+
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763+ if (!__builtin_constant_p(n))
10764+ check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772+
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779+ if ((long)n < 0)
10780+ return n;
10781+
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785@@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789+
10790+ if ((long)n < 0)
10791+ return n;
10792+
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800+ if (!__builtin_constant_p(n))
10801+ check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809+
10810+ if ((long)n < 0)
10811+ return n;
10812+
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816@@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820- return __copy_from_user_ll_nocache_nozero(to, from, n);
10821+ if ((long)n < 0)
10822+ return n;
10823+
10824+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827-unsigned long __must_check copy_to_user(void __user *to,
10828- const void *from, unsigned long n);
10829-unsigned long __must_check _copy_from_user(void *to,
10830- const void __user *from,
10831- unsigned long n);
10832-
10833+extern void copy_to_user_overflow(void)
10834+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10836+#else
10837+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838+#endif
10839+;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847-static inline unsigned long __must_check copy_from_user(void *to,
10848- const void __user *from,
10849- unsigned long n)
10850+/**
10851+ * copy_to_user: - Copy a block of data into user space.
10852+ * @to: Destination address, in user space.
10853+ * @from: Source address, in kernel space.
10854+ * @n: Number of bytes to copy.
10855+ *
10856+ * Context: User context only. This function may sleep.
10857+ *
10858+ * Copy data from kernel space to user space.
10859+ *
10860+ * Returns number of bytes that could not be copied.
10861+ * On success, this will be zero.
10862+ */
10863+static inline unsigned long __must_check
10864+copy_to_user(void __user *to, const void *from, unsigned long n)
10865+{
10866+ int sz = __compiletime_object_size(from);
10867+
10868+ if (unlikely(sz != -1 && sz < n))
10869+ copy_to_user_overflow();
10870+ else if (access_ok(VERIFY_WRITE, to, n))
10871+ n = __copy_to_user(to, from, n);
10872+ return n;
10873+}
10874+
10875+/**
10876+ * copy_from_user: - Copy a block of data from user space.
10877+ * @to: Destination address, in kernel space.
10878+ * @from: Source address, in user space.
10879+ * @n: Number of bytes to copy.
10880+ *
10881+ * Context: User context only. This function may sleep.
10882+ *
10883+ * Copy data from user space to kernel space.
10884+ *
10885+ * Returns number of bytes that could not be copied.
10886+ * On success, this will be zero.
10887+ *
10888+ * If some data could not be copied, this function will pad the copied
10889+ * data to the requested size using zero bytes.
10890+ */
10891+static inline unsigned long __must_check
10892+copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896- if (likely(sz == -1 || sz >= n))
10897- n = _copy_from_user(to, from, n);
10898- else
10899+ if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901-
10902+ else if (access_ok(VERIFY_READ, from, n))
10903+ n = __copy_from_user(to, from, n);
10904+ else if ((long)n > 0) {
10905+ if (!__builtin_constant_p(n))
10906+ check_object_size(to, n, false);
10907+ memset(to, 0, n);
10908+ }
10909 return n;
10910 }
10911
10912diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913index 1c66d30..23ab77d 100644
10914--- a/arch/x86/include/asm/uaccess_64.h
10915+++ b/arch/x86/include/asm/uaccess_64.h
10916@@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920+#include <asm/pgtable.h>
10921+
10922+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926@@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930-copy_user_generic_string(void *to, const void *from, unsigned len);
10931+copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937-copy_user_generic(void *to, const void *from, unsigned len)
10938+copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942@@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 return ret;
10944 }
10945
10946+static __always_inline __must_check unsigned long
10947+__copy_to_user(void __user *to, const void *from, unsigned long len);
10948+static __always_inline __must_check unsigned long
10949+__copy_from_user(void *to, const void __user *from, unsigned long len);
10950 __must_check unsigned long
10951-_copy_to_user(void __user *to, const void *from, unsigned len);
10952-__must_check unsigned long
10953-_copy_from_user(void *to, const void __user *from, unsigned len);
10954-__must_check unsigned long
10955-copy_in_user(void __user *to, const void __user *from, unsigned len);
10956+copy_in_user(void __user *to, const void __user *from, unsigned long len);
10957
10958 static inline unsigned long __must_check copy_from_user(void *to,
10959 const void __user *from,
10960 unsigned long n)
10961 {
10962- int sz = __compiletime_object_size(to);
10963-
10964 might_fault();
10965- if (likely(sz == -1 || sz >= n))
10966- n = _copy_from_user(to, from, n);
10967-#ifdef CONFIG_DEBUG_VM
10968- else
10969- WARN(1, "Buffer overflow detected!\n");
10970-#endif
10971+
10972+ if (access_ok(VERIFY_READ, from, n))
10973+ n = __copy_from_user(to, from, n);
10974+ else if (n < INT_MAX) {
10975+ if (!__builtin_constant_p(n))
10976+ check_object_size(to, n, false);
10977+ memset(to, 0, n);
10978+ }
10979 return n;
10980 }
10981
10982 static __always_inline __must_check
10983-int copy_to_user(void __user *dst, const void *src, unsigned size)
10984+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10985 {
10986 might_fault();
10987
10988- return _copy_to_user(dst, src, size);
10989+ if (access_ok(VERIFY_WRITE, dst, size))
10990+ size = __copy_to_user(dst, src, size);
10991+ return size;
10992 }
10993
10994 static __always_inline __must_check
10995-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10996+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10997 {
10998- int ret = 0;
10999+ int sz = __compiletime_object_size(dst);
11000+ unsigned ret = 0;
11001
11002 might_fault();
11003- if (!__builtin_constant_p(size))
11004- return copy_user_generic(dst, (__force void *)src, size);
11005+
11006+ if (size > INT_MAX)
11007+ return size;
11008+
11009+#ifdef CONFIG_PAX_MEMORY_UDEREF
11010+ if (!__access_ok(VERIFY_READ, src, size))
11011+ return size;
11012+#endif
11013+
11014+ if (unlikely(sz != -1 && sz < size)) {
11015+#ifdef CONFIG_DEBUG_VM
11016+ WARN(1, "Buffer overflow detected!\n");
11017+#endif
11018+ return size;
11019+ }
11020+
11021+ if (!__builtin_constant_p(size)) {
11022+ check_object_size(dst, size, false);
11023+
11024+#ifdef CONFIG_PAX_MEMORY_UDEREF
11025+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11026+ src += PAX_USER_SHADOW_BASE;
11027+#endif
11028+
11029+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11030+ }
11031 switch (size) {
11032- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11033+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11034 ret, "b", "b", "=q", 1);
11035 return ret;
11036- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11037+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11038 ret, "w", "w", "=r", 2);
11039 return ret;
11040- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11041+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11042 ret, "l", "k", "=r", 4);
11043 return ret;
11044- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11045+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11046 ret, "q", "", "=r", 8);
11047 return ret;
11048 case 10:
11049- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 10);
11052 if (unlikely(ret))
11053 return ret;
11054 __get_user_asm(*(u16 *)(8 + (char *)dst),
11055- (u16 __user *)(8 + (char __user *)src),
11056+ (const u16 __user *)(8 + (const char __user *)src),
11057 ret, "w", "w", "=r", 2);
11058 return ret;
11059 case 16:
11060- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11061+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11062 ret, "q", "", "=r", 16);
11063 if (unlikely(ret))
11064 return ret;
11065 __get_user_asm(*(u64 *)(8 + (char *)dst),
11066- (u64 __user *)(8 + (char __user *)src),
11067+ (const u64 __user *)(8 + (const char __user *)src),
11068 ret, "q", "", "=r", 8);
11069 return ret;
11070 default:
11071- return copy_user_generic(dst, (__force void *)src, size);
11072+
11073+#ifdef CONFIG_PAX_MEMORY_UDEREF
11074+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11075+ src += PAX_USER_SHADOW_BASE;
11076+#endif
11077+
11078+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11079 }
11080 }
11081
11082 static __always_inline __must_check
11083-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11084+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11085 {
11086- int ret = 0;
11087+ int sz = __compiletime_object_size(src);
11088+ unsigned ret = 0;
11089
11090 might_fault();
11091- if (!__builtin_constant_p(size))
11092- return copy_user_generic((__force void *)dst, src, size);
11093+
11094+ if (size > INT_MAX)
11095+ return size;
11096+
11097+#ifdef CONFIG_PAX_MEMORY_UDEREF
11098+ if (!__access_ok(VERIFY_WRITE, dst, size))
11099+ return size;
11100+#endif
11101+
11102+ if (unlikely(sz != -1 && sz < size)) {
11103+#ifdef CONFIG_DEBUG_VM
11104+ WARN(1, "Buffer overflow detected!\n");
11105+#endif
11106+ return size;
11107+ }
11108+
11109+ if (!__builtin_constant_p(size)) {
11110+ check_object_size(src, size, true);
11111+
11112+#ifdef CONFIG_PAX_MEMORY_UDEREF
11113+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114+ dst += PAX_USER_SHADOW_BASE;
11115+#endif
11116+
11117+ return copy_user_generic((__force_kernel void *)dst, src, size);
11118+ }
11119 switch (size) {
11120- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11121+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11122 ret, "b", "b", "iq", 1);
11123 return ret;
11124- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11125+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11126 ret, "w", "w", "ir", 2);
11127 return ret;
11128- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11129+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11130 ret, "l", "k", "ir", 4);
11131 return ret;
11132- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11133+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11134 ret, "q", "", "er", 8);
11135 return ret;
11136 case 10:
11137- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 10);
11140 if (unlikely(ret))
11141 return ret;
11142 asm("":::"memory");
11143- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11144+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11145 ret, "w", "w", "ir", 2);
11146 return ret;
11147 case 16:
11148- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11149+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11150 ret, "q", "", "er", 16);
11151 if (unlikely(ret))
11152 return ret;
11153 asm("":::"memory");
11154- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11155+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11156 ret, "q", "", "er", 8);
11157 return ret;
11158 default:
11159- return copy_user_generic((__force void *)dst, src, size);
11160+
11161+#ifdef CONFIG_PAX_MEMORY_UDEREF
11162+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11163+ dst += PAX_USER_SHADOW_BASE;
11164+#endif
11165+
11166+ return copy_user_generic((__force_kernel void *)dst, src, size);
11167 }
11168 }
11169
11170 static __always_inline __must_check
11171-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11172+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11173 {
11174- int ret = 0;
11175+ unsigned ret = 0;
11176
11177 might_fault();
11178- if (!__builtin_constant_p(size))
11179- return copy_user_generic((__force void *)dst,
11180- (__force void *)src, size);
11181+
11182+ if (size > INT_MAX)
11183+ return size;
11184+
11185+#ifdef CONFIG_PAX_MEMORY_UDEREF
11186+ if (!__access_ok(VERIFY_READ, src, size))
11187+ return size;
11188+ if (!__access_ok(VERIFY_WRITE, dst, size))
11189+ return size;
11190+#endif
11191+
11192+ if (!__builtin_constant_p(size)) {
11193+
11194+#ifdef CONFIG_PAX_MEMORY_UDEREF
11195+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11196+ src += PAX_USER_SHADOW_BASE;
11197+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11198+ dst += PAX_USER_SHADOW_BASE;
11199+#endif
11200+
11201+ return copy_user_generic((__force_kernel void *)dst,
11202+ (__force_kernel const void *)src, size);
11203+ }
11204 switch (size) {
11205 case 1: {
11206 u8 tmp;
11207- __get_user_asm(tmp, (u8 __user *)src,
11208+ __get_user_asm(tmp, (const u8 __user *)src,
11209 ret, "b", "b", "=q", 1);
11210 if (likely(!ret))
11211 __put_user_asm(tmp, (u8 __user *)dst,
11212@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11213 }
11214 case 2: {
11215 u16 tmp;
11216- __get_user_asm(tmp, (u16 __user *)src,
11217+ __get_user_asm(tmp, (const u16 __user *)src,
11218 ret, "w", "w", "=r", 2);
11219 if (likely(!ret))
11220 __put_user_asm(tmp, (u16 __user *)dst,
11221@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11222
11223 case 4: {
11224 u32 tmp;
11225- __get_user_asm(tmp, (u32 __user *)src,
11226+ __get_user_asm(tmp, (const u32 __user *)src,
11227 ret, "l", "k", "=r", 4);
11228 if (likely(!ret))
11229 __put_user_asm(tmp, (u32 __user *)dst,
11230@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11231 }
11232 case 8: {
11233 u64 tmp;
11234- __get_user_asm(tmp, (u64 __user *)src,
11235+ __get_user_asm(tmp, (const u64 __user *)src,
11236 ret, "q", "", "=r", 8);
11237 if (likely(!ret))
11238 __put_user_asm(tmp, (u64 __user *)dst,
11239@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11240 return ret;
11241 }
11242 default:
11243- return copy_user_generic((__force void *)dst,
11244- (__force void *)src, size);
11245+
11246+#ifdef CONFIG_PAX_MEMORY_UDEREF
11247+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11248+ src += PAX_USER_SHADOW_BASE;
11249+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11250+ dst += PAX_USER_SHADOW_BASE;
11251+#endif
11252+
11253+ return copy_user_generic((__force_kernel void *)dst,
11254+ (__force_kernel const void *)src, size);
11255 }
11256 }
11257
11258@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11259 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11260
11261 static __must_check __always_inline int
11262-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11263+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11264 {
11265- return copy_user_generic(dst, (__force const void *)src, size);
11266+ if (size > INT_MAX)
11267+ return size;
11268+
11269+#ifdef CONFIG_PAX_MEMORY_UDEREF
11270+ if (!__access_ok(VERIFY_READ, src, size))
11271+ return size;
11272+
11273+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274+ src += PAX_USER_SHADOW_BASE;
11275+#endif
11276+
11277+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 }
11279
11280-static __must_check __always_inline int
11281-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11282+static __must_check __always_inline unsigned long
11283+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11284 {
11285- return copy_user_generic((__force void *)dst, src, size);
11286+ if (size > INT_MAX)
11287+ return size;
11288+
11289+#ifdef CONFIG_PAX_MEMORY_UDEREF
11290+ if (!__access_ok(VERIFY_WRITE, dst, size))
11291+ return size;
11292+
11293+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11294+ dst += PAX_USER_SHADOW_BASE;
11295+#endif
11296+
11297+ return copy_user_generic((__force_kernel void *)dst, src, size);
11298 }
11299
11300-extern long __copy_user_nocache(void *dst, const void __user *src,
11301- unsigned size, int zerorest);
11302+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11303+ unsigned long size, int zerorest);
11304
11305-static inline int
11306-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11307+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11308 {
11309 might_sleep();
11310+
11311+ if (size > INT_MAX)
11312+ return size;
11313+
11314+#ifdef CONFIG_PAX_MEMORY_UDEREF
11315+ if (!__access_ok(VERIFY_READ, src, size))
11316+ return size;
11317+#endif
11318+
11319 return __copy_user_nocache(dst, src, size, 1);
11320 }
11321
11322-static inline int
11323-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11324- unsigned size)
11325+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11326+ unsigned long size)
11327 {
11328+ if (size > INT_MAX)
11329+ return size;
11330+
11331+#ifdef CONFIG_PAX_MEMORY_UDEREF
11332+ if (!__access_ok(VERIFY_READ, src, size))
11333+ return size;
11334+#endif
11335+
11336 return __copy_user_nocache(dst, src, size, 0);
11337 }
11338
11339-unsigned long
11340-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11341+extern unsigned long
11342+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11343
11344 #endif /* _ASM_X86_UACCESS_64_H */
11345diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11346index bb05228..d763d5b 100644
11347--- a/arch/x86/include/asm/vdso.h
11348+++ b/arch/x86/include/asm/vdso.h
11349@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11350 #define VDSO32_SYMBOL(base, name) \
11351 ({ \
11352 extern const char VDSO32_##name[]; \
11353- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11354+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11355 })
11356 #endif
11357
11358diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11359index 1971e65..1e3559b 100644
11360--- a/arch/x86/include/asm/x86_init.h
11361+++ b/arch/x86/include/asm/x86_init.h
11362@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11363 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11364 void (*find_smp_config)(void);
11365 void (*get_smp_config)(unsigned int early);
11366-};
11367+} __no_const;
11368
11369 /**
11370 * struct x86_init_resources - platform specific resource related ops
11371@@ -42,7 +42,7 @@ struct x86_init_resources {
11372 void (*probe_roms)(void);
11373 void (*reserve_resources)(void);
11374 char *(*memory_setup)(void);
11375-};
11376+} __no_const;
11377
11378 /**
11379 * struct x86_init_irqs - platform specific interrupt setup
11380@@ -55,7 +55,7 @@ struct x86_init_irqs {
11381 void (*pre_vector_init)(void);
11382 void (*intr_init)(void);
11383 void (*trap_init)(void);
11384-};
11385+} __no_const;
11386
11387 /**
11388 * struct x86_init_oem - oem platform specific customizing functions
11389@@ -65,7 +65,7 @@ struct x86_init_irqs {
11390 struct x86_init_oem {
11391 void (*arch_setup)(void);
11392 void (*banner)(void);
11393-};
11394+} __no_const;
11395
11396 /**
11397 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11398@@ -76,7 +76,7 @@ struct x86_init_oem {
11399 */
11400 struct x86_init_mapping {
11401 void (*pagetable_reserve)(u64 start, u64 end);
11402-};
11403+} __no_const;
11404
11405 /**
11406 * struct x86_init_paging - platform specific paging functions
11407@@ -86,7 +86,7 @@ struct x86_init_mapping {
11408 struct x86_init_paging {
11409 void (*pagetable_setup_start)(pgd_t *base);
11410 void (*pagetable_setup_done)(pgd_t *base);
11411-};
11412+} __no_const;
11413
11414 /**
11415 * struct x86_init_timers - platform specific timer setup
11416@@ -101,7 +101,7 @@ struct x86_init_timers {
11417 void (*tsc_pre_init)(void);
11418 void (*timer_init)(void);
11419 void (*wallclock_init)(void);
11420-};
11421+} __no_const;
11422
11423 /**
11424 * struct x86_init_iommu - platform specific iommu setup
11425@@ -109,7 +109,7 @@ struct x86_init_timers {
11426 */
11427 struct x86_init_iommu {
11428 int (*iommu_init)(void);
11429-};
11430+} __no_const;
11431
11432 /**
11433 * struct x86_init_pci - platform specific pci init functions
11434@@ -123,7 +123,7 @@ struct x86_init_pci {
11435 int (*init)(void);
11436 void (*init_irq)(void);
11437 void (*fixup_irqs)(void);
11438-};
11439+} __no_const;
11440
11441 /**
11442 * struct x86_init_ops - functions for platform specific setup
11443@@ -139,7 +139,7 @@ struct x86_init_ops {
11444 struct x86_init_timers timers;
11445 struct x86_init_iommu iommu;
11446 struct x86_init_pci pci;
11447-};
11448+} __no_const;
11449
11450 /**
11451 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11452@@ -147,7 +147,7 @@ struct x86_init_ops {
11453 */
11454 struct x86_cpuinit_ops {
11455 void (*setup_percpu_clockev)(void);
11456-};
11457+} __no_const;
11458
11459 /**
11460 * struct x86_platform_ops - platform specific runtime functions
11461@@ -169,7 +169,7 @@ struct x86_platform_ops {
11462 void (*nmi_init)(void);
11463 unsigned char (*get_nmi_reason)(void);
11464 int (*i8042_detect)(void);
11465-};
11466+} __no_const;
11467
11468 struct pci_dev;
11469
11470@@ -177,7 +177,7 @@ struct x86_msi_ops {
11471 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11472 void (*teardown_msi_irq)(unsigned int irq);
11473 void (*teardown_msi_irqs)(struct pci_dev *dev);
11474-};
11475+} __no_const;
11476
11477 extern struct x86_init_ops x86_init;
11478 extern struct x86_cpuinit_ops x86_cpuinit;
11479diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11480index c6ce245..ffbdab7 100644
11481--- a/arch/x86/include/asm/xsave.h
11482+++ b/arch/x86/include/asm/xsave.h
11483@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11484 {
11485 int err;
11486
11487+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11489+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11490+#endif
11491+
11492 /*
11493 * Clear the xsave header first, so that reserved fields are
11494 * initialized to zero.
11495@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11496 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11497 {
11498 int err;
11499- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11500+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11501 u32 lmask = mask;
11502 u32 hmask = mask >> 32;
11503
11504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11505+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11506+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11507+#endif
11508+
11509 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11510 "2:\n"
11511 ".section .fixup,\"ax\"\n"
11512diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11513index 6a564ac..9b1340c 100644
11514--- a/arch/x86/kernel/acpi/realmode/Makefile
11515+++ b/arch/x86/kernel/acpi/realmode/Makefile
11516@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11517 $(call cc-option, -fno-stack-protector) \
11518 $(call cc-option, -mpreferred-stack-boundary=2)
11519 KBUILD_CFLAGS += $(call cc-option, -m32)
11520+ifdef CONSTIFY_PLUGIN
11521+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11522+endif
11523 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11524 GCOV_PROFILE := n
11525
11526diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11527index b4fd836..4358fe3 100644
11528--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11529+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11530@@ -108,6 +108,9 @@ wakeup_code:
11531 /* Do any other stuff... */
11532
11533 #ifndef CONFIG_64BIT
11534+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11535+ call verify_cpu
11536+
11537 /* This could also be done in C code... */
11538 movl pmode_cr3, %eax
11539 movl %eax, %cr3
11540@@ -131,6 +134,7 @@ wakeup_code:
11541 movl pmode_cr0, %eax
11542 movl %eax, %cr0
11543 jmp pmode_return
11544+# include "../../verify_cpu.S"
11545 #else
11546 pushw $0
11547 pushw trampoline_segment
11548diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11549index 103b6ab..2004d0a 100644
11550--- a/arch/x86/kernel/acpi/sleep.c
11551+++ b/arch/x86/kernel/acpi/sleep.c
11552@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11553 header->trampoline_segment = trampoline_address() >> 4;
11554 #ifdef CONFIG_SMP
11555 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11556+
11557+ pax_open_kernel();
11558 early_gdt_descr.address =
11559 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11560+ pax_close_kernel();
11561+
11562 initial_gs = per_cpu_offset(smp_processor_id());
11563 #endif
11564 initial_code = (unsigned long)wakeup_long64;
11565diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11566index 13ab720..95d5442 100644
11567--- a/arch/x86/kernel/acpi/wakeup_32.S
11568+++ b/arch/x86/kernel/acpi/wakeup_32.S
11569@@ -30,13 +30,11 @@ wakeup_pmode_return:
11570 # and restore the stack ... but you need gdt for this to work
11571 movl saved_context_esp, %esp
11572
11573- movl %cs:saved_magic, %eax
11574- cmpl $0x12345678, %eax
11575+ cmpl $0x12345678, saved_magic
11576 jne bogus_magic
11577
11578 # jump to place where we left off
11579- movl saved_eip, %eax
11580- jmp *%eax
11581+ jmp *(saved_eip)
11582
11583 bogus_magic:
11584 jmp bogus_magic
11585diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11586index 1f84794..e23f862 100644
11587--- a/arch/x86/kernel/alternative.c
11588+++ b/arch/x86/kernel/alternative.c
11589@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11590 */
11591 for (a = start; a < end; a++) {
11592 instr = (u8 *)&a->instr_offset + a->instr_offset;
11593+
11594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11595+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11596+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11597+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11598+#endif
11599+
11600 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11601 BUG_ON(a->replacementlen > a->instrlen);
11602 BUG_ON(a->instrlen > sizeof(insnbuf));
11603@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11604 for (poff = start; poff < end; poff++) {
11605 u8 *ptr = (u8 *)poff + *poff;
11606
11607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11608+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11609+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11610+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11611+#endif
11612+
11613 if (!*poff || ptr < text || ptr >= text_end)
11614 continue;
11615 /* turn DS segment override prefix into lock prefix */
11616- if (*ptr == 0x3e)
11617+ if (*ktla_ktva(ptr) == 0x3e)
11618 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11619 };
11620 mutex_unlock(&text_mutex);
11621@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11622 for (poff = start; poff < end; poff++) {
11623 u8 *ptr = (u8 *)poff + *poff;
11624
11625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11626+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11627+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11628+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11629+#endif
11630+
11631 if (!*poff || ptr < text || ptr >= text_end)
11632 continue;
11633 /* turn lock prefix into DS segment override prefix */
11634- if (*ptr == 0xf0)
11635+ if (*ktla_ktva(ptr) == 0xf0)
11636 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11637 };
11638 mutex_unlock(&text_mutex);
11639@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11640
11641 BUG_ON(p->len > MAX_PATCH_LEN);
11642 /* prep the buffer with the original instructions */
11643- memcpy(insnbuf, p->instr, p->len);
11644+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11645 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11646 (unsigned long)p->instr, p->len);
11647
11648@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11649 if (smp_alt_once)
11650 free_init_pages("SMP alternatives",
11651 (unsigned long)__smp_locks,
11652- (unsigned long)__smp_locks_end);
11653+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11654
11655 restart_nmi();
11656 }
11657@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11658 * instructions. And on the local CPU you need to be protected again NMI or MCE
11659 * handlers seeing an inconsistent instruction while you patch.
11660 */
11661-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11662+void *__kprobes text_poke_early(void *addr, const void *opcode,
11663 size_t len)
11664 {
11665 unsigned long flags;
11666 local_irq_save(flags);
11667- memcpy(addr, opcode, len);
11668+
11669+ pax_open_kernel();
11670+ memcpy(ktla_ktva(addr), opcode, len);
11671 sync_core();
11672+ pax_close_kernel();
11673+
11674 local_irq_restore(flags);
11675 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11676 that causes hangs on some VIA CPUs. */
11677@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11678 */
11679 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11680 {
11681- unsigned long flags;
11682- char *vaddr;
11683+ unsigned char *vaddr = ktla_ktva(addr);
11684 struct page *pages[2];
11685- int i;
11686+ size_t i;
11687
11688 if (!core_kernel_text((unsigned long)addr)) {
11689- pages[0] = vmalloc_to_page(addr);
11690- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11691+ pages[0] = vmalloc_to_page(vaddr);
11692+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11693 } else {
11694- pages[0] = virt_to_page(addr);
11695+ pages[0] = virt_to_page(vaddr);
11696 WARN_ON(!PageReserved(pages[0]));
11697- pages[1] = virt_to_page(addr + PAGE_SIZE);
11698+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11699 }
11700 BUG_ON(!pages[0]);
11701- local_irq_save(flags);
11702- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11703- if (pages[1])
11704- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11705- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11706- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11707- clear_fixmap(FIX_TEXT_POKE0);
11708- if (pages[1])
11709- clear_fixmap(FIX_TEXT_POKE1);
11710- local_flush_tlb();
11711- sync_core();
11712- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11713- that causes hangs on some VIA CPUs. */
11714+ text_poke_early(addr, opcode, len);
11715 for (i = 0; i < len; i++)
11716- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11717- local_irq_restore(flags);
11718+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11719 return addr;
11720 }
11721
11722diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11723index f98d84c..e402a69 100644
11724--- a/arch/x86/kernel/apic/apic.c
11725+++ b/arch/x86/kernel/apic/apic.c
11726@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11727 /*
11728 * Debug level, exported for io_apic.c
11729 */
11730-unsigned int apic_verbosity;
11731+int apic_verbosity;
11732
11733 int pic_mode;
11734
11735@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11736 apic_write(APIC_ESR, 0);
11737 v1 = apic_read(APIC_ESR);
11738 ack_APIC_irq();
11739- atomic_inc(&irq_err_count);
11740+ atomic_inc_unchecked(&irq_err_count);
11741
11742 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11743 smp_processor_id(), v0 , v1);
11744diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11745index 6d939d7..0697fcc 100644
11746--- a/arch/x86/kernel/apic/io_apic.c
11747+++ b/arch/x86/kernel/apic/io_apic.c
11748@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11749 }
11750 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11751
11752-void lock_vector_lock(void)
11753+void lock_vector_lock(void) __acquires(vector_lock)
11754 {
11755 /* Used to the online set of cpus does not change
11756 * during assign_irq_vector.
11757@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11758 raw_spin_lock(&vector_lock);
11759 }
11760
11761-void unlock_vector_lock(void)
11762+void unlock_vector_lock(void) __releases(vector_lock)
11763 {
11764 raw_spin_unlock(&vector_lock);
11765 }
11766@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11767 ack_APIC_irq();
11768 }
11769
11770-atomic_t irq_mis_count;
11771+atomic_unchecked_t irq_mis_count;
11772
11773 static void ack_apic_level(struct irq_data *data)
11774 {
11775@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11776 * at the cpu.
11777 */
11778 if (!(v & (1 << (i & 0x1f)))) {
11779- atomic_inc(&irq_mis_count);
11780+ atomic_inc_unchecked(&irq_mis_count);
11781
11782 eoi_ioapic_irq(irq, cfg);
11783 }
11784diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11785index a46bd38..6b906d7 100644
11786--- a/arch/x86/kernel/apm_32.c
11787+++ b/arch/x86/kernel/apm_32.c
11788@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11789 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11790 * even though they are called in protected mode.
11791 */
11792-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11793+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11794 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11795
11796 static const char driver_version[] = "1.16ac"; /* no spaces */
11797@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11798 BUG_ON(cpu != 0);
11799 gdt = get_cpu_gdt_table(cpu);
11800 save_desc_40 = gdt[0x40 / 8];
11801+
11802+ pax_open_kernel();
11803 gdt[0x40 / 8] = bad_bios_desc;
11804+ pax_close_kernel();
11805
11806 apm_irq_save(flags);
11807 APM_DO_SAVE_SEGS;
11808@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11809 &call->esi);
11810 APM_DO_RESTORE_SEGS;
11811 apm_irq_restore(flags);
11812+
11813+ pax_open_kernel();
11814 gdt[0x40 / 8] = save_desc_40;
11815+ pax_close_kernel();
11816+
11817 put_cpu();
11818
11819 return call->eax & 0xff;
11820@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11821 BUG_ON(cpu != 0);
11822 gdt = get_cpu_gdt_table(cpu);
11823 save_desc_40 = gdt[0x40 / 8];
11824+
11825+ pax_open_kernel();
11826 gdt[0x40 / 8] = bad_bios_desc;
11827+ pax_close_kernel();
11828
11829 apm_irq_save(flags);
11830 APM_DO_SAVE_SEGS;
11831@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11832 &call->eax);
11833 APM_DO_RESTORE_SEGS;
11834 apm_irq_restore(flags);
11835+
11836+ pax_open_kernel();
11837 gdt[0x40 / 8] = save_desc_40;
11838+ pax_close_kernel();
11839+
11840 put_cpu();
11841 return error;
11842 }
11843@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11844 * code to that CPU.
11845 */
11846 gdt = get_cpu_gdt_table(0);
11847+
11848+ pax_open_kernel();
11849 set_desc_base(&gdt[APM_CS >> 3],
11850 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11851 set_desc_base(&gdt[APM_CS_16 >> 3],
11852 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11853 set_desc_base(&gdt[APM_DS >> 3],
11854 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11855+ pax_close_kernel();
11856
11857 proc_create("apm", 0, NULL, &apm_file_ops);
11858
11859diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11860index 4f13faf..87db5d2 100644
11861--- a/arch/x86/kernel/asm-offsets.c
11862+++ b/arch/x86/kernel/asm-offsets.c
11863@@ -33,6 +33,8 @@ void common(void) {
11864 OFFSET(TI_status, thread_info, status);
11865 OFFSET(TI_addr_limit, thread_info, addr_limit);
11866 OFFSET(TI_preempt_count, thread_info, preempt_count);
11867+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11868+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11869
11870 BLANK();
11871 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11872@@ -53,8 +55,26 @@ void common(void) {
11873 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11874 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11875 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11876+
11877+#ifdef CONFIG_PAX_KERNEXEC
11878+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11879 #endif
11880
11881+#ifdef CONFIG_PAX_MEMORY_UDEREF
11882+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11883+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11884+#ifdef CONFIG_X86_64
11885+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11886+#endif
11887+#endif
11888+
11889+#endif
11890+
11891+ BLANK();
11892+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11893+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11894+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11895+
11896 #ifdef CONFIG_XEN
11897 BLANK();
11898 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11899diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11900index e72a119..6e2955d 100644
11901--- a/arch/x86/kernel/asm-offsets_64.c
11902+++ b/arch/x86/kernel/asm-offsets_64.c
11903@@ -69,6 +69,7 @@ int main(void)
11904 BLANK();
11905 #undef ENTRY
11906
11907+ DEFINE(TSS_size, sizeof(struct tss_struct));
11908 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11909 BLANK();
11910
11911diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11912index 25f24dc..4094a7f 100644
11913--- a/arch/x86/kernel/cpu/Makefile
11914+++ b/arch/x86/kernel/cpu/Makefile
11915@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11916 CFLAGS_REMOVE_perf_event.o = -pg
11917 endif
11918
11919-# Make sure load_percpu_segment has no stackprotector
11920-nostackp := $(call cc-option, -fno-stack-protector)
11921-CFLAGS_common.o := $(nostackp)
11922-
11923 obj-y := intel_cacheinfo.o scattered.o topology.o
11924 obj-y += proc.o capflags.o powerflags.o common.o
11925 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11926diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11927index 0bab2b1..d0a1bf8 100644
11928--- a/arch/x86/kernel/cpu/amd.c
11929+++ b/arch/x86/kernel/cpu/amd.c
11930@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11931 unsigned int size)
11932 {
11933 /* AMD errata T13 (order #21922) */
11934- if ((c->x86 == 6)) {
11935+ if (c->x86 == 6) {
11936 /* Duron Rev A0 */
11937 if (c->x86_model == 3 && c->x86_mask == 0)
11938 size = 64;
11939diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11940index aa003b1..47ea638 100644
11941--- a/arch/x86/kernel/cpu/common.c
11942+++ b/arch/x86/kernel/cpu/common.c
11943@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11944
11945 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11946
11947-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11948-#ifdef CONFIG_X86_64
11949- /*
11950- * We need valid kernel segments for data and code in long mode too
11951- * IRET will check the segment types kkeil 2000/10/28
11952- * Also sysret mandates a special GDT layout
11953- *
11954- * TLS descriptors are currently at a different place compared to i386.
11955- * Hopefully nobody expects them at a fixed place (Wine?)
11956- */
11957- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11958- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11959- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11960- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11961- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11962- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11963-#else
11964- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11965- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11966- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11967- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11968- /*
11969- * Segments used for calling PnP BIOS have byte granularity.
11970- * They code segments and data segments have fixed 64k limits,
11971- * the transfer segment sizes are set at run time.
11972- */
11973- /* 32-bit code */
11974- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11975- /* 16-bit code */
11976- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11977- /* 16-bit data */
11978- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11979- /* 16-bit data */
11980- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11981- /* 16-bit data */
11982- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11983- /*
11984- * The APM segments have byte granularity and their bases
11985- * are set at run time. All have 64k limits.
11986- */
11987- /* 32-bit code */
11988- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11989- /* 16-bit code */
11990- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11991- /* data */
11992- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11993-
11994- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11995- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11996- GDT_STACK_CANARY_INIT
11997-#endif
11998-} };
11999-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12000-
12001 static int __init x86_xsave_setup(char *s)
12002 {
12003 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12004@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12005 {
12006 struct desc_ptr gdt_descr;
12007
12008- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12009+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12010 gdt_descr.size = GDT_SIZE - 1;
12011 load_gdt(&gdt_descr);
12012 /* Reload the per-cpu base */
12013@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12014 /* Filter out anything that depends on CPUID levels we don't have */
12015 filter_cpuid_features(c, true);
12016
12017+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12018+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12019+#endif
12020+
12021 /* If the model name is still unset, do table lookup. */
12022 if (!c->x86_model_id[0]) {
12023 const char *p;
12024@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12025 }
12026 __setup("clearcpuid=", setup_disablecpuid);
12027
12028+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12029+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12030+
12031 #ifdef CONFIG_X86_64
12032 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12033
12034@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12035 EXPORT_PER_CPU_SYMBOL(current_task);
12036
12037 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12038- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12039+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12040 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12041
12042 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12043@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12044 {
12045 memset(regs, 0, sizeof(struct pt_regs));
12046 regs->fs = __KERNEL_PERCPU;
12047- regs->gs = __KERNEL_STACK_CANARY;
12048+ savesegment(gs, regs->gs);
12049
12050 return regs;
12051 }
12052@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12053 int i;
12054
12055 cpu = stack_smp_processor_id();
12056- t = &per_cpu(init_tss, cpu);
12057+ t = init_tss + cpu;
12058 oist = &per_cpu(orig_ist, cpu);
12059
12060 #ifdef CONFIG_NUMA
12061@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12062 switch_to_new_gdt(cpu);
12063 loadsegment(fs, 0);
12064
12065- load_idt((const struct desc_ptr *)&idt_descr);
12066+ load_idt(&idt_descr);
12067
12068 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12069 syscall_init();
12070@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12071 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12072 barrier();
12073
12074- x86_configure_nx();
12075 if (cpu != 0)
12076 enable_x2apic();
12077
12078@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12079 {
12080 int cpu = smp_processor_id();
12081 struct task_struct *curr = current;
12082- struct tss_struct *t = &per_cpu(init_tss, cpu);
12083+ struct tss_struct *t = init_tss + cpu;
12084 struct thread_struct *thread = &curr->thread;
12085
12086 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12087diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12088index 5231312..a78a987 100644
12089--- a/arch/x86/kernel/cpu/intel.c
12090+++ b/arch/x86/kernel/cpu/intel.c
12091@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12092 * Update the IDT descriptor and reload the IDT so that
12093 * it uses the read-only mapped virtual address.
12094 */
12095- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12096+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12097 load_idt(&idt_descr);
12098 }
12099 #endif
12100diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12101index 2af127d..8ff7ac0 100644
12102--- a/arch/x86/kernel/cpu/mcheck/mce.c
12103+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12104@@ -42,6 +42,7 @@
12105 #include <asm/processor.h>
12106 #include <asm/mce.h>
12107 #include <asm/msr.h>
12108+#include <asm/local.h>
12109
12110 #include "mce-internal.h"
12111
12112@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12113 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12114 m->cs, m->ip);
12115
12116- if (m->cs == __KERNEL_CS)
12117+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12118 print_symbol("{%s}", m->ip);
12119 pr_cont("\n");
12120 }
12121@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12122
12123 #define PANIC_TIMEOUT 5 /* 5 seconds */
12124
12125-static atomic_t mce_paniced;
12126+static atomic_unchecked_t mce_paniced;
12127
12128 static int fake_panic;
12129-static atomic_t mce_fake_paniced;
12130+static atomic_unchecked_t mce_fake_paniced;
12131
12132 /* Panic in progress. Enable interrupts and wait for final IPI */
12133 static void wait_for_panic(void)
12134@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12135 /*
12136 * Make sure only one CPU runs in machine check panic
12137 */
12138- if (atomic_inc_return(&mce_paniced) > 1)
12139+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12140 wait_for_panic();
12141 barrier();
12142
12143@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12144 console_verbose();
12145 } else {
12146 /* Don't log too much for fake panic */
12147- if (atomic_inc_return(&mce_fake_paniced) > 1)
12148+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12149 return;
12150 }
12151 /* First print corrected ones that are still unlogged */
12152@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12153 * might have been modified by someone else.
12154 */
12155 rmb();
12156- if (atomic_read(&mce_paniced))
12157+ if (atomic_read_unchecked(&mce_paniced))
12158 wait_for_panic();
12159 if (!monarch_timeout)
12160 goto out;
12161@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12162 }
12163
12164 /* Call the installed machine check handler for this CPU setup. */
12165-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12166+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12167 unexpected_machine_check;
12168
12169 /*
12170@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12171 return;
12172 }
12173
12174+ pax_open_kernel();
12175 machine_check_vector = do_machine_check;
12176+ pax_close_kernel();
12177
12178 __mcheck_cpu_init_generic();
12179 __mcheck_cpu_init_vendor(c);
12180@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12181 */
12182
12183 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12184-static int mce_chrdev_open_count; /* #times opened */
12185+static local_t mce_chrdev_open_count; /* #times opened */
12186 static int mce_chrdev_open_exclu; /* already open exclusive? */
12187
12188 static int mce_chrdev_open(struct inode *inode, struct file *file)
12189@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12190 spin_lock(&mce_chrdev_state_lock);
12191
12192 if (mce_chrdev_open_exclu ||
12193- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12194+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12195 spin_unlock(&mce_chrdev_state_lock);
12196
12197 return -EBUSY;
12198@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12199
12200 if (file->f_flags & O_EXCL)
12201 mce_chrdev_open_exclu = 1;
12202- mce_chrdev_open_count++;
12203+ local_inc(&mce_chrdev_open_count);
12204
12205 spin_unlock(&mce_chrdev_state_lock);
12206
12207@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12208 {
12209 spin_lock(&mce_chrdev_state_lock);
12210
12211- mce_chrdev_open_count--;
12212+ local_dec(&mce_chrdev_open_count);
12213 mce_chrdev_open_exclu = 0;
12214
12215 spin_unlock(&mce_chrdev_state_lock);
12216@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12217 static void mce_reset(void)
12218 {
12219 cpu_missing = 0;
12220- atomic_set(&mce_fake_paniced, 0);
12221+ atomic_set_unchecked(&mce_fake_paniced, 0);
12222 atomic_set(&mce_executing, 0);
12223 atomic_set(&mce_callin, 0);
12224 atomic_set(&global_nwo, 0);
12225diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12226index 5c0e653..51ddf2c 100644
12227--- a/arch/x86/kernel/cpu/mcheck/p5.c
12228+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12229@@ -11,7 +11,7 @@
12230 #include <asm/processor.h>
12231 #include <asm/system.h>
12232 #include <asm/mce.h>
12233-#include <asm/msr.h>
12234+#include <asm/pgtable.h>
12235
12236 /* By default disabled */
12237 int mce_p5_enabled __read_mostly;
12238@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12239 if (!cpu_has(c, X86_FEATURE_MCE))
12240 return;
12241
12242+ pax_open_kernel();
12243 machine_check_vector = pentium_machine_check;
12244+ pax_close_kernel();
12245 /* Make sure the vector pointer is visible before we enable MCEs: */
12246 wmb();
12247
12248diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12249index 54060f5..c1a7577 100644
12250--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12251+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12252@@ -11,6 +11,7 @@
12253 #include <asm/system.h>
12254 #include <asm/mce.h>
12255 #include <asm/msr.h>
12256+#include <asm/pgtable.h>
12257
12258 /* Machine check handler for WinChip C6: */
12259 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12260@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12261 {
12262 u32 lo, hi;
12263
12264+ pax_open_kernel();
12265 machine_check_vector = winchip_machine_check;
12266+ pax_close_kernel();
12267 /* Make sure the vector pointer is visible before we enable MCEs: */
12268 wmb();
12269
12270diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12271index 6b96110..0da73eb 100644
12272--- a/arch/x86/kernel/cpu/mtrr/main.c
12273+++ b/arch/x86/kernel/cpu/mtrr/main.c
12274@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12275 u64 size_or_mask, size_and_mask;
12276 static bool mtrr_aps_delayed_init;
12277
12278-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12279+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12280
12281 const struct mtrr_ops *mtrr_if;
12282
12283diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12284index df5e41f..816c719 100644
12285--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12286+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12287@@ -25,7 +25,7 @@ struct mtrr_ops {
12288 int (*validate_add_page)(unsigned long base, unsigned long size,
12289 unsigned int type);
12290 int (*have_wrcomb)(void);
12291-};
12292+} __do_const;
12293
12294 extern int generic_get_free_region(unsigned long base, unsigned long size,
12295 int replace_reg);
12296diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12297index 2bda212..78cc605 100644
12298--- a/arch/x86/kernel/cpu/perf_event.c
12299+++ b/arch/x86/kernel/cpu/perf_event.c
12300@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12301 break;
12302
12303 perf_callchain_store(entry, frame.return_address);
12304- fp = frame.next_frame;
12305+ fp = (const void __force_user *)frame.next_frame;
12306 }
12307 }
12308
12309diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12310index 13ad899..f642b9a 100644
12311--- a/arch/x86/kernel/crash.c
12312+++ b/arch/x86/kernel/crash.c
12313@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12314 {
12315 #ifdef CONFIG_X86_32
12316 struct pt_regs fixed_regs;
12317-#endif
12318
12319-#ifdef CONFIG_X86_32
12320- if (!user_mode_vm(regs)) {
12321+ if (!user_mode(regs)) {
12322 crash_fixup_ss_esp(&fixed_regs, regs);
12323 regs = &fixed_regs;
12324 }
12325diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12326index 37250fe..bf2ec74 100644
12327--- a/arch/x86/kernel/doublefault_32.c
12328+++ b/arch/x86/kernel/doublefault_32.c
12329@@ -11,7 +11,7 @@
12330
12331 #define DOUBLEFAULT_STACKSIZE (1024)
12332 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12333-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12334+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12335
12336 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12337
12338@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12339 unsigned long gdt, tss;
12340
12341 store_gdt(&gdt_desc);
12342- gdt = gdt_desc.address;
12343+ gdt = (unsigned long)gdt_desc.address;
12344
12345 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12346
12347@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12348 /* 0x2 bit is always set */
12349 .flags = X86_EFLAGS_SF | 0x2,
12350 .sp = STACK_START,
12351- .es = __USER_DS,
12352+ .es = __KERNEL_DS,
12353 .cs = __KERNEL_CS,
12354 .ss = __KERNEL_DS,
12355- .ds = __USER_DS,
12356+ .ds = __KERNEL_DS,
12357 .fs = __KERNEL_PERCPU,
12358
12359 .__cr3 = __pa_nodebug(swapper_pg_dir),
12360diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12361index 1aae78f..aab3a3d 100644
12362--- a/arch/x86/kernel/dumpstack.c
12363+++ b/arch/x86/kernel/dumpstack.c
12364@@ -2,6 +2,9 @@
12365 * Copyright (C) 1991, 1992 Linus Torvalds
12366 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12367 */
12368+#ifdef CONFIG_GRKERNSEC_HIDESYM
12369+#define __INCLUDED_BY_HIDESYM 1
12370+#endif
12371 #include <linux/kallsyms.h>
12372 #include <linux/kprobes.h>
12373 #include <linux/uaccess.h>
12374@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12375 static void
12376 print_ftrace_graph_addr(unsigned long addr, void *data,
12377 const struct stacktrace_ops *ops,
12378- struct thread_info *tinfo, int *graph)
12379+ struct task_struct *task, int *graph)
12380 {
12381- struct task_struct *task = tinfo->task;
12382 unsigned long ret_addr;
12383 int index = task->curr_ret_stack;
12384
12385@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12386 static inline void
12387 print_ftrace_graph_addr(unsigned long addr, void *data,
12388 const struct stacktrace_ops *ops,
12389- struct thread_info *tinfo, int *graph)
12390+ struct task_struct *task, int *graph)
12391 { }
12392 #endif
12393
12394@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12395 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12396 */
12397
12398-static inline int valid_stack_ptr(struct thread_info *tinfo,
12399- void *p, unsigned int size, void *end)
12400+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12401 {
12402- void *t = tinfo;
12403 if (end) {
12404 if (p < end && p >= (end-THREAD_SIZE))
12405 return 1;
12406@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12407 }
12408
12409 unsigned long
12410-print_context_stack(struct thread_info *tinfo,
12411+print_context_stack(struct task_struct *task, void *stack_start,
12412 unsigned long *stack, unsigned long bp,
12413 const struct stacktrace_ops *ops, void *data,
12414 unsigned long *end, int *graph)
12415 {
12416 struct stack_frame *frame = (struct stack_frame *)bp;
12417
12418- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12419+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12420 unsigned long addr;
12421
12422 addr = *stack;
12423@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12424 } else {
12425 ops->address(data, addr, 0);
12426 }
12427- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12428+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12429 }
12430 stack++;
12431 }
12432@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12433 EXPORT_SYMBOL_GPL(print_context_stack);
12434
12435 unsigned long
12436-print_context_stack_bp(struct thread_info *tinfo,
12437+print_context_stack_bp(struct task_struct *task, void *stack_start,
12438 unsigned long *stack, unsigned long bp,
12439 const struct stacktrace_ops *ops, void *data,
12440 unsigned long *end, int *graph)
12441@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12442 struct stack_frame *frame = (struct stack_frame *)bp;
12443 unsigned long *ret_addr = &frame->return_address;
12444
12445- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12446+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12447 unsigned long addr = *ret_addr;
12448
12449 if (!__kernel_text_address(addr))
12450@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12451 ops->address(data, addr, 1);
12452 frame = frame->next_frame;
12453 ret_addr = &frame->return_address;
12454- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12455+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12456 }
12457
12458 return (unsigned long)frame;
12459@@ -186,7 +186,7 @@ void dump_stack(void)
12460
12461 bp = stack_frame(current, NULL);
12462 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12463- current->pid, current->comm, print_tainted(),
12464+ task_pid_nr(current), current->comm, print_tainted(),
12465 init_utsname()->release,
12466 (int)strcspn(init_utsname()->version, " "),
12467 init_utsname()->version);
12468@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12469 }
12470 EXPORT_SYMBOL_GPL(oops_begin);
12471
12472+extern void gr_handle_kernel_exploit(void);
12473+
12474 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12475 {
12476 if (regs && kexec_should_crash(current))
12477@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12478 panic("Fatal exception in interrupt");
12479 if (panic_on_oops)
12480 panic("Fatal exception");
12481- do_exit(signr);
12482+
12483+ gr_handle_kernel_exploit();
12484+
12485+ do_group_exit(signr);
12486 }
12487
12488 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12489@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12490
12491 show_registers(regs);
12492 #ifdef CONFIG_X86_32
12493- if (user_mode_vm(regs)) {
12494+ if (user_mode(regs)) {
12495 sp = regs->sp;
12496 ss = regs->ss & 0xffff;
12497 } else {
12498@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12499 unsigned long flags = oops_begin();
12500 int sig = SIGSEGV;
12501
12502- if (!user_mode_vm(regs))
12503+ if (!user_mode(regs))
12504 report_bug(regs->ip, regs);
12505
12506 if (__die(str, regs, err))
12507diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12508index c99f9ed..2a15d80 100644
12509--- a/arch/x86/kernel/dumpstack_32.c
12510+++ b/arch/x86/kernel/dumpstack_32.c
12511@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12512 bp = stack_frame(task, regs);
12513
12514 for (;;) {
12515- struct thread_info *context;
12516+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12517
12518- context = (struct thread_info *)
12519- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12520- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12521+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12522
12523- stack = (unsigned long *)context->previous_esp;
12524- if (!stack)
12525+ if (stack_start == task_stack_page(task))
12526 break;
12527+ stack = *(unsigned long **)stack_start;
12528 if (ops->stack(data, "IRQ") < 0)
12529 break;
12530 touch_nmi_watchdog();
12531@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12532 * When in-kernel, we also print out the stack and code at the
12533 * time of the fault..
12534 */
12535- if (!user_mode_vm(regs)) {
12536+ if (!user_mode(regs)) {
12537 unsigned int code_prologue = code_bytes * 43 / 64;
12538 unsigned int code_len = code_bytes;
12539 unsigned char c;
12540 u8 *ip;
12541+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12542
12543 printk(KERN_EMERG "Stack:\n");
12544 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12545
12546 printk(KERN_EMERG "Code: ");
12547
12548- ip = (u8 *)regs->ip - code_prologue;
12549+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12550 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12551 /* try starting at IP */
12552- ip = (u8 *)regs->ip;
12553+ ip = (u8 *)regs->ip + cs_base;
12554 code_len = code_len - code_prologue + 1;
12555 }
12556 for (i = 0; i < code_len; i++, ip++) {
12557@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12558 printk(KERN_CONT " Bad EIP value.");
12559 break;
12560 }
12561- if (ip == (u8 *)regs->ip)
12562+ if (ip == (u8 *)regs->ip + cs_base)
12563 printk(KERN_CONT "<%02x> ", c);
12564 else
12565 printk(KERN_CONT "%02x ", c);
12566@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12567 {
12568 unsigned short ud2;
12569
12570+ ip = ktla_ktva(ip);
12571 if (ip < PAGE_OFFSET)
12572 return 0;
12573 if (probe_kernel_address((unsigned short *)ip, ud2))
12574@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12575
12576 return ud2 == 0x0b0f;
12577 }
12578+
12579+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12580+void pax_check_alloca(unsigned long size)
12581+{
12582+ unsigned long sp = (unsigned long)&sp, stack_left;
12583+
12584+ /* all kernel stacks are of the same size */
12585+ stack_left = sp & (THREAD_SIZE - 1);
12586+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12587+}
12588+EXPORT_SYMBOL(pax_check_alloca);
12589+#endif
12590diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12591index 6d728d9..279514e 100644
12592--- a/arch/x86/kernel/dumpstack_64.c
12593+++ b/arch/x86/kernel/dumpstack_64.c
12594@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12595 unsigned long *irq_stack_end =
12596 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12597 unsigned used = 0;
12598- struct thread_info *tinfo;
12599 int graph = 0;
12600 unsigned long dummy;
12601+ void *stack_start;
12602
12603 if (!task)
12604 task = current;
12605@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12606 * current stack address. If the stacks consist of nested
12607 * exceptions
12608 */
12609- tinfo = task_thread_info(task);
12610 for (;;) {
12611 char *id;
12612 unsigned long *estack_end;
12613+
12614 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12615 &used, &id);
12616
12617@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12618 if (ops->stack(data, id) < 0)
12619 break;
12620
12621- bp = ops->walk_stack(tinfo, stack, bp, ops,
12622+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12623 data, estack_end, &graph);
12624 ops->stack(data, "<EOE>");
12625 /*
12626@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12627 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12628 if (ops->stack(data, "IRQ") < 0)
12629 break;
12630- bp = ops->walk_stack(tinfo, stack, bp,
12631+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12632 ops, data, irq_stack_end, &graph);
12633 /*
12634 * We link to the next stack (which would be
12635@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12636 /*
12637 * This handles the process stack:
12638 */
12639- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12640+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12641+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12642 put_cpu();
12643 }
12644 EXPORT_SYMBOL(dump_trace);
12645@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12646
12647 return ud2 == 0x0b0f;
12648 }
12649+
12650+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12651+void pax_check_alloca(unsigned long size)
12652+{
12653+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12654+ unsigned cpu, used;
12655+ char *id;
12656+
12657+ /* check the process stack first */
12658+ stack_start = (unsigned long)task_stack_page(current);
12659+ stack_end = stack_start + THREAD_SIZE;
12660+ if (likely(stack_start <= sp && sp < stack_end)) {
12661+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12662+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12663+ return;
12664+ }
12665+
12666+ cpu = get_cpu();
12667+
12668+ /* check the irq stacks */
12669+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12670+ stack_start = stack_end - IRQ_STACK_SIZE;
12671+ if (stack_start <= sp && sp < stack_end) {
12672+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12673+ put_cpu();
12674+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12675+ return;
12676+ }
12677+
12678+ /* check the exception stacks */
12679+ used = 0;
12680+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12681+ stack_start = stack_end - EXCEPTION_STKSZ;
12682+ if (stack_end && stack_start <= sp && sp < stack_end) {
12683+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12684+ put_cpu();
12685+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12686+ return;
12687+ }
12688+
12689+ put_cpu();
12690+
12691+ /* unknown stack */
12692+ BUG();
12693+}
12694+EXPORT_SYMBOL(pax_check_alloca);
12695+#endif
12696diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12697index cd28a35..c72ed9a 100644
12698--- a/arch/x86/kernel/early_printk.c
12699+++ b/arch/x86/kernel/early_printk.c
12700@@ -7,6 +7,7 @@
12701 #include <linux/pci_regs.h>
12702 #include <linux/pci_ids.h>
12703 #include <linux/errno.h>
12704+#include <linux/sched.h>
12705 #include <asm/io.h>
12706 #include <asm/processor.h>
12707 #include <asm/fcntl.h>
12708diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12709index f3f6f53..0841b66 100644
12710--- a/arch/x86/kernel/entry_32.S
12711+++ b/arch/x86/kernel/entry_32.S
12712@@ -186,13 +186,146 @@
12713 /*CFI_REL_OFFSET gs, PT_GS*/
12714 .endm
12715 .macro SET_KERNEL_GS reg
12716+
12717+#ifdef CONFIG_CC_STACKPROTECTOR
12718 movl $(__KERNEL_STACK_CANARY), \reg
12719+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12720+ movl $(__USER_DS), \reg
12721+#else
12722+ xorl \reg, \reg
12723+#endif
12724+
12725 movl \reg, %gs
12726 .endm
12727
12728 #endif /* CONFIG_X86_32_LAZY_GS */
12729
12730-.macro SAVE_ALL
12731+.macro pax_enter_kernel
12732+#ifdef CONFIG_PAX_KERNEXEC
12733+ call pax_enter_kernel
12734+#endif
12735+.endm
12736+
12737+.macro pax_exit_kernel
12738+#ifdef CONFIG_PAX_KERNEXEC
12739+ call pax_exit_kernel
12740+#endif
12741+.endm
12742+
12743+#ifdef CONFIG_PAX_KERNEXEC
12744+ENTRY(pax_enter_kernel)
12745+#ifdef CONFIG_PARAVIRT
12746+ pushl %eax
12747+ pushl %ecx
12748+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12749+ mov %eax, %esi
12750+#else
12751+ mov %cr0, %esi
12752+#endif
12753+ bts $16, %esi
12754+ jnc 1f
12755+ mov %cs, %esi
12756+ cmp $__KERNEL_CS, %esi
12757+ jz 3f
12758+ ljmp $__KERNEL_CS, $3f
12759+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12760+2:
12761+#ifdef CONFIG_PARAVIRT
12762+ mov %esi, %eax
12763+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12764+#else
12765+ mov %esi, %cr0
12766+#endif
12767+3:
12768+#ifdef CONFIG_PARAVIRT
12769+ popl %ecx
12770+ popl %eax
12771+#endif
12772+ ret
12773+ENDPROC(pax_enter_kernel)
12774+
12775+ENTRY(pax_exit_kernel)
12776+#ifdef CONFIG_PARAVIRT
12777+ pushl %eax
12778+ pushl %ecx
12779+#endif
12780+ mov %cs, %esi
12781+ cmp $__KERNEXEC_KERNEL_CS, %esi
12782+ jnz 2f
12783+#ifdef CONFIG_PARAVIRT
12784+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12785+ mov %eax, %esi
12786+#else
12787+ mov %cr0, %esi
12788+#endif
12789+ btr $16, %esi
12790+ ljmp $__KERNEL_CS, $1f
12791+1:
12792+#ifdef CONFIG_PARAVIRT
12793+ mov %esi, %eax
12794+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12795+#else
12796+ mov %esi, %cr0
12797+#endif
12798+2:
12799+#ifdef CONFIG_PARAVIRT
12800+ popl %ecx
12801+ popl %eax
12802+#endif
12803+ ret
12804+ENDPROC(pax_exit_kernel)
12805+#endif
12806+
12807+.macro pax_erase_kstack
12808+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12809+ call pax_erase_kstack
12810+#endif
12811+.endm
12812+
12813+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12814+/*
12815+ * ebp: thread_info
12816+ * ecx, edx: can be clobbered
12817+ */
12818+ENTRY(pax_erase_kstack)
12819+ pushl %edi
12820+ pushl %eax
12821+
12822+ mov TI_lowest_stack(%ebp), %edi
12823+ mov $-0xBEEF, %eax
12824+ std
12825+
12826+1: mov %edi, %ecx
12827+ and $THREAD_SIZE_asm - 1, %ecx
12828+ shr $2, %ecx
12829+ repne scasl
12830+ jecxz 2f
12831+
12832+ cmp $2*16, %ecx
12833+ jc 2f
12834+
12835+ mov $2*16, %ecx
12836+ repe scasl
12837+ jecxz 2f
12838+ jne 1b
12839+
12840+2: cld
12841+ mov %esp, %ecx
12842+ sub %edi, %ecx
12843+ shr $2, %ecx
12844+ rep stosl
12845+
12846+ mov TI_task_thread_sp0(%ebp), %edi
12847+ sub $128, %edi
12848+ mov %edi, TI_lowest_stack(%ebp)
12849+
12850+ popl %eax
12851+ popl %edi
12852+ ret
12853+ENDPROC(pax_erase_kstack)
12854+#endif
12855+
12856+.macro __SAVE_ALL _DS
12857 cld
12858 PUSH_GS
12859 pushl_cfi %fs
12860@@ -215,7 +348,7 @@
12861 CFI_REL_OFFSET ecx, 0
12862 pushl_cfi %ebx
12863 CFI_REL_OFFSET ebx, 0
12864- movl $(__USER_DS), %edx
12865+ movl $\_DS, %edx
12866 movl %edx, %ds
12867 movl %edx, %es
12868 movl $(__KERNEL_PERCPU), %edx
12869@@ -223,6 +356,15 @@
12870 SET_KERNEL_GS %edx
12871 .endm
12872
12873+.macro SAVE_ALL
12874+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12875+ __SAVE_ALL __KERNEL_DS
12876+ pax_enter_kernel
12877+#else
12878+ __SAVE_ALL __USER_DS
12879+#endif
12880+.endm
12881+
12882 .macro RESTORE_INT_REGS
12883 popl_cfi %ebx
12884 CFI_RESTORE ebx
12885@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12886 popfl_cfi
12887 jmp syscall_exit
12888 CFI_ENDPROC
12889-END(ret_from_fork)
12890+ENDPROC(ret_from_fork)
12891
12892 /*
12893 * Interrupt exit functions should be protected against kprobes
12894@@ -333,7 +475,15 @@ check_userspace:
12895 movb PT_CS(%esp), %al
12896 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12897 cmpl $USER_RPL, %eax
12898+
12899+#ifdef CONFIG_PAX_KERNEXEC
12900+ jae resume_userspace
12901+
12902+ PAX_EXIT_KERNEL
12903+ jmp resume_kernel
12904+#else
12905 jb resume_kernel # not returning to v8086 or userspace
12906+#endif
12907
12908 ENTRY(resume_userspace)
12909 LOCKDEP_SYS_EXIT
12910@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12911 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12912 # int/exception return?
12913 jne work_pending
12914- jmp restore_all
12915-END(ret_from_exception)
12916+ jmp restore_all_pax
12917+ENDPROC(ret_from_exception)
12918
12919 #ifdef CONFIG_PREEMPT
12920 ENTRY(resume_kernel)
12921@@ -361,7 +511,7 @@ need_resched:
12922 jz restore_all
12923 call preempt_schedule_irq
12924 jmp need_resched
12925-END(resume_kernel)
12926+ENDPROC(resume_kernel)
12927 #endif
12928 CFI_ENDPROC
12929 /*
12930@@ -395,23 +545,34 @@ sysenter_past_esp:
12931 /*CFI_REL_OFFSET cs, 0*/
12932 /*
12933 * Push current_thread_info()->sysenter_return to the stack.
12934- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12935- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12936 */
12937- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12938+ pushl_cfi $0
12939 CFI_REL_OFFSET eip, 0
12940
12941 pushl_cfi %eax
12942 SAVE_ALL
12943+ GET_THREAD_INFO(%ebp)
12944+ movl TI_sysenter_return(%ebp),%ebp
12945+ movl %ebp,PT_EIP(%esp)
12946 ENABLE_INTERRUPTS(CLBR_NONE)
12947
12948 /*
12949 * Load the potential sixth argument from user stack.
12950 * Careful about security.
12951 */
12952+ movl PT_OLDESP(%esp),%ebp
12953+
12954+#ifdef CONFIG_PAX_MEMORY_UDEREF
12955+ mov PT_OLDSS(%esp),%ds
12956+1: movl %ds:(%ebp),%ebp
12957+ push %ss
12958+ pop %ds
12959+#else
12960 cmpl $__PAGE_OFFSET-3,%ebp
12961 jae syscall_fault
12962 1: movl (%ebp),%ebp
12963+#endif
12964+
12965 movl %ebp,PT_EBP(%esp)
12966 .section __ex_table,"a"
12967 .align 4
12968@@ -434,12 +595,24 @@ sysenter_do_call:
12969 testl $_TIF_ALLWORK_MASK, %ecx
12970 jne sysexit_audit
12971 sysenter_exit:
12972+
12973+#ifdef CONFIG_PAX_RANDKSTACK
12974+ pushl_cfi %eax
12975+ movl %esp, %eax
12976+ call pax_randomize_kstack
12977+ popl_cfi %eax
12978+#endif
12979+
12980+ pax_erase_kstack
12981+
12982 /* if something modifies registers it must also disable sysexit */
12983 movl PT_EIP(%esp), %edx
12984 movl PT_OLDESP(%esp), %ecx
12985 xorl %ebp,%ebp
12986 TRACE_IRQS_ON
12987 1: mov PT_FS(%esp), %fs
12988+2: mov PT_DS(%esp), %ds
12989+3: mov PT_ES(%esp), %es
12990 PTGS_TO_GS
12991 ENABLE_INTERRUPTS_SYSEXIT
12992
12993@@ -456,6 +629,9 @@ sysenter_audit:
12994 movl %eax,%edx /* 2nd arg: syscall number */
12995 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12996 call audit_syscall_entry
12997+
12998+ pax_erase_kstack
12999+
13000 pushl_cfi %ebx
13001 movl PT_EAX(%esp),%eax /* reload syscall number */
13002 jmp sysenter_do_call
13003@@ -482,11 +658,17 @@ sysexit_audit:
13004
13005 CFI_ENDPROC
13006 .pushsection .fixup,"ax"
13007-2: movl $0,PT_FS(%esp)
13008+4: movl $0,PT_FS(%esp)
13009+ jmp 1b
13010+5: movl $0,PT_DS(%esp)
13011+ jmp 1b
13012+6: movl $0,PT_ES(%esp)
13013 jmp 1b
13014 .section __ex_table,"a"
13015 .align 4
13016- .long 1b,2b
13017+ .long 1b,4b
13018+ .long 2b,5b
13019+ .long 3b,6b
13020 .popsection
13021 PTGS_TO_GS_EX
13022 ENDPROC(ia32_sysenter_target)
13023@@ -519,6 +701,15 @@ syscall_exit:
13024 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13025 jne syscall_exit_work
13026
13027+restore_all_pax:
13028+
13029+#ifdef CONFIG_PAX_RANDKSTACK
13030+ movl %esp, %eax
13031+ call pax_randomize_kstack
13032+#endif
13033+
13034+ pax_erase_kstack
13035+
13036 restore_all:
13037 TRACE_IRQS_IRET
13038 restore_all_notrace:
13039@@ -578,14 +769,34 @@ ldt_ss:
13040 * compensating for the offset by changing to the ESPFIX segment with
13041 * a base address that matches for the difference.
13042 */
13043-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13044+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13045 mov %esp, %edx /* load kernel esp */
13046 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13047 mov %dx, %ax /* eax: new kernel esp */
13048 sub %eax, %edx /* offset (low word is 0) */
13049+#ifdef CONFIG_SMP
13050+ movl PER_CPU_VAR(cpu_number), %ebx
13051+ shll $PAGE_SHIFT_asm, %ebx
13052+ addl $cpu_gdt_table, %ebx
13053+#else
13054+ movl $cpu_gdt_table, %ebx
13055+#endif
13056 shr $16, %edx
13057- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13058- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13059+
13060+#ifdef CONFIG_PAX_KERNEXEC
13061+ mov %cr0, %esi
13062+ btr $16, %esi
13063+ mov %esi, %cr0
13064+#endif
13065+
13066+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13067+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13068+
13069+#ifdef CONFIG_PAX_KERNEXEC
13070+ bts $16, %esi
13071+ mov %esi, %cr0
13072+#endif
13073+
13074 pushl_cfi $__ESPFIX_SS
13075 pushl_cfi %eax /* new kernel esp */
13076 /* Disable interrupts, but do not irqtrace this section: we
13077@@ -614,34 +825,28 @@ work_resched:
13078 movl TI_flags(%ebp), %ecx
13079 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13080 # than syscall tracing?
13081- jz restore_all
13082+ jz restore_all_pax
13083 testb $_TIF_NEED_RESCHED, %cl
13084 jnz work_resched
13085
13086 work_notifysig: # deal with pending signals and
13087 # notify-resume requests
13088+ movl %esp, %eax
13089 #ifdef CONFIG_VM86
13090 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13091- movl %esp, %eax
13092- jne work_notifysig_v86 # returning to kernel-space or
13093+ jz 1f # returning to kernel-space or
13094 # vm86-space
13095- xorl %edx, %edx
13096- call do_notify_resume
13097- jmp resume_userspace_sig
13098
13099- ALIGN
13100-work_notifysig_v86:
13101 pushl_cfi %ecx # save ti_flags for do_notify_resume
13102 call save_v86_state # %eax contains pt_regs pointer
13103 popl_cfi %ecx
13104 movl %eax, %esp
13105-#else
13106- movl %esp, %eax
13107+1:
13108 #endif
13109 xorl %edx, %edx
13110 call do_notify_resume
13111 jmp resume_userspace_sig
13112-END(work_pending)
13113+ENDPROC(work_pending)
13114
13115 # perform syscall exit tracing
13116 ALIGN
13117@@ -649,11 +854,14 @@ syscall_trace_entry:
13118 movl $-ENOSYS,PT_EAX(%esp)
13119 movl %esp, %eax
13120 call syscall_trace_enter
13121+
13122+ pax_erase_kstack
13123+
13124 /* What it returned is what we'll actually use. */
13125 cmpl $(nr_syscalls), %eax
13126 jnae syscall_call
13127 jmp syscall_exit
13128-END(syscall_trace_entry)
13129+ENDPROC(syscall_trace_entry)
13130
13131 # perform syscall exit tracing
13132 ALIGN
13133@@ -666,20 +874,24 @@ syscall_exit_work:
13134 movl %esp, %eax
13135 call syscall_trace_leave
13136 jmp resume_userspace
13137-END(syscall_exit_work)
13138+ENDPROC(syscall_exit_work)
13139 CFI_ENDPROC
13140
13141 RING0_INT_FRAME # can't unwind into user space anyway
13142 syscall_fault:
13143+#ifdef CONFIG_PAX_MEMORY_UDEREF
13144+ push %ss
13145+ pop %ds
13146+#endif
13147 GET_THREAD_INFO(%ebp)
13148 movl $-EFAULT,PT_EAX(%esp)
13149 jmp resume_userspace
13150-END(syscall_fault)
13151+ENDPROC(syscall_fault)
13152
13153 syscall_badsys:
13154 movl $-ENOSYS,PT_EAX(%esp)
13155 jmp resume_userspace
13156-END(syscall_badsys)
13157+ENDPROC(syscall_badsys)
13158 CFI_ENDPROC
13159 /*
13160 * End of kprobes section
13161@@ -753,6 +965,36 @@ ptregs_clone:
13162 CFI_ENDPROC
13163 ENDPROC(ptregs_clone)
13164
13165+ ALIGN;
13166+ENTRY(kernel_execve)
13167+ CFI_STARTPROC
13168+ pushl_cfi %ebp
13169+ sub $PT_OLDSS+4,%esp
13170+ pushl_cfi %edi
13171+ pushl_cfi %ecx
13172+ pushl_cfi %eax
13173+ lea 3*4(%esp),%edi
13174+ mov $PT_OLDSS/4+1,%ecx
13175+ xorl %eax,%eax
13176+ rep stosl
13177+ popl_cfi %eax
13178+ popl_cfi %ecx
13179+ popl_cfi %edi
13180+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13181+ pushl_cfi %esp
13182+ call sys_execve
13183+ add $4,%esp
13184+ CFI_ADJUST_CFA_OFFSET -4
13185+ GET_THREAD_INFO(%ebp)
13186+ test %eax,%eax
13187+ jz syscall_exit
13188+ add $PT_OLDSS+4,%esp
13189+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13190+ popl_cfi %ebp
13191+ ret
13192+ CFI_ENDPROC
13193+ENDPROC(kernel_execve)
13194+
13195 .macro FIXUP_ESPFIX_STACK
13196 /*
13197 * Switch back for ESPFIX stack to the normal zerobased stack
13198@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13199 * normal stack and adjusts ESP with the matching offset.
13200 */
13201 /* fixup the stack */
13202- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13203- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13204+#ifdef CONFIG_SMP
13205+ movl PER_CPU_VAR(cpu_number), %ebx
13206+ shll $PAGE_SHIFT_asm, %ebx
13207+ addl $cpu_gdt_table, %ebx
13208+#else
13209+ movl $cpu_gdt_table, %ebx
13210+#endif
13211+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13212+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13213 shl $16, %eax
13214 addl %esp, %eax /* the adjusted stack pointer */
13215 pushl_cfi $__KERNEL_DS
13216@@ -816,7 +1065,7 @@ vector=vector+1
13217 .endr
13218 2: jmp common_interrupt
13219 .endr
13220-END(irq_entries_start)
13221+ENDPROC(irq_entries_start)
13222
13223 .previous
13224 END(interrupt)
13225@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13226 pushl_cfi $do_coprocessor_error
13227 jmp error_code
13228 CFI_ENDPROC
13229-END(coprocessor_error)
13230+ENDPROC(coprocessor_error)
13231
13232 ENTRY(simd_coprocessor_error)
13233 RING0_INT_FRAME
13234@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13235 #endif
13236 jmp error_code
13237 CFI_ENDPROC
13238-END(simd_coprocessor_error)
13239+ENDPROC(simd_coprocessor_error)
13240
13241 ENTRY(device_not_available)
13242 RING0_INT_FRAME
13243@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13244 pushl_cfi $do_device_not_available
13245 jmp error_code
13246 CFI_ENDPROC
13247-END(device_not_available)
13248+ENDPROC(device_not_available)
13249
13250 #ifdef CONFIG_PARAVIRT
13251 ENTRY(native_iret)
13252@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13253 .align 4
13254 .long native_iret, iret_exc
13255 .previous
13256-END(native_iret)
13257+ENDPROC(native_iret)
13258
13259 ENTRY(native_irq_enable_sysexit)
13260 sti
13261 sysexit
13262-END(native_irq_enable_sysexit)
13263+ENDPROC(native_irq_enable_sysexit)
13264 #endif
13265
13266 ENTRY(overflow)
13267@@ -916,7 +1165,7 @@ ENTRY(overflow)
13268 pushl_cfi $do_overflow
13269 jmp error_code
13270 CFI_ENDPROC
13271-END(overflow)
13272+ENDPROC(overflow)
13273
13274 ENTRY(bounds)
13275 RING0_INT_FRAME
13276@@ -924,7 +1173,7 @@ ENTRY(bounds)
13277 pushl_cfi $do_bounds
13278 jmp error_code
13279 CFI_ENDPROC
13280-END(bounds)
13281+ENDPROC(bounds)
13282
13283 ENTRY(invalid_op)
13284 RING0_INT_FRAME
13285@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13286 pushl_cfi $do_invalid_op
13287 jmp error_code
13288 CFI_ENDPROC
13289-END(invalid_op)
13290+ENDPROC(invalid_op)
13291
13292 ENTRY(coprocessor_segment_overrun)
13293 RING0_INT_FRAME
13294@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13295 pushl_cfi $do_coprocessor_segment_overrun
13296 jmp error_code
13297 CFI_ENDPROC
13298-END(coprocessor_segment_overrun)
13299+ENDPROC(coprocessor_segment_overrun)
13300
13301 ENTRY(invalid_TSS)
13302 RING0_EC_FRAME
13303 pushl_cfi $do_invalid_TSS
13304 jmp error_code
13305 CFI_ENDPROC
13306-END(invalid_TSS)
13307+ENDPROC(invalid_TSS)
13308
13309 ENTRY(segment_not_present)
13310 RING0_EC_FRAME
13311 pushl_cfi $do_segment_not_present
13312 jmp error_code
13313 CFI_ENDPROC
13314-END(segment_not_present)
13315+ENDPROC(segment_not_present)
13316
13317 ENTRY(stack_segment)
13318 RING0_EC_FRAME
13319 pushl_cfi $do_stack_segment
13320 jmp error_code
13321 CFI_ENDPROC
13322-END(stack_segment)
13323+ENDPROC(stack_segment)
13324
13325 ENTRY(alignment_check)
13326 RING0_EC_FRAME
13327 pushl_cfi $do_alignment_check
13328 jmp error_code
13329 CFI_ENDPROC
13330-END(alignment_check)
13331+ENDPROC(alignment_check)
13332
13333 ENTRY(divide_error)
13334 RING0_INT_FRAME
13335@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13336 pushl_cfi $do_divide_error
13337 jmp error_code
13338 CFI_ENDPROC
13339-END(divide_error)
13340+ENDPROC(divide_error)
13341
13342 #ifdef CONFIG_X86_MCE
13343 ENTRY(machine_check)
13344@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13345 pushl_cfi machine_check_vector
13346 jmp error_code
13347 CFI_ENDPROC
13348-END(machine_check)
13349+ENDPROC(machine_check)
13350 #endif
13351
13352 ENTRY(spurious_interrupt_bug)
13353@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13354 pushl_cfi $do_spurious_interrupt_bug
13355 jmp error_code
13356 CFI_ENDPROC
13357-END(spurious_interrupt_bug)
13358+ENDPROC(spurious_interrupt_bug)
13359 /*
13360 * End of kprobes section
13361 */
13362@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13363
13364 ENTRY(mcount)
13365 ret
13366-END(mcount)
13367+ENDPROC(mcount)
13368
13369 ENTRY(ftrace_caller)
13370 cmpl $0, function_trace_stop
13371@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13372 .globl ftrace_stub
13373 ftrace_stub:
13374 ret
13375-END(ftrace_caller)
13376+ENDPROC(ftrace_caller)
13377
13378 #else /* ! CONFIG_DYNAMIC_FTRACE */
13379
13380@@ -1174,7 +1423,7 @@ trace:
13381 popl %ecx
13382 popl %eax
13383 jmp ftrace_stub
13384-END(mcount)
13385+ENDPROC(mcount)
13386 #endif /* CONFIG_DYNAMIC_FTRACE */
13387 #endif /* CONFIG_FUNCTION_TRACER */
13388
13389@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13390 popl %ecx
13391 popl %eax
13392 ret
13393-END(ftrace_graph_caller)
13394+ENDPROC(ftrace_graph_caller)
13395
13396 .globl return_to_handler
13397 return_to_handler:
13398@@ -1209,7 +1458,6 @@ return_to_handler:
13399 jmp *%ecx
13400 #endif
13401
13402-.section .rodata,"a"
13403 #include "syscall_table_32.S"
13404
13405 syscall_table_size=(.-sys_call_table)
13406@@ -1255,15 +1503,18 @@ error_code:
13407 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13408 REG_TO_PTGS %ecx
13409 SET_KERNEL_GS %ecx
13410- movl $(__USER_DS), %ecx
13411+ movl $(__KERNEL_DS), %ecx
13412 movl %ecx, %ds
13413 movl %ecx, %es
13414+
13415+ pax_enter_kernel
13416+
13417 TRACE_IRQS_OFF
13418 movl %esp,%eax # pt_regs pointer
13419 call *%edi
13420 jmp ret_from_exception
13421 CFI_ENDPROC
13422-END(page_fault)
13423+ENDPROC(page_fault)
13424
13425 /*
13426 * Debug traps and NMI can happen at the one SYSENTER instruction
13427@@ -1305,7 +1556,7 @@ debug_stack_correct:
13428 call do_debug
13429 jmp ret_from_exception
13430 CFI_ENDPROC
13431-END(debug)
13432+ENDPROC(debug)
13433
13434 /*
13435 * NMI is doubly nasty. It can happen _while_ we're handling
13436@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13437 xorl %edx,%edx # zero error code
13438 movl %esp,%eax # pt_regs pointer
13439 call do_nmi
13440+
13441+ pax_exit_kernel
13442+
13443 jmp restore_all_notrace
13444 CFI_ENDPROC
13445
13446@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13447 FIXUP_ESPFIX_STACK # %eax == %esp
13448 xorl %edx,%edx # zero error code
13449 call do_nmi
13450+
13451+ pax_exit_kernel
13452+
13453 RESTORE_REGS
13454 lss 12+4(%esp), %esp # back to espfix stack
13455 CFI_ADJUST_CFA_OFFSET -24
13456 jmp irq_return
13457 CFI_ENDPROC
13458-END(nmi)
13459+ENDPROC(nmi)
13460
13461 ENTRY(int3)
13462 RING0_INT_FRAME
13463@@ -1395,14 +1652,14 @@ ENTRY(int3)
13464 call do_int3
13465 jmp ret_from_exception
13466 CFI_ENDPROC
13467-END(int3)
13468+ENDPROC(int3)
13469
13470 ENTRY(general_protection)
13471 RING0_EC_FRAME
13472 pushl_cfi $do_general_protection
13473 jmp error_code
13474 CFI_ENDPROC
13475-END(general_protection)
13476+ENDPROC(general_protection)
13477
13478 #ifdef CONFIG_KVM_GUEST
13479 ENTRY(async_page_fault)
13480@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13481 pushl_cfi $do_async_page_fault
13482 jmp error_code
13483 CFI_ENDPROC
13484-END(async_page_fault)
13485+ENDPROC(async_page_fault)
13486 #endif
13487
13488 /*
13489diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13490index faf8d5e..f58c441 100644
13491--- a/arch/x86/kernel/entry_64.S
13492+++ b/arch/x86/kernel/entry_64.S
13493@@ -55,6 +55,8 @@
13494 #include <asm/paravirt.h>
13495 #include <asm/ftrace.h>
13496 #include <asm/percpu.h>
13497+#include <asm/pgtable.h>
13498+#include <asm/alternative-asm.h>
13499
13500 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13501 #include <linux/elf-em.h>
13502@@ -68,8 +70,9 @@
13503 #ifdef CONFIG_FUNCTION_TRACER
13504 #ifdef CONFIG_DYNAMIC_FTRACE
13505 ENTRY(mcount)
13506+ pax_force_retaddr
13507 retq
13508-END(mcount)
13509+ENDPROC(mcount)
13510
13511 ENTRY(ftrace_caller)
13512 cmpl $0, function_trace_stop
13513@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13514 #endif
13515
13516 GLOBAL(ftrace_stub)
13517+ pax_force_retaddr
13518 retq
13519-END(ftrace_caller)
13520+ENDPROC(ftrace_caller)
13521
13522 #else /* ! CONFIG_DYNAMIC_FTRACE */
13523 ENTRY(mcount)
13524@@ -112,6 +116,7 @@ ENTRY(mcount)
13525 #endif
13526
13527 GLOBAL(ftrace_stub)
13528+ pax_force_retaddr
13529 retq
13530
13531 trace:
13532@@ -121,12 +126,13 @@ trace:
13533 movq 8(%rbp), %rsi
13534 subq $MCOUNT_INSN_SIZE, %rdi
13535
13536+ pax_force_fptr ftrace_trace_function
13537 call *ftrace_trace_function
13538
13539 MCOUNT_RESTORE_FRAME
13540
13541 jmp ftrace_stub
13542-END(mcount)
13543+ENDPROC(mcount)
13544 #endif /* CONFIG_DYNAMIC_FTRACE */
13545 #endif /* CONFIG_FUNCTION_TRACER */
13546
13547@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13548
13549 MCOUNT_RESTORE_FRAME
13550
13551+ pax_force_retaddr
13552 retq
13553-END(ftrace_graph_caller)
13554+ENDPROC(ftrace_graph_caller)
13555
13556 GLOBAL(return_to_handler)
13557 subq $24, %rsp
13558@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13559 movq 8(%rsp), %rdx
13560 movq (%rsp), %rax
13561 addq $24, %rsp
13562+ pax_force_fptr %rdi
13563 jmp *%rdi
13564 #endif
13565
13566@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13567 ENDPROC(native_usergs_sysret64)
13568 #endif /* CONFIG_PARAVIRT */
13569
13570+ .macro ljmpq sel, off
13571+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13572+ .byte 0x48; ljmp *1234f(%rip)
13573+ .pushsection .rodata
13574+ .align 16
13575+ 1234: .quad \off; .word \sel
13576+ .popsection
13577+#else
13578+ pushq $\sel
13579+ pushq $\off
13580+ lretq
13581+#endif
13582+ .endm
13583+
13584+ .macro pax_enter_kernel
13585+ pax_set_fptr_mask
13586+#ifdef CONFIG_PAX_KERNEXEC
13587+ call pax_enter_kernel
13588+#endif
13589+ .endm
13590+
13591+ .macro pax_exit_kernel
13592+#ifdef CONFIG_PAX_KERNEXEC
13593+ call pax_exit_kernel
13594+#endif
13595+ .endm
13596+
13597+#ifdef CONFIG_PAX_KERNEXEC
13598+ENTRY(pax_enter_kernel)
13599+ pushq %rdi
13600+
13601+#ifdef CONFIG_PARAVIRT
13602+ PV_SAVE_REGS(CLBR_RDI)
13603+#endif
13604+
13605+ GET_CR0_INTO_RDI
13606+ bts $16,%rdi
13607+ jnc 3f
13608+ mov %cs,%edi
13609+ cmp $__KERNEL_CS,%edi
13610+ jnz 2f
13611+1:
13612+
13613+#ifdef CONFIG_PARAVIRT
13614+ PV_RESTORE_REGS(CLBR_RDI)
13615+#endif
13616+
13617+ popq %rdi
13618+ pax_force_retaddr
13619+ retq
13620+
13621+2: ljmpq __KERNEL_CS,1f
13622+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13623+4: SET_RDI_INTO_CR0
13624+ jmp 1b
13625+ENDPROC(pax_enter_kernel)
13626+
13627+ENTRY(pax_exit_kernel)
13628+ pushq %rdi
13629+
13630+#ifdef CONFIG_PARAVIRT
13631+ PV_SAVE_REGS(CLBR_RDI)
13632+#endif
13633+
13634+ mov %cs,%rdi
13635+ cmp $__KERNEXEC_KERNEL_CS,%edi
13636+ jz 2f
13637+1:
13638+
13639+#ifdef CONFIG_PARAVIRT
13640+ PV_RESTORE_REGS(CLBR_RDI);
13641+#endif
13642+
13643+ popq %rdi
13644+ pax_force_retaddr
13645+ retq
13646+
13647+2: GET_CR0_INTO_RDI
13648+ btr $16,%rdi
13649+ ljmpq __KERNEL_CS,3f
13650+3: SET_RDI_INTO_CR0
13651+ jmp 1b
13652+#ifdef CONFIG_PARAVIRT
13653+ PV_RESTORE_REGS(CLBR_RDI);
13654+#endif
13655+
13656+ popq %rdi
13657+ pax_force_retaddr
13658+ retq
13659+ENDPROC(pax_exit_kernel)
13660+#endif
13661+
13662+ .macro pax_enter_kernel_user
13663+ pax_set_fptr_mask
13664+#ifdef CONFIG_PAX_MEMORY_UDEREF
13665+ call pax_enter_kernel_user
13666+#endif
13667+ .endm
13668+
13669+ .macro pax_exit_kernel_user
13670+#ifdef CONFIG_PAX_MEMORY_UDEREF
13671+ call pax_exit_kernel_user
13672+#endif
13673+#ifdef CONFIG_PAX_RANDKSTACK
13674+ pushq %rax
13675+ call pax_randomize_kstack
13676+ popq %rax
13677+#endif
13678+ .endm
13679+
13680+#ifdef CONFIG_PAX_MEMORY_UDEREF
13681+ENTRY(pax_enter_kernel_user)
13682+ pushq %rdi
13683+ pushq %rbx
13684+
13685+#ifdef CONFIG_PARAVIRT
13686+ PV_SAVE_REGS(CLBR_RDI)
13687+#endif
13688+
13689+ GET_CR3_INTO_RDI
13690+ mov %rdi,%rbx
13691+ add $__START_KERNEL_map,%rbx
13692+ sub phys_base(%rip),%rbx
13693+
13694+#ifdef CONFIG_PARAVIRT
13695+ pushq %rdi
13696+ cmpl $0, pv_info+PARAVIRT_enabled
13697+ jz 1f
13698+ i = 0
13699+ .rept USER_PGD_PTRS
13700+ mov i*8(%rbx),%rsi
13701+ mov $0,%sil
13702+ lea i*8(%rbx),%rdi
13703+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13704+ i = i + 1
13705+ .endr
13706+ jmp 2f
13707+1:
13708+#endif
13709+
13710+ i = 0
13711+ .rept USER_PGD_PTRS
13712+ movb $0,i*8(%rbx)
13713+ i = i + 1
13714+ .endr
13715+
13716+#ifdef CONFIG_PARAVIRT
13717+2: popq %rdi
13718+#endif
13719+ SET_RDI_INTO_CR3
13720+
13721+#ifdef CONFIG_PAX_KERNEXEC
13722+ GET_CR0_INTO_RDI
13723+ bts $16,%rdi
13724+ SET_RDI_INTO_CR0
13725+#endif
13726+
13727+#ifdef CONFIG_PARAVIRT
13728+ PV_RESTORE_REGS(CLBR_RDI)
13729+#endif
13730+
13731+ popq %rbx
13732+ popq %rdi
13733+ pax_force_retaddr
13734+ retq
13735+ENDPROC(pax_enter_kernel_user)
13736+
13737+ENTRY(pax_exit_kernel_user)
13738+ push %rdi
13739+
13740+#ifdef CONFIG_PARAVIRT
13741+ pushq %rbx
13742+ PV_SAVE_REGS(CLBR_RDI)
13743+#endif
13744+
13745+#ifdef CONFIG_PAX_KERNEXEC
13746+ GET_CR0_INTO_RDI
13747+ btr $16,%rdi
13748+ SET_RDI_INTO_CR0
13749+#endif
13750+
13751+ GET_CR3_INTO_RDI
13752+ add $__START_KERNEL_map,%rdi
13753+ sub phys_base(%rip),%rdi
13754+
13755+#ifdef CONFIG_PARAVIRT
13756+ cmpl $0, pv_info+PARAVIRT_enabled
13757+ jz 1f
13758+ mov %rdi,%rbx
13759+ i = 0
13760+ .rept USER_PGD_PTRS
13761+ mov i*8(%rbx),%rsi
13762+ mov $0x67,%sil
13763+ lea i*8(%rbx),%rdi
13764+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13765+ i = i + 1
13766+ .endr
13767+ jmp 2f
13768+1:
13769+#endif
13770+
13771+ i = 0
13772+ .rept USER_PGD_PTRS
13773+ movb $0x67,i*8(%rdi)
13774+ i = i + 1
13775+ .endr
13776+
13777+#ifdef CONFIG_PARAVIRT
13778+2: PV_RESTORE_REGS(CLBR_RDI)
13779+ popq %rbx
13780+#endif
13781+
13782+ popq %rdi
13783+ pax_force_retaddr
13784+ retq
13785+ENDPROC(pax_exit_kernel_user)
13786+#endif
13787+
13788+.macro pax_erase_kstack
13789+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13790+ call pax_erase_kstack
13791+#endif
13792+.endm
13793+
13794+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13795+/*
13796+ * r11: thread_info
13797+ * rcx, rdx: can be clobbered
13798+ */
13799+ENTRY(pax_erase_kstack)
13800+ pushq %rdi
13801+ pushq %rax
13802+ pushq %r11
13803+
13804+ GET_THREAD_INFO(%r11)
13805+ mov TI_lowest_stack(%r11), %rdi
13806+ mov $-0xBEEF, %rax
13807+ std
13808+
13809+1: mov %edi, %ecx
13810+ and $THREAD_SIZE_asm - 1, %ecx
13811+ shr $3, %ecx
13812+ repne scasq
13813+ jecxz 2f
13814+
13815+ cmp $2*8, %ecx
13816+ jc 2f
13817+
13818+ mov $2*8, %ecx
13819+ repe scasq
13820+ jecxz 2f
13821+ jne 1b
13822+
13823+2: cld
13824+ mov %esp, %ecx
13825+ sub %edi, %ecx
13826+
13827+ cmp $THREAD_SIZE_asm, %rcx
13828+ jb 3f
13829+ ud2
13830+3:
13831+
13832+ shr $3, %ecx
13833+ rep stosq
13834+
13835+ mov TI_task_thread_sp0(%r11), %rdi
13836+ sub $256, %rdi
13837+ mov %rdi, TI_lowest_stack(%r11)
13838+
13839+ popq %r11
13840+ popq %rax
13841+ popq %rdi
13842+ pax_force_retaddr
13843+ ret
13844+ENDPROC(pax_erase_kstack)
13845+#endif
13846
13847 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13848 #ifdef CONFIG_TRACE_IRQFLAGS
13849@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13850 .endm
13851
13852 .macro UNFAKE_STACK_FRAME
13853- addq $8*6, %rsp
13854- CFI_ADJUST_CFA_OFFSET -(6*8)
13855+ addq $8*6 + ARG_SKIP, %rsp
13856+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13857 .endm
13858
13859 /*
13860@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13861 movq %rsp, %rsi
13862
13863 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13864- testl $3, CS(%rdi)
13865+ testb $3, CS(%rdi)
13866 je 1f
13867 SWAPGS
13868 /*
13869@@ -355,9 +639,10 @@ ENTRY(save_rest)
13870 movq_cfi r15, R15+16
13871 movq %r11, 8(%rsp) /* return address */
13872 FIXUP_TOP_OF_STACK %r11, 16
13873+ pax_force_retaddr
13874 ret
13875 CFI_ENDPROC
13876-END(save_rest)
13877+ENDPROC(save_rest)
13878
13879 /* save complete stack frame */
13880 .pushsection .kprobes.text, "ax"
13881@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13882 js 1f /* negative -> in kernel */
13883 SWAPGS
13884 xorl %ebx,%ebx
13885-1: ret
13886+1: pax_force_retaddr_bts
13887+ ret
13888 CFI_ENDPROC
13889-END(save_paranoid)
13890+ENDPROC(save_paranoid)
13891 .popsection
13892
13893 /*
13894@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13895
13896 RESTORE_REST
13897
13898- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13899+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13900 je int_ret_from_sys_call
13901
13902 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13903@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13904 jmp ret_from_sys_call # go to the SYSRET fastpath
13905
13906 CFI_ENDPROC
13907-END(ret_from_fork)
13908+ENDPROC(ret_from_fork)
13909
13910 /*
13911 * System call entry. Up to 6 arguments in registers are supported.
13912@@ -456,7 +742,7 @@ END(ret_from_fork)
13913 ENTRY(system_call)
13914 CFI_STARTPROC simple
13915 CFI_SIGNAL_FRAME
13916- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13917+ CFI_DEF_CFA rsp,0
13918 CFI_REGISTER rip,rcx
13919 /*CFI_REGISTER rflags,r11*/
13920 SWAPGS_UNSAFE_STACK
13921@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13922
13923 movq %rsp,PER_CPU_VAR(old_rsp)
13924 movq PER_CPU_VAR(kernel_stack),%rsp
13925+ SAVE_ARGS 8*6,0
13926+ pax_enter_kernel_user
13927 /*
13928 * No need to follow this irqs off/on section - it's straight
13929 * and short:
13930 */
13931 ENABLE_INTERRUPTS(CLBR_NONE)
13932- SAVE_ARGS 8,0
13933 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13934 movq %rcx,RIP-ARGOFFSET(%rsp)
13935 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13936@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13937 system_call_fastpath:
13938 cmpq $__NR_syscall_max,%rax
13939 ja badsys
13940- movq %r10,%rcx
13941+ movq R10-ARGOFFSET(%rsp),%rcx
13942 call *sys_call_table(,%rax,8) # XXX: rip relative
13943 movq %rax,RAX-ARGOFFSET(%rsp)
13944 /*
13945@@ -503,6 +790,8 @@ sysret_check:
13946 andl %edi,%edx
13947 jnz sysret_careful
13948 CFI_REMEMBER_STATE
13949+ pax_exit_kernel_user
13950+ pax_erase_kstack
13951 /*
13952 * sysretq will re-enable interrupts:
13953 */
13954@@ -554,14 +843,18 @@ badsys:
13955 * jump back to the normal fast path.
13956 */
13957 auditsys:
13958- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13959+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13960 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13961 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13962 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13963 movq %rax,%rsi /* 2nd arg: syscall number */
13964 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13965 call audit_syscall_entry
13966+
13967+ pax_erase_kstack
13968+
13969 LOAD_ARGS 0 /* reload call-clobbered registers */
13970+ pax_set_fptr_mask
13971 jmp system_call_fastpath
13972
13973 /*
13974@@ -591,16 +884,20 @@ tracesys:
13975 FIXUP_TOP_OF_STACK %rdi
13976 movq %rsp,%rdi
13977 call syscall_trace_enter
13978+
13979+ pax_erase_kstack
13980+
13981 /*
13982 * Reload arg registers from stack in case ptrace changed them.
13983 * We don't reload %rax because syscall_trace_enter() returned
13984 * the value it wants us to use in the table lookup.
13985 */
13986 LOAD_ARGS ARGOFFSET, 1
13987+ pax_set_fptr_mask
13988 RESTORE_REST
13989 cmpq $__NR_syscall_max,%rax
13990 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13991- movq %r10,%rcx /* fixup for C */
13992+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13993 call *sys_call_table(,%rax,8)
13994 movq %rax,RAX-ARGOFFSET(%rsp)
13995 /* Use IRET because user could have changed frame */
13996@@ -612,7 +909,7 @@ tracesys:
13997 GLOBAL(int_ret_from_sys_call)
13998 DISABLE_INTERRUPTS(CLBR_NONE)
13999 TRACE_IRQS_OFF
14000- testl $3,CS-ARGOFFSET(%rsp)
14001+ testb $3,CS-ARGOFFSET(%rsp)
14002 je retint_restore_args
14003 movl $_TIF_ALLWORK_MASK,%edi
14004 /* edi: mask to check */
14005@@ -669,7 +966,7 @@ int_restore_rest:
14006 TRACE_IRQS_OFF
14007 jmp int_with_check
14008 CFI_ENDPROC
14009-END(system_call)
14010+ENDPROC(system_call)
14011
14012 /*
14013 * Certain special system calls that need to save a complete full stack frame.
14014@@ -685,7 +982,7 @@ ENTRY(\label)
14015 call \func
14016 jmp ptregscall_common
14017 CFI_ENDPROC
14018-END(\label)
14019+ENDPROC(\label)
14020 .endm
14021
14022 PTREGSCALL stub_clone, sys_clone, %r8
14023@@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
14024 movq_cfi_restore R12+8, r12
14025 movq_cfi_restore RBP+8, rbp
14026 movq_cfi_restore RBX+8, rbx
14027+ pax_force_retaddr
14028 ret $REST_SKIP /* pop extended registers */
14029 CFI_ENDPROC
14030-END(ptregscall_common)
14031+ENDPROC(ptregscall_common)
14032
14033 ENTRY(stub_execve)
14034 CFI_STARTPROC
14035@@ -720,7 +1018,7 @@ ENTRY(stub_execve)
14036 RESTORE_REST
14037 jmp int_ret_from_sys_call
14038 CFI_ENDPROC
14039-END(stub_execve)
14040+ENDPROC(stub_execve)
14041
14042 /*
14043 * sigreturn is special because it needs to restore all registers on return.
14044@@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
14045 RESTORE_REST
14046 jmp int_ret_from_sys_call
14047 CFI_ENDPROC
14048-END(stub_rt_sigreturn)
14049+ENDPROC(stub_rt_sigreturn)
14050
14051 /*
14052 * Build the entry stubs and pointer table with some assembler magic.
14053@@ -773,7 +1071,7 @@ vector=vector+1
14054 2: jmp common_interrupt
14055 .endr
14056 CFI_ENDPROC
14057-END(irq_entries_start)
14058+ENDPROC(irq_entries_start)
14059
14060 .previous
14061 END(interrupt)
14062@@ -793,6 +1091,16 @@ END(interrupt)
14063 subq $ORIG_RAX-RBP, %rsp
14064 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14065 SAVE_ARGS_IRQ
14066+#ifdef CONFIG_PAX_MEMORY_UDEREF
14067+ testb $3, CS(%rdi)
14068+ jnz 1f
14069+ pax_enter_kernel
14070+ jmp 2f
14071+1: pax_enter_kernel_user
14072+2:
14073+#else
14074+ pax_enter_kernel
14075+#endif
14076 call \func
14077 .endm
14078
14079@@ -824,7 +1132,7 @@ ret_from_intr:
14080
14081 exit_intr:
14082 GET_THREAD_INFO(%rcx)
14083- testl $3,CS-ARGOFFSET(%rsp)
14084+ testb $3,CS-ARGOFFSET(%rsp)
14085 je retint_kernel
14086
14087 /* Interrupt came from user space */
14088@@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
14089 * The iretq could re-enable interrupts:
14090 */
14091 DISABLE_INTERRUPTS(CLBR_ANY)
14092+ pax_exit_kernel_user
14093+ pax_erase_kstack
14094 TRACE_IRQS_IRETQ
14095 SWAPGS
14096 jmp restore_args
14097
14098 retint_restore_args: /* return to kernel space */
14099 DISABLE_INTERRUPTS(CLBR_ANY)
14100+ pax_exit_kernel
14101+ pax_force_retaddr RIP-ARGOFFSET
14102 /*
14103 * The iretq could re-enable interrupts:
14104 */
14105@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14106 #endif
14107
14108 CFI_ENDPROC
14109-END(common_interrupt)
14110+ENDPROC(common_interrupt)
14111 /*
14112 * End of kprobes section
14113 */
14114@@ -956,7 +1268,7 @@ ENTRY(\sym)
14115 interrupt \do_sym
14116 jmp ret_from_intr
14117 CFI_ENDPROC
14118-END(\sym)
14119+ENDPROC(\sym)
14120 .endm
14121
14122 #ifdef CONFIG_SMP
14123@@ -1021,12 +1333,22 @@ ENTRY(\sym)
14124 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14125 call error_entry
14126 DEFAULT_FRAME 0
14127+#ifdef CONFIG_PAX_MEMORY_UDEREF
14128+ testb $3, CS(%rsp)
14129+ jnz 1f
14130+ pax_enter_kernel
14131+ jmp 2f
14132+1: pax_enter_kernel_user
14133+2:
14134+#else
14135+ pax_enter_kernel
14136+#endif
14137 movq %rsp,%rdi /* pt_regs pointer */
14138 xorl %esi,%esi /* no error code */
14139 call \do_sym
14140 jmp error_exit /* %ebx: no swapgs flag */
14141 CFI_ENDPROC
14142-END(\sym)
14143+ENDPROC(\sym)
14144 .endm
14145
14146 .macro paranoidzeroentry sym do_sym
14147@@ -1038,15 +1360,25 @@ ENTRY(\sym)
14148 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14149 call save_paranoid
14150 TRACE_IRQS_OFF
14151+#ifdef CONFIG_PAX_MEMORY_UDEREF
14152+ testb $3, CS(%rsp)
14153+ jnz 1f
14154+ pax_enter_kernel
14155+ jmp 2f
14156+1: pax_enter_kernel_user
14157+2:
14158+#else
14159+ pax_enter_kernel
14160+#endif
14161 movq %rsp,%rdi /* pt_regs pointer */
14162 xorl %esi,%esi /* no error code */
14163 call \do_sym
14164 jmp paranoid_exit /* %ebx: no swapgs flag */
14165 CFI_ENDPROC
14166-END(\sym)
14167+ENDPROC(\sym)
14168 .endm
14169
14170-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14171+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14172 .macro paranoidzeroentry_ist sym do_sym ist
14173 ENTRY(\sym)
14174 INTR_FRAME
14175@@ -1056,14 +1388,30 @@ ENTRY(\sym)
14176 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14177 call save_paranoid
14178 TRACE_IRQS_OFF
14179+#ifdef CONFIG_PAX_MEMORY_UDEREF
14180+ testb $3, CS(%rsp)
14181+ jnz 1f
14182+ pax_enter_kernel
14183+ jmp 2f
14184+1: pax_enter_kernel_user
14185+2:
14186+#else
14187+ pax_enter_kernel
14188+#endif
14189 movq %rsp,%rdi /* pt_regs pointer */
14190 xorl %esi,%esi /* no error code */
14191+#ifdef CONFIG_SMP
14192+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14193+ lea init_tss(%r12), %r12
14194+#else
14195+ lea init_tss(%rip), %r12
14196+#endif
14197 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14198 call \do_sym
14199 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14200 jmp paranoid_exit /* %ebx: no swapgs flag */
14201 CFI_ENDPROC
14202-END(\sym)
14203+ENDPROC(\sym)
14204 .endm
14205
14206 .macro errorentry sym do_sym
14207@@ -1074,13 +1422,23 @@ ENTRY(\sym)
14208 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14209 call error_entry
14210 DEFAULT_FRAME 0
14211+#ifdef CONFIG_PAX_MEMORY_UDEREF
14212+ testb $3, CS(%rsp)
14213+ jnz 1f
14214+ pax_enter_kernel
14215+ jmp 2f
14216+1: pax_enter_kernel_user
14217+2:
14218+#else
14219+ pax_enter_kernel
14220+#endif
14221 movq %rsp,%rdi /* pt_regs pointer */
14222 movq ORIG_RAX(%rsp),%rsi /* get error code */
14223 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14224 call \do_sym
14225 jmp error_exit /* %ebx: no swapgs flag */
14226 CFI_ENDPROC
14227-END(\sym)
14228+ENDPROC(\sym)
14229 .endm
14230
14231 /* error code is on the stack already */
14232@@ -1093,13 +1451,23 @@ ENTRY(\sym)
14233 call save_paranoid
14234 DEFAULT_FRAME 0
14235 TRACE_IRQS_OFF
14236+#ifdef CONFIG_PAX_MEMORY_UDEREF
14237+ testb $3, CS(%rsp)
14238+ jnz 1f
14239+ pax_enter_kernel
14240+ jmp 2f
14241+1: pax_enter_kernel_user
14242+2:
14243+#else
14244+ pax_enter_kernel
14245+#endif
14246 movq %rsp,%rdi /* pt_regs pointer */
14247 movq ORIG_RAX(%rsp),%rsi /* get error code */
14248 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14249 call \do_sym
14250 jmp paranoid_exit /* %ebx: no swapgs flag */
14251 CFI_ENDPROC
14252-END(\sym)
14253+ENDPROC(\sym)
14254 .endm
14255
14256 zeroentry divide_error do_divide_error
14257@@ -1129,9 +1497,10 @@ gs_change:
14258 2: mfence /* workaround */
14259 SWAPGS
14260 popfq_cfi
14261+ pax_force_retaddr
14262 ret
14263 CFI_ENDPROC
14264-END(native_load_gs_index)
14265+ENDPROC(native_load_gs_index)
14266
14267 .section __ex_table,"a"
14268 .align 8
14269@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14270 * Here we are in the child and the registers are set as they were
14271 * at kernel_thread() invocation in the parent.
14272 */
14273+ pax_force_fptr %rsi
14274 call *%rsi
14275 # exit
14276 mov %eax, %edi
14277 call do_exit
14278 ud2 # padding for call trace
14279 CFI_ENDPROC
14280-END(kernel_thread_helper)
14281+ENDPROC(kernel_thread_helper)
14282
14283 /*
14284 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14285@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14286 RESTORE_REST
14287 testq %rax,%rax
14288 je int_ret_from_sys_call
14289- RESTORE_ARGS
14290 UNFAKE_STACK_FRAME
14291+ pax_force_retaddr
14292 ret
14293 CFI_ENDPROC
14294-END(kernel_execve)
14295+ENDPROC(kernel_execve)
14296
14297 /* Call softirq on interrupt stack. Interrupts are off. */
14298 ENTRY(call_softirq)
14299@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14300 CFI_DEF_CFA_REGISTER rsp
14301 CFI_ADJUST_CFA_OFFSET -8
14302 decl PER_CPU_VAR(irq_count)
14303+ pax_force_retaddr
14304 ret
14305 CFI_ENDPROC
14306-END(call_softirq)
14307+ENDPROC(call_softirq)
14308
14309 #ifdef CONFIG_XEN
14310 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14311@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14312 decl PER_CPU_VAR(irq_count)
14313 jmp error_exit
14314 CFI_ENDPROC
14315-END(xen_do_hypervisor_callback)
14316+ENDPROC(xen_do_hypervisor_callback)
14317
14318 /*
14319 * Hypervisor uses this for application faults while it executes.
14320@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14321 SAVE_ALL
14322 jmp error_exit
14323 CFI_ENDPROC
14324-END(xen_failsafe_callback)
14325+ENDPROC(xen_failsafe_callback)
14326
14327 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14328 xen_hvm_callback_vector xen_evtchn_do_upcall
14329@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14330 TRACE_IRQS_OFF
14331 testl %ebx,%ebx /* swapgs needed? */
14332 jnz paranoid_restore
14333- testl $3,CS(%rsp)
14334+ testb $3,CS(%rsp)
14335 jnz paranoid_userspace
14336+#ifdef CONFIG_PAX_MEMORY_UDEREF
14337+ pax_exit_kernel
14338+ TRACE_IRQS_IRETQ 0
14339+ SWAPGS_UNSAFE_STACK
14340+ RESTORE_ALL 8
14341+ pax_force_retaddr_bts
14342+ jmp irq_return
14343+#endif
14344 paranoid_swapgs:
14345+#ifdef CONFIG_PAX_MEMORY_UDEREF
14346+ pax_exit_kernel_user
14347+#else
14348+ pax_exit_kernel
14349+#endif
14350 TRACE_IRQS_IRETQ 0
14351 SWAPGS_UNSAFE_STACK
14352 RESTORE_ALL 8
14353 jmp irq_return
14354 paranoid_restore:
14355+ pax_exit_kernel
14356 TRACE_IRQS_IRETQ 0
14357 RESTORE_ALL 8
14358+ pax_force_retaddr_bts
14359 jmp irq_return
14360 paranoid_userspace:
14361 GET_THREAD_INFO(%rcx)
14362@@ -1394,7 +1780,7 @@ paranoid_schedule:
14363 TRACE_IRQS_OFF
14364 jmp paranoid_userspace
14365 CFI_ENDPROC
14366-END(paranoid_exit)
14367+ENDPROC(paranoid_exit)
14368
14369 /*
14370 * Exception entry point. This expects an error code/orig_rax on the stack.
14371@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14372 movq_cfi r14, R14+8
14373 movq_cfi r15, R15+8
14374 xorl %ebx,%ebx
14375- testl $3,CS+8(%rsp)
14376+ testb $3,CS+8(%rsp)
14377 je error_kernelspace
14378 error_swapgs:
14379 SWAPGS
14380 error_sti:
14381 TRACE_IRQS_OFF
14382+ pax_force_retaddr_bts
14383 ret
14384
14385 /*
14386@@ -1453,7 +1840,7 @@ bstep_iret:
14387 movq %rcx,RIP+8(%rsp)
14388 jmp error_swapgs
14389 CFI_ENDPROC
14390-END(error_entry)
14391+ENDPROC(error_entry)
14392
14393
14394 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14395@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14396 jnz retint_careful
14397 jmp retint_swapgs
14398 CFI_ENDPROC
14399-END(error_exit)
14400+ENDPROC(error_exit)
14401
14402
14403 /* runs on exception stack */
14404@@ -1485,6 +1872,16 @@ ENTRY(nmi)
14405 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14406 call save_paranoid
14407 DEFAULT_FRAME 0
14408+#ifdef CONFIG_PAX_MEMORY_UDEREF
14409+ testb $3, CS(%rsp)
14410+ jnz 1f
14411+ pax_enter_kernel
14412+ jmp 2f
14413+1: pax_enter_kernel_user
14414+2:
14415+#else
14416+ pax_enter_kernel
14417+#endif
14418 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14419 movq %rsp,%rdi
14420 movq $-1,%rsi
14421@@ -1495,12 +1892,28 @@ ENTRY(nmi)
14422 DISABLE_INTERRUPTS(CLBR_NONE)
14423 testl %ebx,%ebx /* swapgs needed? */
14424 jnz nmi_restore
14425- testl $3,CS(%rsp)
14426+ testb $3,CS(%rsp)
14427 jnz nmi_userspace
14428+#ifdef CONFIG_PAX_MEMORY_UDEREF
14429+ pax_exit_kernel
14430+ SWAPGS_UNSAFE_STACK
14431+ RESTORE_ALL 8
14432+ pax_force_retaddr_bts
14433+ jmp irq_return
14434+#endif
14435 nmi_swapgs:
14436+#ifdef CONFIG_PAX_MEMORY_UDEREF
14437+ pax_exit_kernel_user
14438+#else
14439+ pax_exit_kernel
14440+#endif
14441 SWAPGS_UNSAFE_STACK
14442+ RESTORE_ALL 8
14443+ jmp irq_return
14444 nmi_restore:
14445+ pax_exit_kernel
14446 RESTORE_ALL 8
14447+ pax_force_retaddr_bts
14448 jmp irq_return
14449 nmi_userspace:
14450 GET_THREAD_INFO(%rcx)
14451@@ -1529,14 +1942,14 @@ nmi_schedule:
14452 jmp paranoid_exit
14453 CFI_ENDPROC
14454 #endif
14455-END(nmi)
14456+ENDPROC(nmi)
14457
14458 ENTRY(ignore_sysret)
14459 CFI_STARTPROC
14460 mov $-ENOSYS,%eax
14461 sysret
14462 CFI_ENDPROC
14463-END(ignore_sysret)
14464+ENDPROC(ignore_sysret)
14465
14466 /*
14467 * End of kprobes section
14468diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14469index c9a281f..ce2f317 100644
14470--- a/arch/x86/kernel/ftrace.c
14471+++ b/arch/x86/kernel/ftrace.c
14472@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14473 static const void *mod_code_newcode; /* holds the text to write to the IP */
14474
14475 static unsigned nmi_wait_count;
14476-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14477+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14478
14479 int ftrace_arch_read_dyn_info(char *buf, int size)
14480 {
14481@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14482
14483 r = snprintf(buf, size, "%u %u",
14484 nmi_wait_count,
14485- atomic_read(&nmi_update_count));
14486+ atomic_read_unchecked(&nmi_update_count));
14487 return r;
14488 }
14489
14490@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14491
14492 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14493 smp_rmb();
14494+ pax_open_kernel();
14495 ftrace_mod_code();
14496- atomic_inc(&nmi_update_count);
14497+ pax_close_kernel();
14498+ atomic_inc_unchecked(&nmi_update_count);
14499 }
14500 /* Must have previous changes seen before executions */
14501 smp_mb();
14502@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14503 {
14504 unsigned char replaced[MCOUNT_INSN_SIZE];
14505
14506+ ip = ktla_ktva(ip);
14507+
14508 /*
14509 * Note: Due to modules and __init, code can
14510 * disappear and change, we need to protect against faulting
14511@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14512 unsigned char old[MCOUNT_INSN_SIZE], *new;
14513 int ret;
14514
14515- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14516+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14517 new = ftrace_call_replace(ip, (unsigned long)func);
14518 ret = ftrace_modify_code(ip, old, new);
14519
14520@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14521 {
14522 unsigned char code[MCOUNT_INSN_SIZE];
14523
14524+ ip = ktla_ktva(ip);
14525+
14526 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14527 return -EFAULT;
14528
14529diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14530index 3bb0850..55a56f4 100644
14531--- a/arch/x86/kernel/head32.c
14532+++ b/arch/x86/kernel/head32.c
14533@@ -19,6 +19,7 @@
14534 #include <asm/io_apic.h>
14535 #include <asm/bios_ebda.h>
14536 #include <asm/tlbflush.h>
14537+#include <asm/boot.h>
14538
14539 static void __init i386_default_early_setup(void)
14540 {
14541@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14542 {
14543 memblock_init();
14544
14545- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14546+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14547
14548 #ifdef CONFIG_BLK_DEV_INITRD
14549 /* Reserve INITRD */
14550diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14551index ce0be7c..c41476e 100644
14552--- a/arch/x86/kernel/head_32.S
14553+++ b/arch/x86/kernel/head_32.S
14554@@ -25,6 +25,12 @@
14555 /* Physical address */
14556 #define pa(X) ((X) - __PAGE_OFFSET)
14557
14558+#ifdef CONFIG_PAX_KERNEXEC
14559+#define ta(X) (X)
14560+#else
14561+#define ta(X) ((X) - __PAGE_OFFSET)
14562+#endif
14563+
14564 /*
14565 * References to members of the new_cpu_data structure.
14566 */
14567@@ -54,11 +60,7 @@
14568 * and small than max_low_pfn, otherwise will waste some page table entries
14569 */
14570
14571-#if PTRS_PER_PMD > 1
14572-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14573-#else
14574-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14575-#endif
14576+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14577
14578 /* Number of possible pages in the lowmem region */
14579 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14580@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14581 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14582
14583 /*
14584+ * Real beginning of normal "text" segment
14585+ */
14586+ENTRY(stext)
14587+ENTRY(_stext)
14588+
14589+/*
14590 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14591 * %esi points to the real-mode code as a 32-bit pointer.
14592 * CS and DS must be 4 GB flat segments, but we don't depend on
14593@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14594 * can.
14595 */
14596 __HEAD
14597+
14598+#ifdef CONFIG_PAX_KERNEXEC
14599+ jmp startup_32
14600+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14601+.fill PAGE_SIZE-5,1,0xcc
14602+#endif
14603+
14604 ENTRY(startup_32)
14605 movl pa(stack_start),%ecx
14606
14607@@ -105,6 +120,57 @@ ENTRY(startup_32)
14608 2:
14609 leal -__PAGE_OFFSET(%ecx),%esp
14610
14611+#ifdef CONFIG_SMP
14612+ movl $pa(cpu_gdt_table),%edi
14613+ movl $__per_cpu_load,%eax
14614+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14615+ rorl $16,%eax
14616+ movb %al,__KERNEL_PERCPU + 4(%edi)
14617+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14618+ movl $__per_cpu_end - 1,%eax
14619+ subl $__per_cpu_start,%eax
14620+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14621+#endif
14622+
14623+#ifdef CONFIG_PAX_MEMORY_UDEREF
14624+ movl $NR_CPUS,%ecx
14625+ movl $pa(cpu_gdt_table),%edi
14626+1:
14627+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14628+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14629+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14630+ addl $PAGE_SIZE_asm,%edi
14631+ loop 1b
14632+#endif
14633+
14634+#ifdef CONFIG_PAX_KERNEXEC
14635+ movl $pa(boot_gdt),%edi
14636+ movl $__LOAD_PHYSICAL_ADDR,%eax
14637+ movw %ax,__BOOT_CS + 2(%edi)
14638+ rorl $16,%eax
14639+ movb %al,__BOOT_CS + 4(%edi)
14640+ movb %ah,__BOOT_CS + 7(%edi)
14641+ rorl $16,%eax
14642+
14643+ ljmp $(__BOOT_CS),$1f
14644+1:
14645+
14646+ movl $NR_CPUS,%ecx
14647+ movl $pa(cpu_gdt_table),%edi
14648+ addl $__PAGE_OFFSET,%eax
14649+1:
14650+ movw %ax,__KERNEL_CS + 2(%edi)
14651+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14652+ rorl $16,%eax
14653+ movb %al,__KERNEL_CS + 4(%edi)
14654+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14655+ movb %ah,__KERNEL_CS + 7(%edi)
14656+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14657+ rorl $16,%eax
14658+ addl $PAGE_SIZE_asm,%edi
14659+ loop 1b
14660+#endif
14661+
14662 /*
14663 * Clear BSS first so that there are no surprises...
14664 */
14665@@ -195,8 +261,11 @@ ENTRY(startup_32)
14666 movl %eax, pa(max_pfn_mapped)
14667
14668 /* Do early initialization of the fixmap area */
14669- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14670- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14671+#ifdef CONFIG_COMPAT_VDSO
14672+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14673+#else
14674+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14675+#endif
14676 #else /* Not PAE */
14677
14678 page_pde_offset = (__PAGE_OFFSET >> 20);
14679@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14680 movl %eax, pa(max_pfn_mapped)
14681
14682 /* Do early initialization of the fixmap area */
14683- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14684- movl %eax,pa(initial_page_table+0xffc)
14685+#ifdef CONFIG_COMPAT_VDSO
14686+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14687+#else
14688+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14689+#endif
14690 #endif
14691
14692 #ifdef CONFIG_PARAVIRT
14693@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14694 cmpl $num_subarch_entries, %eax
14695 jae bad_subarch
14696
14697- movl pa(subarch_entries)(,%eax,4), %eax
14698- subl $__PAGE_OFFSET, %eax
14699- jmp *%eax
14700+ jmp *pa(subarch_entries)(,%eax,4)
14701
14702 bad_subarch:
14703 WEAK(lguest_entry)
14704@@ -255,10 +325,10 @@ WEAK(xen_entry)
14705 __INITDATA
14706
14707 subarch_entries:
14708- .long default_entry /* normal x86/PC */
14709- .long lguest_entry /* lguest hypervisor */
14710- .long xen_entry /* Xen hypervisor */
14711- .long default_entry /* Moorestown MID */
14712+ .long ta(default_entry) /* normal x86/PC */
14713+ .long ta(lguest_entry) /* lguest hypervisor */
14714+ .long ta(xen_entry) /* Xen hypervisor */
14715+ .long ta(default_entry) /* Moorestown MID */
14716 num_subarch_entries = (. - subarch_entries) / 4
14717 .previous
14718 #else
14719@@ -312,6 +382,7 @@ default_entry:
14720 orl %edx,%eax
14721 movl %eax,%cr4
14722
14723+#ifdef CONFIG_X86_PAE
14724 testb $X86_CR4_PAE, %al # check if PAE is enabled
14725 jz 6f
14726
14727@@ -340,6 +411,9 @@ default_entry:
14728 /* Make changes effective */
14729 wrmsr
14730
14731+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14732+#endif
14733+
14734 6:
14735
14736 /*
14737@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14738 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14739 movl %eax,%ss # after changing gdt.
14740
14741- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14742+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14743 movl %eax,%ds
14744 movl %eax,%es
14745
14746@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14747 */
14748 cmpb $0,ready
14749 jne 1f
14750- movl $gdt_page,%eax
14751+ movl $cpu_gdt_table,%eax
14752 movl $stack_canary,%ecx
14753+#ifdef CONFIG_SMP
14754+ addl $__per_cpu_load,%ecx
14755+#endif
14756 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14757 shrl $16, %ecx
14758 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14759 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14760 1:
14761-#endif
14762 movl $(__KERNEL_STACK_CANARY),%eax
14763+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14764+ movl $(__USER_DS),%eax
14765+#else
14766+ xorl %eax,%eax
14767+#endif
14768 movl %eax,%gs
14769
14770 xorl %eax,%eax # Clear LDT
14771@@ -558,22 +639,22 @@ early_page_fault:
14772 jmp early_fault
14773
14774 early_fault:
14775- cld
14776 #ifdef CONFIG_PRINTK
14777+ cmpl $1,%ss:early_recursion_flag
14778+ je hlt_loop
14779+ incl %ss:early_recursion_flag
14780+ cld
14781 pusha
14782 movl $(__KERNEL_DS),%eax
14783 movl %eax,%ds
14784 movl %eax,%es
14785- cmpl $2,early_recursion_flag
14786- je hlt_loop
14787- incl early_recursion_flag
14788 movl %cr2,%eax
14789 pushl %eax
14790 pushl %edx /* trapno */
14791 pushl $fault_msg
14792 call printk
14793+; call dump_stack
14794 #endif
14795- call dump_stack
14796 hlt_loop:
14797 hlt
14798 jmp hlt_loop
14799@@ -581,8 +662,11 @@ hlt_loop:
14800 /* This is the default interrupt "handler" :-) */
14801 ALIGN
14802 ignore_int:
14803- cld
14804 #ifdef CONFIG_PRINTK
14805+ cmpl $2,%ss:early_recursion_flag
14806+ je hlt_loop
14807+ incl %ss:early_recursion_flag
14808+ cld
14809 pushl %eax
14810 pushl %ecx
14811 pushl %edx
14812@@ -591,9 +675,6 @@ ignore_int:
14813 movl $(__KERNEL_DS),%eax
14814 movl %eax,%ds
14815 movl %eax,%es
14816- cmpl $2,early_recursion_flag
14817- je hlt_loop
14818- incl early_recursion_flag
14819 pushl 16(%esp)
14820 pushl 24(%esp)
14821 pushl 32(%esp)
14822@@ -622,29 +703,43 @@ ENTRY(initial_code)
14823 /*
14824 * BSS section
14825 */
14826-__PAGE_ALIGNED_BSS
14827- .align PAGE_SIZE
14828 #ifdef CONFIG_X86_PAE
14829+.section .initial_pg_pmd,"a",@progbits
14830 initial_pg_pmd:
14831 .fill 1024*KPMDS,4,0
14832 #else
14833+.section .initial_page_table,"a",@progbits
14834 ENTRY(initial_page_table)
14835 .fill 1024,4,0
14836 #endif
14837+.section .initial_pg_fixmap,"a",@progbits
14838 initial_pg_fixmap:
14839 .fill 1024,4,0
14840+.section .empty_zero_page,"a",@progbits
14841 ENTRY(empty_zero_page)
14842 .fill 4096,1,0
14843+.section .swapper_pg_dir,"a",@progbits
14844 ENTRY(swapper_pg_dir)
14845+#ifdef CONFIG_X86_PAE
14846+ .fill 4,8,0
14847+#else
14848 .fill 1024,4,0
14849+#endif
14850+
14851+/*
14852+ * The IDT has to be page-aligned to simplify the Pentium
14853+ * F0 0F bug workaround.. We have a special link segment
14854+ * for this.
14855+ */
14856+.section .idt,"a",@progbits
14857+ENTRY(idt_table)
14858+ .fill 256,8,0
14859
14860 /*
14861 * This starts the data section.
14862 */
14863 #ifdef CONFIG_X86_PAE
14864-__PAGE_ALIGNED_DATA
14865- /* Page-aligned for the benefit of paravirt? */
14866- .align PAGE_SIZE
14867+.section .initial_page_table,"a",@progbits
14868 ENTRY(initial_page_table)
14869 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14870 # if KPMDS == 3
14871@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14872 # error "Kernel PMDs should be 1, 2 or 3"
14873 # endif
14874 .align PAGE_SIZE /* needs to be page-sized too */
14875+
14876+#ifdef CONFIG_PAX_PER_CPU_PGD
14877+ENTRY(cpu_pgd)
14878+ .rept NR_CPUS
14879+ .fill 4,8,0
14880+ .endr
14881+#endif
14882+
14883 #endif
14884
14885 .data
14886 .balign 4
14887 ENTRY(stack_start)
14888- .long init_thread_union+THREAD_SIZE
14889+ .long init_thread_union+THREAD_SIZE-8
14890
14891+ready: .byte 0
14892+
14893+.section .rodata,"a",@progbits
14894 early_recursion_flag:
14895 .long 0
14896
14897-ready: .byte 0
14898-
14899 int_msg:
14900 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14901
14902@@ -707,7 +811,7 @@ fault_msg:
14903 .word 0 # 32 bit align gdt_desc.address
14904 boot_gdt_descr:
14905 .word __BOOT_DS+7
14906- .long boot_gdt - __PAGE_OFFSET
14907+ .long pa(boot_gdt)
14908
14909 .word 0 # 32-bit align idt_desc.address
14910 idt_descr:
14911@@ -718,7 +822,7 @@ idt_descr:
14912 .word 0 # 32 bit align gdt_desc.address
14913 ENTRY(early_gdt_descr)
14914 .word GDT_ENTRIES*8-1
14915- .long gdt_page /* Overwritten for secondary CPUs */
14916+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14917
14918 /*
14919 * The boot_gdt must mirror the equivalent in setup.S and is
14920@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14921 .align L1_CACHE_BYTES
14922 ENTRY(boot_gdt)
14923 .fill GDT_ENTRY_BOOT_CS,8,0
14924- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14925- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14926+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14927+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14928+
14929+ .align PAGE_SIZE_asm
14930+ENTRY(cpu_gdt_table)
14931+ .rept NR_CPUS
14932+ .quad 0x0000000000000000 /* NULL descriptor */
14933+ .quad 0x0000000000000000 /* 0x0b reserved */
14934+ .quad 0x0000000000000000 /* 0x13 reserved */
14935+ .quad 0x0000000000000000 /* 0x1b reserved */
14936+
14937+#ifdef CONFIG_PAX_KERNEXEC
14938+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14939+#else
14940+ .quad 0x0000000000000000 /* 0x20 unused */
14941+#endif
14942+
14943+ .quad 0x0000000000000000 /* 0x28 unused */
14944+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14945+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14946+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14947+ .quad 0x0000000000000000 /* 0x4b reserved */
14948+ .quad 0x0000000000000000 /* 0x53 reserved */
14949+ .quad 0x0000000000000000 /* 0x5b reserved */
14950+
14951+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14952+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14953+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14954+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14955+
14956+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14957+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14958+
14959+ /*
14960+ * Segments used for calling PnP BIOS have byte granularity.
14961+ * The code segments and data segments have fixed 64k limits,
14962+ * the transfer segment sizes are set at run time.
14963+ */
14964+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14965+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14966+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14967+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14968+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14969+
14970+ /*
14971+ * The APM segments have byte granularity and their bases
14972+ * are set at run time. All have 64k limits.
14973+ */
14974+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14975+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14976+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14977+
14978+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14979+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14980+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14981+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14982+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14983+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14984+
14985+ /* Be sure this is zeroed to avoid false validations in Xen */
14986+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14987+ .endr
14988diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14989index e11e394..9aebc5d 100644
14990--- a/arch/x86/kernel/head_64.S
14991+++ b/arch/x86/kernel/head_64.S
14992@@ -19,6 +19,8 @@
14993 #include <asm/cache.h>
14994 #include <asm/processor-flags.h>
14995 #include <asm/percpu.h>
14996+#include <asm/cpufeature.h>
14997+#include <asm/alternative-asm.h>
14998
14999 #ifdef CONFIG_PARAVIRT
15000 #include <asm/asm-offsets.h>
15001@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15002 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15003 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15004 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15005+L4_VMALLOC_START = pgd_index(VMALLOC_START)
15006+L3_VMALLOC_START = pud_index(VMALLOC_START)
15007+L4_VMALLOC_END = pgd_index(VMALLOC_END)
15008+L3_VMALLOC_END = pud_index(VMALLOC_END)
15009+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15010+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15011
15012 .text
15013 __HEAD
15014@@ -85,35 +93,23 @@ startup_64:
15015 */
15016 addq %rbp, init_level4_pgt + 0(%rip)
15017 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15018+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15019+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15020+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15021 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15022
15023 addq %rbp, level3_ident_pgt + 0(%rip)
15024+#ifndef CONFIG_XEN
15025+ addq %rbp, level3_ident_pgt + 8(%rip)
15026+#endif
15027
15028- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15029- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15030+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15031+
15032+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15033+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15034
15035 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15036-
15037- /* Add an Identity mapping if I am above 1G */
15038- leaq _text(%rip), %rdi
15039- andq $PMD_PAGE_MASK, %rdi
15040-
15041- movq %rdi, %rax
15042- shrq $PUD_SHIFT, %rax
15043- andq $(PTRS_PER_PUD - 1), %rax
15044- jz ident_complete
15045-
15046- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15047- leaq level3_ident_pgt(%rip), %rbx
15048- movq %rdx, 0(%rbx, %rax, 8)
15049-
15050- movq %rdi, %rax
15051- shrq $PMD_SHIFT, %rax
15052- andq $(PTRS_PER_PMD - 1), %rax
15053- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15054- leaq level2_spare_pgt(%rip), %rbx
15055- movq %rdx, 0(%rbx, %rax, 8)
15056-ident_complete:
15057+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15058
15059 /*
15060 * Fixup the kernel text+data virtual addresses. Note that
15061@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15062 * after the boot processor executes this code.
15063 */
15064
15065- /* Enable PAE mode and PGE */
15066- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15067+ /* Enable PAE mode and PSE/PGE */
15068+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15069 movq %rax, %cr4
15070
15071 /* Setup early boot stage 4 level pagetables. */
15072@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15073 movl $MSR_EFER, %ecx
15074 rdmsr
15075 btsl $_EFER_SCE, %eax /* Enable System Call */
15076- btl $20,%edi /* No Execute supported? */
15077+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15078 jnc 1f
15079 btsl $_EFER_NX, %eax
15080+ leaq init_level4_pgt(%rip), %rdi
15081+#ifndef CONFIG_EFI
15082+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15083+#endif
15084+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15085+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15086+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15087+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15088 1: wrmsr /* Make changes effective */
15089
15090 /* Setup cr0 */
15091@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15092 * jump. In addition we need to ensure %cs is set so we make this
15093 * a far return.
15094 */
15095+ pax_set_fptr_mask
15096 movq initial_code(%rip),%rax
15097 pushq $0 # fake return address to stop unwinder
15098 pushq $__KERNEL_CS # set correct cs
15099@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15100 bad_address:
15101 jmp bad_address
15102
15103- .section ".init.text","ax"
15104+ __INIT
15105 #ifdef CONFIG_EARLY_PRINTK
15106 .globl early_idt_handlers
15107 early_idt_handlers:
15108@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15109 #endif /* EARLY_PRINTK */
15110 1: hlt
15111 jmp 1b
15112+ .previous
15113
15114 #ifdef CONFIG_EARLY_PRINTK
15115+ __INITDATA
15116 early_recursion_flag:
15117 .long 0
15118+ .previous
15119
15120+ .section .rodata,"a",@progbits
15121 early_idt_msg:
15122 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15123 early_idt_ripmsg:
15124 .asciz "RIP %s\n"
15125+ .previous
15126 #endif /* CONFIG_EARLY_PRINTK */
15127- .previous
15128
15129+ .section .rodata,"a",@progbits
15130 #define NEXT_PAGE(name) \
15131 .balign PAGE_SIZE; \
15132 ENTRY(name)
15133@@ -338,7 +348,6 @@ ENTRY(name)
15134 i = i + 1 ; \
15135 .endr
15136
15137- .data
15138 /*
15139 * This default setting generates an ident mapping at address 0x100000
15140 * and a mapping for the kernel that precisely maps virtual address
15141@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15142 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15143 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15144 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15145+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15146+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15147+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15148+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15149+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15150+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15151 .org init_level4_pgt + L4_START_KERNEL*8, 0
15152 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15153 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15154
15155+#ifdef CONFIG_PAX_PER_CPU_PGD
15156+NEXT_PAGE(cpu_pgd)
15157+ .rept NR_CPUS
15158+ .fill 512,8,0
15159+ .endr
15160+#endif
15161+
15162 NEXT_PAGE(level3_ident_pgt)
15163 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15164+#ifdef CONFIG_XEN
15165 .fill 511,8,0
15166+#else
15167+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15168+ .fill 510,8,0
15169+#endif
15170+
15171+NEXT_PAGE(level3_vmalloc_start_pgt)
15172+ .fill 512,8,0
15173+
15174+NEXT_PAGE(level3_vmalloc_end_pgt)
15175+ .fill 512,8,0
15176+
15177+NEXT_PAGE(level3_vmemmap_pgt)
15178+ .fill L3_VMEMMAP_START,8,0
15179+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15180
15181 NEXT_PAGE(level3_kernel_pgt)
15182 .fill L3_START_KERNEL,8,0
15183@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15184 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15185 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15186
15187+NEXT_PAGE(level2_vmemmap_pgt)
15188+ .fill 512,8,0
15189+
15190 NEXT_PAGE(level2_fixmap_pgt)
15191- .fill 506,8,0
15192- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15193- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15194- .fill 5,8,0
15195+ .fill 507,8,0
15196+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15197+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15198+ .fill 4,8,0
15199
15200-NEXT_PAGE(level1_fixmap_pgt)
15201+NEXT_PAGE(level1_vsyscall_pgt)
15202 .fill 512,8,0
15203
15204-NEXT_PAGE(level2_ident_pgt)
15205- /* Since I easily can, map the first 1G.
15206+ /* Since I easily can, map the first 2G.
15207 * Don't set NX because code runs from these pages.
15208 */
15209- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15210+NEXT_PAGE(level2_ident_pgt)
15211+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15212
15213 NEXT_PAGE(level2_kernel_pgt)
15214 /*
15215@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15216 * If you want to increase this then increase MODULES_VADDR
15217 * too.)
15218 */
15219- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15220- KERNEL_IMAGE_SIZE/PMD_SIZE)
15221-
15222-NEXT_PAGE(level2_spare_pgt)
15223- .fill 512, 8, 0
15224+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15225
15226 #undef PMDS
15227 #undef NEXT_PAGE
15228
15229- .data
15230+ .align PAGE_SIZE
15231+ENTRY(cpu_gdt_table)
15232+ .rept NR_CPUS
15233+ .quad 0x0000000000000000 /* NULL descriptor */
15234+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15235+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15236+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15237+ .quad 0x00cffb000000ffff /* __USER32_CS */
15238+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15239+ .quad 0x00affb000000ffff /* __USER_CS */
15240+
15241+#ifdef CONFIG_PAX_KERNEXEC
15242+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15243+#else
15244+ .quad 0x0 /* unused */
15245+#endif
15246+
15247+ .quad 0,0 /* TSS */
15248+ .quad 0,0 /* LDT */
15249+ .quad 0,0,0 /* three TLS descriptors */
15250+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15251+ /* asm/segment.h:GDT_ENTRIES must match this */
15252+
15253+ /* zero the remaining page */
15254+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15255+ .endr
15256+
15257 .align 16
15258 .globl early_gdt_descr
15259 early_gdt_descr:
15260 .word GDT_ENTRIES*8-1
15261 early_gdt_descr_base:
15262- .quad INIT_PER_CPU_VAR(gdt_page)
15263+ .quad cpu_gdt_table
15264
15265 ENTRY(phys_base)
15266 /* This must match the first entry in level2_kernel_pgt */
15267 .quad 0x0000000000000000
15268
15269 #include "../../x86/xen/xen-head.S"
15270-
15271- .section .bss, "aw", @nobits
15272+
15273+ .section .rodata,"a",@progbits
15274 .align L1_CACHE_BYTES
15275 ENTRY(idt_table)
15276- .skip IDT_ENTRIES * 16
15277+ .fill 512,8,0
15278
15279 __PAGE_ALIGNED_BSS
15280 .align PAGE_SIZE
15281diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15282index 9c3bd4a..e1d9b35 100644
15283--- a/arch/x86/kernel/i386_ksyms_32.c
15284+++ b/arch/x86/kernel/i386_ksyms_32.c
15285@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15286 EXPORT_SYMBOL(cmpxchg8b_emu);
15287 #endif
15288
15289+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15290+
15291 /* Networking helper routines. */
15292 EXPORT_SYMBOL(csum_partial_copy_generic);
15293+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15294+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15295
15296 EXPORT_SYMBOL(__get_user_1);
15297 EXPORT_SYMBOL(__get_user_2);
15298@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15299
15300 EXPORT_SYMBOL(csum_partial);
15301 EXPORT_SYMBOL(empty_zero_page);
15302+
15303+#ifdef CONFIG_PAX_KERNEXEC
15304+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15305+#endif
15306diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15307index 6104852..6114160 100644
15308--- a/arch/x86/kernel/i8259.c
15309+++ b/arch/x86/kernel/i8259.c
15310@@ -210,7 +210,7 @@ spurious_8259A_irq:
15311 "spurious 8259A interrupt: IRQ%d.\n", irq);
15312 spurious_irq_mask |= irqmask;
15313 }
15314- atomic_inc(&irq_err_count);
15315+ atomic_inc_unchecked(&irq_err_count);
15316 /*
15317 * Theoretically we do not have to handle this IRQ,
15318 * but in Linux this does not cause problems and is
15319diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15320index 43e9ccf..44ccf6f 100644
15321--- a/arch/x86/kernel/init_task.c
15322+++ b/arch/x86/kernel/init_task.c
15323@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15324 * way process stacks are handled. This is done by having a special
15325 * "init_task" linker map entry..
15326 */
15327-union thread_union init_thread_union __init_task_data =
15328- { INIT_THREAD_INFO(init_task) };
15329+union thread_union init_thread_union __init_task_data;
15330
15331 /*
15332 * Initial task structure.
15333@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15334 * section. Since TSS's are completely CPU-local, we want them
15335 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15336 */
15337-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15338-
15339+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15340+EXPORT_SYMBOL(init_tss);
15341diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15342index 8c96897..be66bfa 100644
15343--- a/arch/x86/kernel/ioport.c
15344+++ b/arch/x86/kernel/ioport.c
15345@@ -6,6 +6,7 @@
15346 #include <linux/sched.h>
15347 #include <linux/kernel.h>
15348 #include <linux/capability.h>
15349+#include <linux/security.h>
15350 #include <linux/errno.h>
15351 #include <linux/types.h>
15352 #include <linux/ioport.h>
15353@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15354
15355 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15356 return -EINVAL;
15357+#ifdef CONFIG_GRKERNSEC_IO
15358+ if (turn_on && grsec_disable_privio) {
15359+ gr_handle_ioperm();
15360+ return -EPERM;
15361+ }
15362+#endif
15363 if (turn_on && !capable(CAP_SYS_RAWIO))
15364 return -EPERM;
15365
15366@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15367 * because the ->io_bitmap_max value must match the bitmap
15368 * contents:
15369 */
15370- tss = &per_cpu(init_tss, get_cpu());
15371+ tss = init_tss + get_cpu();
15372
15373 if (turn_on)
15374 bitmap_clear(t->io_bitmap_ptr, from, num);
15375@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15376 return -EINVAL;
15377 /* Trying to gain more privileges? */
15378 if (level > old) {
15379+#ifdef CONFIG_GRKERNSEC_IO
15380+ if (grsec_disable_privio) {
15381+ gr_handle_iopl();
15382+ return -EPERM;
15383+ }
15384+#endif
15385 if (!capable(CAP_SYS_RAWIO))
15386 return -EPERM;
15387 }
15388diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15389index 429e0c9..17b3ece 100644
15390--- a/arch/x86/kernel/irq.c
15391+++ b/arch/x86/kernel/irq.c
15392@@ -18,7 +18,7 @@
15393 #include <asm/mce.h>
15394 #include <asm/hw_irq.h>
15395
15396-atomic_t irq_err_count;
15397+atomic_unchecked_t irq_err_count;
15398
15399 /* Function pointer for generic interrupt vector handling */
15400 void (*x86_platform_ipi_callback)(void) = NULL;
15401@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15402 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15403 seq_printf(p, " Machine check polls\n");
15404 #endif
15405- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15406+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15407 #if defined(CONFIG_X86_IO_APIC)
15408- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15409+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15410 #endif
15411 return 0;
15412 }
15413@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15414
15415 u64 arch_irq_stat(void)
15416 {
15417- u64 sum = atomic_read(&irq_err_count);
15418+ u64 sum = atomic_read_unchecked(&irq_err_count);
15419
15420 #ifdef CONFIG_X86_IO_APIC
15421- sum += atomic_read(&irq_mis_count);
15422+ sum += atomic_read_unchecked(&irq_mis_count);
15423 #endif
15424 return sum;
15425 }
15426diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15427index 7209070..cbcd71a 100644
15428--- a/arch/x86/kernel/irq_32.c
15429+++ b/arch/x86/kernel/irq_32.c
15430@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15431 __asm__ __volatile__("andl %%esp,%0" :
15432 "=r" (sp) : "0" (THREAD_SIZE - 1));
15433
15434- return sp < (sizeof(struct thread_info) + STACK_WARN);
15435+ return sp < STACK_WARN;
15436 }
15437
15438 static void print_stack_overflow(void)
15439@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15440 * per-CPU IRQ handling contexts (thread information and stack)
15441 */
15442 union irq_ctx {
15443- struct thread_info tinfo;
15444- u32 stack[THREAD_SIZE/sizeof(u32)];
15445+ unsigned long previous_esp;
15446+ u32 stack[THREAD_SIZE/sizeof(u32)];
15447 } __attribute__((aligned(THREAD_SIZE)));
15448
15449 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15450@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15451 static inline int
15452 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15453 {
15454- union irq_ctx *curctx, *irqctx;
15455+ union irq_ctx *irqctx;
15456 u32 *isp, arg1, arg2;
15457
15458- curctx = (union irq_ctx *) current_thread_info();
15459 irqctx = __this_cpu_read(hardirq_ctx);
15460
15461 /*
15462@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15463 * handler) we can't do that and just have to keep using the
15464 * current stack (which is the irq stack already after all)
15465 */
15466- if (unlikely(curctx == irqctx))
15467+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15468 return 0;
15469
15470 /* build the stack frame on the IRQ stack */
15471- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15472- irqctx->tinfo.task = curctx->tinfo.task;
15473- irqctx->tinfo.previous_esp = current_stack_pointer;
15474+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15475+ irqctx->previous_esp = current_stack_pointer;
15476
15477- /*
15478- * Copy the softirq bits in preempt_count so that the
15479- * softirq checks work in the hardirq context.
15480- */
15481- irqctx->tinfo.preempt_count =
15482- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15483- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15484+#ifdef CONFIG_PAX_MEMORY_UDEREF
15485+ __set_fs(MAKE_MM_SEG(0));
15486+#endif
15487
15488 if (unlikely(overflow))
15489 call_on_stack(print_stack_overflow, isp);
15490@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15491 : "0" (irq), "1" (desc), "2" (isp),
15492 "D" (desc->handle_irq)
15493 : "memory", "cc", "ecx");
15494+
15495+#ifdef CONFIG_PAX_MEMORY_UDEREF
15496+ __set_fs(current_thread_info()->addr_limit);
15497+#endif
15498+
15499 return 1;
15500 }
15501
15502@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15503 */
15504 void __cpuinit irq_ctx_init(int cpu)
15505 {
15506- union irq_ctx *irqctx;
15507-
15508 if (per_cpu(hardirq_ctx, cpu))
15509 return;
15510
15511- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15512- THREAD_FLAGS,
15513- THREAD_ORDER));
15514- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15515- irqctx->tinfo.cpu = cpu;
15516- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15517- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15518-
15519- per_cpu(hardirq_ctx, cpu) = irqctx;
15520-
15521- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15522- THREAD_FLAGS,
15523- THREAD_ORDER));
15524- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15525- irqctx->tinfo.cpu = cpu;
15526- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15527-
15528- per_cpu(softirq_ctx, cpu) = irqctx;
15529+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15530+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15531
15532 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15533 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15534@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15535 asmlinkage void do_softirq(void)
15536 {
15537 unsigned long flags;
15538- struct thread_info *curctx;
15539 union irq_ctx *irqctx;
15540 u32 *isp;
15541
15542@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15543 local_irq_save(flags);
15544
15545 if (local_softirq_pending()) {
15546- curctx = current_thread_info();
15547 irqctx = __this_cpu_read(softirq_ctx);
15548- irqctx->tinfo.task = curctx->task;
15549- irqctx->tinfo.previous_esp = current_stack_pointer;
15550+ irqctx->previous_esp = current_stack_pointer;
15551
15552 /* build the stack frame on the softirq stack */
15553- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15554+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15555+
15556+#ifdef CONFIG_PAX_MEMORY_UDEREF
15557+ __set_fs(MAKE_MM_SEG(0));
15558+#endif
15559
15560 call_on_stack(__do_softirq, isp);
15561+
15562+#ifdef CONFIG_PAX_MEMORY_UDEREF
15563+ __set_fs(current_thread_info()->addr_limit);
15564+#endif
15565+
15566 /*
15567 * Shouldn't happen, we returned above if in_interrupt():
15568 */
15569diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15570index 69bca46..0bac999 100644
15571--- a/arch/x86/kernel/irq_64.c
15572+++ b/arch/x86/kernel/irq_64.c
15573@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15574 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15575 u64 curbase = (u64)task_stack_page(current);
15576
15577- if (user_mode_vm(regs))
15578+ if (user_mode(regs))
15579 return;
15580
15581 WARN_ONCE(regs->sp >= curbase &&
15582diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15583index faba577..93b9e71 100644
15584--- a/arch/x86/kernel/kgdb.c
15585+++ b/arch/x86/kernel/kgdb.c
15586@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15587 #ifdef CONFIG_X86_32
15588 switch (regno) {
15589 case GDB_SS:
15590- if (!user_mode_vm(regs))
15591+ if (!user_mode(regs))
15592 *(unsigned long *)mem = __KERNEL_DS;
15593 break;
15594 case GDB_SP:
15595- if (!user_mode_vm(regs))
15596+ if (!user_mode(regs))
15597 *(unsigned long *)mem = kernel_stack_pointer(regs);
15598 break;
15599 case GDB_GS:
15600@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15601 case 'k':
15602 /* clear the trace bit */
15603 linux_regs->flags &= ~X86_EFLAGS_TF;
15604- atomic_set(&kgdb_cpu_doing_single_step, -1);
15605+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15606
15607 /* set the trace bit if we're stepping */
15608 if (remcomInBuffer[0] == 's') {
15609 linux_regs->flags |= X86_EFLAGS_TF;
15610- atomic_set(&kgdb_cpu_doing_single_step,
15611+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15612 raw_smp_processor_id());
15613 }
15614
15615@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15616
15617 switch (cmd) {
15618 case DIE_DEBUG:
15619- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15620+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15621 if (user_mode(regs))
15622 return single_step_cont(regs, args);
15623 break;
15624diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15625index 7da647d..5d3c4c1 100644
15626--- a/arch/x86/kernel/kprobes.c
15627+++ b/arch/x86/kernel/kprobes.c
15628@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15629 } __attribute__((packed)) *insn;
15630
15631 insn = (struct __arch_relative_insn *)from;
15632+
15633+ pax_open_kernel();
15634 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15635 insn->op = op;
15636+ pax_close_kernel();
15637 }
15638
15639 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15640@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15641 kprobe_opcode_t opcode;
15642 kprobe_opcode_t *orig_opcodes = opcodes;
15643
15644- if (search_exception_tables((unsigned long)opcodes))
15645+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15646 return 0; /* Page fault may occur on this address. */
15647
15648 retry:
15649@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15650 }
15651 }
15652 insn_get_length(&insn);
15653+ pax_open_kernel();
15654 memcpy(dest, insn.kaddr, insn.length);
15655+ pax_close_kernel();
15656
15657 #ifdef CONFIG_X86_64
15658 if (insn_rip_relative(&insn)) {
15659@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15660 (u8 *) dest;
15661 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15662 disp = (u8 *) dest + insn_offset_displacement(&insn);
15663+ pax_open_kernel();
15664 *(s32 *) disp = (s32) newdisp;
15665+ pax_close_kernel();
15666 }
15667 #endif
15668 return insn.length;
15669@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15670 */
15671 __copy_instruction(p->ainsn.insn, p->addr, 0);
15672
15673- if (can_boost(p->addr))
15674+ if (can_boost(ktla_ktva(p->addr)))
15675 p->ainsn.boostable = 0;
15676 else
15677 p->ainsn.boostable = -1;
15678
15679- p->opcode = *p->addr;
15680+ p->opcode = *(ktla_ktva(p->addr));
15681 }
15682
15683 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15684@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15685 * nor set current_kprobe, because it doesn't use single
15686 * stepping.
15687 */
15688- regs->ip = (unsigned long)p->ainsn.insn;
15689+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15690 preempt_enable_no_resched();
15691 return;
15692 }
15693@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15694 if (p->opcode == BREAKPOINT_INSTRUCTION)
15695 regs->ip = (unsigned long)p->addr;
15696 else
15697- regs->ip = (unsigned long)p->ainsn.insn;
15698+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15699 }
15700
15701 /*
15702@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15703 setup_singlestep(p, regs, kcb, 0);
15704 return 1;
15705 }
15706- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15707+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15708 /*
15709 * The breakpoint instruction was removed right
15710 * after we hit it. Another cpu has removed
15711@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15712 " movq %rax, 152(%rsp)\n"
15713 RESTORE_REGS_STRING
15714 " popfq\n"
15715+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15716+ " btsq $63,(%rsp)\n"
15717+#endif
15718 #else
15719 " pushf\n"
15720 SAVE_REGS_STRING
15721@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15722 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15723 {
15724 unsigned long *tos = stack_addr(regs);
15725- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15726+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15727 unsigned long orig_ip = (unsigned long)p->addr;
15728 kprobe_opcode_t *insn = p->ainsn.insn;
15729
15730@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15731 struct die_args *args = data;
15732 int ret = NOTIFY_DONE;
15733
15734- if (args->regs && user_mode_vm(args->regs))
15735+ if (args->regs && user_mode(args->regs))
15736 return ret;
15737
15738 switch (val) {
15739@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15740 * Verify if the address gap is in 2GB range, because this uses
15741 * a relative jump.
15742 */
15743- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15744+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15745 if (abs(rel) > 0x7fffffff)
15746 return -ERANGE;
15747
15748@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15749 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15750
15751 /* Set probe function call */
15752- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15753+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15754
15755 /* Set returning jmp instruction at the tail of out-of-line buffer */
15756 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15757- (u8 *)op->kp.addr + op->optinsn.size);
15758+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15759
15760 flush_icache_range((unsigned long) buf,
15761 (unsigned long) buf + TMPL_END_IDX +
15762@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15763 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15764
15765 /* Backup instructions which will be replaced by jump address */
15766- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15767+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15768 RELATIVE_ADDR_SIZE);
15769
15770 insn_buf[0] = RELATIVEJUMP_OPCODE;
15771diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15772index a9c2116..a52d4fc 100644
15773--- a/arch/x86/kernel/kvm.c
15774+++ b/arch/x86/kernel/kvm.c
15775@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15776 pv_mmu_ops.set_pud = kvm_set_pud;
15777 #if PAGETABLE_LEVELS == 4
15778 pv_mmu_ops.set_pgd = kvm_set_pgd;
15779+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15780 #endif
15781 #endif
15782 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15783diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15784index ea69726..604d066 100644
15785--- a/arch/x86/kernel/ldt.c
15786+++ b/arch/x86/kernel/ldt.c
15787@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15788 if (reload) {
15789 #ifdef CONFIG_SMP
15790 preempt_disable();
15791- load_LDT(pc);
15792+ load_LDT_nolock(pc);
15793 if (!cpumask_equal(mm_cpumask(current->mm),
15794 cpumask_of(smp_processor_id())))
15795 smp_call_function(flush_ldt, current->mm, 1);
15796 preempt_enable();
15797 #else
15798- load_LDT(pc);
15799+ load_LDT_nolock(pc);
15800 #endif
15801 }
15802 if (oldsize) {
15803@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15804 return err;
15805
15806 for (i = 0; i < old->size; i++)
15807- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15808+ write_ldt_entry(new->ldt, i, old->ldt + i);
15809 return 0;
15810 }
15811
15812@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15813 retval = copy_ldt(&mm->context, &old_mm->context);
15814 mutex_unlock(&old_mm->context.lock);
15815 }
15816+
15817+ if (tsk == current) {
15818+ mm->context.vdso = 0;
15819+
15820+#ifdef CONFIG_X86_32
15821+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15822+ mm->context.user_cs_base = 0UL;
15823+ mm->context.user_cs_limit = ~0UL;
15824+
15825+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15826+ cpus_clear(mm->context.cpu_user_cs_mask);
15827+#endif
15828+
15829+#endif
15830+#endif
15831+
15832+ }
15833+
15834 return retval;
15835 }
15836
15837@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15838 }
15839 }
15840
15841+#ifdef CONFIG_PAX_SEGMEXEC
15842+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15843+ error = -EINVAL;
15844+ goto out_unlock;
15845+ }
15846+#endif
15847+
15848 fill_ldt(&ldt, &ldt_info);
15849 if (oldmode)
15850 ldt.avl = 0;
15851diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15852index a3fa43b..8966f4c 100644
15853--- a/arch/x86/kernel/machine_kexec_32.c
15854+++ b/arch/x86/kernel/machine_kexec_32.c
15855@@ -27,7 +27,7 @@
15856 #include <asm/cacheflush.h>
15857 #include <asm/debugreg.h>
15858
15859-static void set_idt(void *newidt, __u16 limit)
15860+static void set_idt(struct desc_struct *newidt, __u16 limit)
15861 {
15862 struct desc_ptr curidt;
15863
15864@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15865 }
15866
15867
15868-static void set_gdt(void *newgdt, __u16 limit)
15869+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15870 {
15871 struct desc_ptr curgdt;
15872
15873@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15874 }
15875
15876 control_page = page_address(image->control_code_page);
15877- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15878+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15879
15880 relocate_kernel_ptr = control_page;
15881 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15882diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15883index 3ca42d0..7cff8cc 100644
15884--- a/arch/x86/kernel/microcode_intel.c
15885+++ b/arch/x86/kernel/microcode_intel.c
15886@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15887
15888 static int get_ucode_user(void *to, const void *from, size_t n)
15889 {
15890- return copy_from_user(to, from, n);
15891+ return copy_from_user(to, (const void __force_user *)from, n);
15892 }
15893
15894 static enum ucode_state
15895 request_microcode_user(int cpu, const void __user *buf, size_t size)
15896 {
15897- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15898+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15899 }
15900
15901 static void microcode_fini_cpu(int cpu)
15902diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15903index 925179f..267ac7a 100644
15904--- a/arch/x86/kernel/module.c
15905+++ b/arch/x86/kernel/module.c
15906@@ -36,15 +36,60 @@
15907 #define DEBUGP(fmt...)
15908 #endif
15909
15910-void *module_alloc(unsigned long size)
15911+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15912 {
15913- if (PAGE_ALIGN(size) > MODULES_LEN)
15914+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15915 return NULL;
15916 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15917- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15918+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15919 -1, __builtin_return_address(0));
15920 }
15921
15922+void *module_alloc(unsigned long size)
15923+{
15924+
15925+#ifdef CONFIG_PAX_KERNEXEC
15926+ return __module_alloc(size, PAGE_KERNEL);
15927+#else
15928+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15929+#endif
15930+
15931+}
15932+
15933+#ifdef CONFIG_PAX_KERNEXEC
15934+#ifdef CONFIG_X86_32
15935+void *module_alloc_exec(unsigned long size)
15936+{
15937+ struct vm_struct *area;
15938+
15939+ if (size == 0)
15940+ return NULL;
15941+
15942+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15943+ return area ? area->addr : NULL;
15944+}
15945+EXPORT_SYMBOL(module_alloc_exec);
15946+
15947+void module_free_exec(struct module *mod, void *module_region)
15948+{
15949+ vunmap(module_region);
15950+}
15951+EXPORT_SYMBOL(module_free_exec);
15952+#else
15953+void module_free_exec(struct module *mod, void *module_region)
15954+{
15955+ module_free(mod, module_region);
15956+}
15957+EXPORT_SYMBOL(module_free_exec);
15958+
15959+void *module_alloc_exec(unsigned long size)
15960+{
15961+ return __module_alloc(size, PAGE_KERNEL_RX);
15962+}
15963+EXPORT_SYMBOL(module_alloc_exec);
15964+#endif
15965+#endif
15966+
15967 #ifdef CONFIG_X86_32
15968 int apply_relocate(Elf32_Shdr *sechdrs,
15969 const char *strtab,
15970@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15971 unsigned int i;
15972 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15973 Elf32_Sym *sym;
15974- uint32_t *location;
15975+ uint32_t *plocation, location;
15976
15977 DEBUGP("Applying relocate section %u to %u\n", relsec,
15978 sechdrs[relsec].sh_info);
15979 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15980 /* This is where to make the change */
15981- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15982- + rel[i].r_offset;
15983+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15984+ location = (uint32_t)plocation;
15985+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15986+ plocation = ktla_ktva((void *)plocation);
15987 /* This is the symbol it is referring to. Note that all
15988 undefined symbols have been resolved. */
15989 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15990@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15991 switch (ELF32_R_TYPE(rel[i].r_info)) {
15992 case R_386_32:
15993 /* We add the value into the location given */
15994- *location += sym->st_value;
15995+ pax_open_kernel();
15996+ *plocation += sym->st_value;
15997+ pax_close_kernel();
15998 break;
15999 case R_386_PC32:
16000 /* Add the value, subtract its postition */
16001- *location += sym->st_value - (uint32_t)location;
16002+ pax_open_kernel();
16003+ *plocation += sym->st_value - location;
16004+ pax_close_kernel();
16005 break;
16006 default:
16007 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16008@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16009 case R_X86_64_NONE:
16010 break;
16011 case R_X86_64_64:
16012+ pax_open_kernel();
16013 *(u64 *)loc = val;
16014+ pax_close_kernel();
16015 break;
16016 case R_X86_64_32:
16017+ pax_open_kernel();
16018 *(u32 *)loc = val;
16019+ pax_close_kernel();
16020 if (val != *(u32 *)loc)
16021 goto overflow;
16022 break;
16023 case R_X86_64_32S:
16024+ pax_open_kernel();
16025 *(s32 *)loc = val;
16026+ pax_close_kernel();
16027 if ((s64)val != *(s32 *)loc)
16028 goto overflow;
16029 break;
16030 case R_X86_64_PC32:
16031 val -= (u64)loc;
16032+ pax_open_kernel();
16033 *(u32 *)loc = val;
16034+ pax_close_kernel();
16035+
16036 #if 0
16037 if ((s64)val != *(s32 *)loc)
16038 goto overflow;
16039diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16040index e88f37b..1353db6 100644
16041--- a/arch/x86/kernel/nmi.c
16042+++ b/arch/x86/kernel/nmi.c
16043@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16044 dotraplinkage notrace __kprobes void
16045 do_nmi(struct pt_regs *regs, long error_code)
16046 {
16047+
16048+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16049+ if (!user_mode(regs)) {
16050+ unsigned long cs = regs->cs & 0xFFFF;
16051+ unsigned long ip = ktva_ktla(regs->ip);
16052+
16053+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16054+ regs->ip = ip;
16055+ }
16056+#endif
16057+
16058 nmi_enter();
16059
16060 inc_irq_stat(__nmi_count);
16061diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16062index 676b8c7..870ba04 100644
16063--- a/arch/x86/kernel/paravirt-spinlocks.c
16064+++ b/arch/x86/kernel/paravirt-spinlocks.c
16065@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16066 arch_spin_lock(lock);
16067 }
16068
16069-struct pv_lock_ops pv_lock_ops = {
16070+struct pv_lock_ops pv_lock_ops __read_only = {
16071 #ifdef CONFIG_SMP
16072 .spin_is_locked = __ticket_spin_is_locked,
16073 .spin_is_contended = __ticket_spin_is_contended,
16074diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16075index d90272e..6bb013b 100644
16076--- a/arch/x86/kernel/paravirt.c
16077+++ b/arch/x86/kernel/paravirt.c
16078@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16079 {
16080 return x;
16081 }
16082+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16083+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16084+#endif
16085
16086 void __init default_banner(void)
16087 {
16088@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16089 if (opfunc == NULL)
16090 /* If there's no function, patch it with a ud2a (BUG) */
16091 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16092- else if (opfunc == _paravirt_nop)
16093+ else if (opfunc == (void *)_paravirt_nop)
16094 /* If the operation is a nop, then nop the callsite */
16095 ret = paravirt_patch_nop();
16096
16097 /* identity functions just return their single argument */
16098- else if (opfunc == _paravirt_ident_32)
16099+ else if (opfunc == (void *)_paravirt_ident_32)
16100 ret = paravirt_patch_ident_32(insnbuf, len);
16101- else if (opfunc == _paravirt_ident_64)
16102+ else if (opfunc == (void *)_paravirt_ident_64)
16103 ret = paravirt_patch_ident_64(insnbuf, len);
16104+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16105+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16106+ ret = paravirt_patch_ident_64(insnbuf, len);
16107+#endif
16108
16109 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16110 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16111@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16112 if (insn_len > len || start == NULL)
16113 insn_len = len;
16114 else
16115- memcpy(insnbuf, start, insn_len);
16116+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16117
16118 return insn_len;
16119 }
16120@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16121 preempt_enable();
16122 }
16123
16124-struct pv_info pv_info = {
16125+struct pv_info pv_info __read_only = {
16126 .name = "bare hardware",
16127 .paravirt_enabled = 0,
16128 .kernel_rpl = 0,
16129@@ -313,16 +320,16 @@ struct pv_info pv_info = {
16130 #endif
16131 };
16132
16133-struct pv_init_ops pv_init_ops = {
16134+struct pv_init_ops pv_init_ops __read_only = {
16135 .patch = native_patch,
16136 };
16137
16138-struct pv_time_ops pv_time_ops = {
16139+struct pv_time_ops pv_time_ops __read_only = {
16140 .sched_clock = native_sched_clock,
16141 .steal_clock = native_steal_clock,
16142 };
16143
16144-struct pv_irq_ops pv_irq_ops = {
16145+struct pv_irq_ops pv_irq_ops __read_only = {
16146 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16147 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16148 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16149@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16150 #endif
16151 };
16152
16153-struct pv_cpu_ops pv_cpu_ops = {
16154+struct pv_cpu_ops pv_cpu_ops __read_only = {
16155 .cpuid = native_cpuid,
16156 .get_debugreg = native_get_debugreg,
16157 .set_debugreg = native_set_debugreg,
16158@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16159 .end_context_switch = paravirt_nop,
16160 };
16161
16162-struct pv_apic_ops pv_apic_ops = {
16163+struct pv_apic_ops pv_apic_ops __read_only = {
16164 #ifdef CONFIG_X86_LOCAL_APIC
16165 .startup_ipi_hook = paravirt_nop,
16166 #endif
16167 };
16168
16169-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16170+#ifdef CONFIG_X86_32
16171+#ifdef CONFIG_X86_PAE
16172+/* 64-bit pagetable entries */
16173+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16174+#else
16175 /* 32-bit pagetable entries */
16176 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16177+#endif
16178 #else
16179 /* 64-bit pagetable entries */
16180 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16181 #endif
16182
16183-struct pv_mmu_ops pv_mmu_ops = {
16184+struct pv_mmu_ops pv_mmu_ops __read_only = {
16185
16186 .read_cr2 = native_read_cr2,
16187 .write_cr2 = native_write_cr2,
16188@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16189 .make_pud = PTE_IDENT,
16190
16191 .set_pgd = native_set_pgd,
16192+ .set_pgd_batched = native_set_pgd_batched,
16193 #endif
16194 #endif /* PAGETABLE_LEVELS >= 3 */
16195
16196@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16197 },
16198
16199 .set_fixmap = native_set_fixmap,
16200+
16201+#ifdef CONFIG_PAX_KERNEXEC
16202+ .pax_open_kernel = native_pax_open_kernel,
16203+ .pax_close_kernel = native_pax_close_kernel,
16204+#endif
16205+
16206 };
16207
16208 EXPORT_SYMBOL_GPL(pv_time_ops);
16209diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16210index 35ccf75..7a15747 100644
16211--- a/arch/x86/kernel/pci-iommu_table.c
16212+++ b/arch/x86/kernel/pci-iommu_table.c
16213@@ -2,7 +2,7 @@
16214 #include <asm/iommu_table.h>
16215 #include <linux/string.h>
16216 #include <linux/kallsyms.h>
16217-
16218+#include <linux/sched.h>
16219
16220 #define DEBUG 1
16221
16222diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16223index ee5d4fb..426649b 100644
16224--- a/arch/x86/kernel/process.c
16225+++ b/arch/x86/kernel/process.c
16226@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16227
16228 void free_thread_info(struct thread_info *ti)
16229 {
16230- free_thread_xstate(ti->task);
16231 free_pages((unsigned long)ti, THREAD_ORDER);
16232 }
16233
16234+static struct kmem_cache *task_struct_cachep;
16235+
16236 void arch_task_cache_init(void)
16237 {
16238- task_xstate_cachep =
16239- kmem_cache_create("task_xstate", xstate_size,
16240+ /* create a slab on which task_structs can be allocated */
16241+ task_struct_cachep =
16242+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16243+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16244+
16245+ task_xstate_cachep =
16246+ kmem_cache_create("task_xstate", xstate_size,
16247 __alignof__(union thread_xstate),
16248- SLAB_PANIC | SLAB_NOTRACK, NULL);
16249+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16250+}
16251+
16252+struct task_struct *alloc_task_struct_node(int node)
16253+{
16254+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16255+}
16256+
16257+void free_task_struct(struct task_struct *task)
16258+{
16259+ free_thread_xstate(task);
16260+ kmem_cache_free(task_struct_cachep, task);
16261 }
16262
16263 /*
16264@@ -70,7 +87,7 @@ void exit_thread(void)
16265 unsigned long *bp = t->io_bitmap_ptr;
16266
16267 if (bp) {
16268- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16269+ struct tss_struct *tss = init_tss + get_cpu();
16270
16271 t->io_bitmap_ptr = NULL;
16272 clear_thread_flag(TIF_IO_BITMAP);
16273@@ -106,7 +123,7 @@ void show_regs_common(void)
16274
16275 printk(KERN_CONT "\n");
16276 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16277- current->pid, current->comm, print_tainted(),
16278+ task_pid_nr(current), current->comm, print_tainted(),
16279 init_utsname()->release,
16280 (int)strcspn(init_utsname()->version, " "),
16281 init_utsname()->version);
16282@@ -120,6 +137,9 @@ void flush_thread(void)
16283 {
16284 struct task_struct *tsk = current;
16285
16286+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16287+ loadsegment(gs, 0);
16288+#endif
16289 flush_ptrace_hw_breakpoint(tsk);
16290 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16291 /*
16292@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16293 regs.di = (unsigned long) arg;
16294
16295 #ifdef CONFIG_X86_32
16296- regs.ds = __USER_DS;
16297- regs.es = __USER_DS;
16298+ regs.ds = __KERNEL_DS;
16299+ regs.es = __KERNEL_DS;
16300 regs.fs = __KERNEL_PERCPU;
16301- regs.gs = __KERNEL_STACK_CANARY;
16302+ savesegment(gs, regs.gs);
16303 #else
16304 regs.ss = __KERNEL_DS;
16305 #endif
16306@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16307
16308 return ret;
16309 }
16310-void stop_this_cpu(void *dummy)
16311+__noreturn void stop_this_cpu(void *dummy)
16312 {
16313 local_irq_disable();
16314 /*
16315@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16316 }
16317 early_param("idle", idle_setup);
16318
16319-unsigned long arch_align_stack(unsigned long sp)
16320+#ifdef CONFIG_PAX_RANDKSTACK
16321+void pax_randomize_kstack(struct pt_regs *regs)
16322 {
16323- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16324- sp -= get_random_int() % 8192;
16325- return sp & ~0xf;
16326-}
16327+ struct thread_struct *thread = &current->thread;
16328+ unsigned long time;
16329
16330-unsigned long arch_randomize_brk(struct mm_struct *mm)
16331-{
16332- unsigned long range_end = mm->brk + 0x02000000;
16333- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16334-}
16335+ if (!randomize_va_space)
16336+ return;
16337+
16338+ if (v8086_mode(regs))
16339+ return;
16340
16341+ rdtscl(time);
16342+
16343+ /* P4 seems to return a 0 LSB, ignore it */
16344+#ifdef CONFIG_MPENTIUM4
16345+ time &= 0x3EUL;
16346+ time <<= 2;
16347+#elif defined(CONFIG_X86_64)
16348+ time &= 0xFUL;
16349+ time <<= 4;
16350+#else
16351+ time &= 0x1FUL;
16352+ time <<= 3;
16353+#endif
16354+
16355+ thread->sp0 ^= time;
16356+ load_sp0(init_tss + smp_processor_id(), thread);
16357+
16358+#ifdef CONFIG_X86_64
16359+ percpu_write(kernel_stack, thread->sp0);
16360+#endif
16361+}
16362+#endif
16363diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16364index 795b79f..063767a 100644
16365--- a/arch/x86/kernel/process_32.c
16366+++ b/arch/x86/kernel/process_32.c
16367@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16368 unsigned long thread_saved_pc(struct task_struct *tsk)
16369 {
16370 return ((unsigned long *)tsk->thread.sp)[3];
16371+//XXX return tsk->thread.eip;
16372 }
16373
16374 #ifndef CONFIG_SMP
16375@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16376 unsigned long sp;
16377 unsigned short ss, gs;
16378
16379- if (user_mode_vm(regs)) {
16380+ if (user_mode(regs)) {
16381 sp = regs->sp;
16382 ss = regs->ss & 0xffff;
16383- gs = get_user_gs(regs);
16384 } else {
16385 sp = kernel_stack_pointer(regs);
16386 savesegment(ss, ss);
16387- savesegment(gs, gs);
16388 }
16389+ gs = get_user_gs(regs);
16390
16391 show_regs_common();
16392
16393@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16394 struct task_struct *tsk;
16395 int err;
16396
16397- childregs = task_pt_regs(p);
16398+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16399 *childregs = *regs;
16400 childregs->ax = 0;
16401 childregs->sp = sp;
16402
16403 p->thread.sp = (unsigned long) childregs;
16404 p->thread.sp0 = (unsigned long) (childregs+1);
16405+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16406
16407 p->thread.ip = (unsigned long) ret_from_fork;
16408
16409@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16410 struct thread_struct *prev = &prev_p->thread,
16411 *next = &next_p->thread;
16412 int cpu = smp_processor_id();
16413- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16414+ struct tss_struct *tss = init_tss + cpu;
16415 bool preload_fpu;
16416
16417 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16418@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16419 */
16420 lazy_save_gs(prev->gs);
16421
16422+#ifdef CONFIG_PAX_MEMORY_UDEREF
16423+ __set_fs(task_thread_info(next_p)->addr_limit);
16424+#endif
16425+
16426 /*
16427 * Load the per-thread Thread-Local Storage descriptor.
16428 */
16429@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16430 */
16431 arch_end_context_switch(next_p);
16432
16433+ percpu_write(current_task, next_p);
16434+ percpu_write(current_tinfo, &next_p->tinfo);
16435+
16436 if (preload_fpu)
16437 __math_state_restore();
16438
16439@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16440 if (prev->gs | next->gs)
16441 lazy_load_gs(next->gs);
16442
16443- percpu_write(current_task, next_p);
16444-
16445 return prev_p;
16446 }
16447
16448@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16449 } while (count++ < 16);
16450 return 0;
16451 }
16452-
16453diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16454index 3bd7e6e..90b2bcf 100644
16455--- a/arch/x86/kernel/process_64.c
16456+++ b/arch/x86/kernel/process_64.c
16457@@ -89,7 +89,7 @@ static void __exit_idle(void)
16458 void exit_idle(void)
16459 {
16460 /* idle loop has pid 0 */
16461- if (current->pid)
16462+ if (task_pid_nr(current))
16463 return;
16464 __exit_idle();
16465 }
16466@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16467 struct pt_regs *childregs;
16468 struct task_struct *me = current;
16469
16470- childregs = ((struct pt_regs *)
16471- (THREAD_SIZE + task_stack_page(p))) - 1;
16472+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16473 *childregs = *regs;
16474
16475 childregs->ax = 0;
16476@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16477 p->thread.sp = (unsigned long) childregs;
16478 p->thread.sp0 = (unsigned long) (childregs+1);
16479 p->thread.usersp = me->thread.usersp;
16480+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16481
16482 set_tsk_thread_flag(p, TIF_FORK);
16483
16484@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16485 struct thread_struct *prev = &prev_p->thread;
16486 struct thread_struct *next = &next_p->thread;
16487 int cpu = smp_processor_id();
16488- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16489+ struct tss_struct *tss = init_tss + cpu;
16490 unsigned fsindex, gsindex;
16491 bool preload_fpu;
16492
16493@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16494 prev->usersp = percpu_read(old_rsp);
16495 percpu_write(old_rsp, next->usersp);
16496 percpu_write(current_task, next_p);
16497+ percpu_write(current_tinfo, &next_p->tinfo);
16498
16499- percpu_write(kernel_stack,
16500- (unsigned long)task_stack_page(next_p) +
16501- THREAD_SIZE - KERNEL_STACK_OFFSET);
16502+ percpu_write(kernel_stack, next->sp0);
16503
16504 /*
16505 * Now maybe reload the debug registers and handle I/O bitmaps
16506@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16507 if (!p || p == current || p->state == TASK_RUNNING)
16508 return 0;
16509 stack = (unsigned long)task_stack_page(p);
16510- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16511+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16512 return 0;
16513 fp = *(u64 *)(p->thread.sp);
16514 do {
16515- if (fp < (unsigned long)stack ||
16516- fp >= (unsigned long)stack+THREAD_SIZE)
16517+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16518 return 0;
16519 ip = *(u64 *)(fp+8);
16520 if (!in_sched_functions(ip))
16521diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16522index 8252879..d3219e0 100644
16523--- a/arch/x86/kernel/ptrace.c
16524+++ b/arch/x86/kernel/ptrace.c
16525@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16526 unsigned long addr, unsigned long data)
16527 {
16528 int ret;
16529- unsigned long __user *datap = (unsigned long __user *)data;
16530+ unsigned long __user *datap = (__force unsigned long __user *)data;
16531
16532 switch (request) {
16533 /* read the word at location addr in the USER area. */
16534@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16535 if ((int) addr < 0)
16536 return -EIO;
16537 ret = do_get_thread_area(child, addr,
16538- (struct user_desc __user *)data);
16539+ (__force struct user_desc __user *) data);
16540 break;
16541
16542 case PTRACE_SET_THREAD_AREA:
16543 if ((int) addr < 0)
16544 return -EIO;
16545 ret = do_set_thread_area(child, addr,
16546- (struct user_desc __user *)data, 0);
16547+ (__force struct user_desc __user *) data, 0);
16548 break;
16549 #endif
16550
16551@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16552 memset(info, 0, sizeof(*info));
16553 info->si_signo = SIGTRAP;
16554 info->si_code = si_code;
16555- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16556+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16557 }
16558
16559 void user_single_step_siginfo(struct task_struct *tsk,
16560diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16561index 42eb330..139955c 100644
16562--- a/arch/x86/kernel/pvclock.c
16563+++ b/arch/x86/kernel/pvclock.c
16564@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16565 return pv_tsc_khz;
16566 }
16567
16568-static atomic64_t last_value = ATOMIC64_INIT(0);
16569+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16570
16571 void pvclock_resume(void)
16572 {
16573- atomic64_set(&last_value, 0);
16574+ atomic64_set_unchecked(&last_value, 0);
16575 }
16576
16577 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16578@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16579 * updating at the same time, and one of them could be slightly behind,
16580 * making the assumption that last_value always go forward fail to hold.
16581 */
16582- last = atomic64_read(&last_value);
16583+ last = atomic64_read_unchecked(&last_value);
16584 do {
16585 if (ret < last)
16586 return last;
16587- last = atomic64_cmpxchg(&last_value, last, ret);
16588+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16589 } while (unlikely(last != ret));
16590
16591 return ret;
16592diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16593index 37a458b..e63d183 100644
16594--- a/arch/x86/kernel/reboot.c
16595+++ b/arch/x86/kernel/reboot.c
16596@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16597 EXPORT_SYMBOL(pm_power_off);
16598
16599 static const struct desc_ptr no_idt = {};
16600-static int reboot_mode;
16601+static unsigned short reboot_mode;
16602 enum reboot_type reboot_type = BOOT_ACPI;
16603 int reboot_force;
16604
16605@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16606 extern const unsigned char machine_real_restart_asm[];
16607 extern const u64 machine_real_restart_gdt[3];
16608
16609-void machine_real_restart(unsigned int type)
16610+__noreturn void machine_real_restart(unsigned int type)
16611 {
16612 void *restart_va;
16613 unsigned long restart_pa;
16614- void (*restart_lowmem)(unsigned int);
16615+ void (* __noreturn restart_lowmem)(unsigned int);
16616 u64 *lowmem_gdt;
16617
16618+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16619+ struct desc_struct *gdt;
16620+#endif
16621+
16622 local_irq_disable();
16623
16624 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16625@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16626 boot)". This seems like a fairly standard thing that gets set by
16627 REBOOT.COM programs, and the previous reset routine did this
16628 too. */
16629- *((unsigned short *)0x472) = reboot_mode;
16630+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16631
16632 /* Patch the GDT in the low memory trampoline */
16633 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16634
16635 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16636 restart_pa = virt_to_phys(restart_va);
16637- restart_lowmem = (void (*)(unsigned int))restart_pa;
16638+ restart_lowmem = (void *)restart_pa;
16639
16640 /* GDT[0]: GDT self-pointer */
16641 lowmem_gdt[0] =
16642@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16643 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16644
16645 /* Jump to the identity-mapped low memory code */
16646+
16647+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16648+ gdt = get_cpu_gdt_table(smp_processor_id());
16649+ pax_open_kernel();
16650+#ifdef CONFIG_PAX_MEMORY_UDEREF
16651+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16652+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16653+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16654+#endif
16655+#ifdef CONFIG_PAX_KERNEXEC
16656+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16657+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16658+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16659+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16660+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16661+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16662+#endif
16663+ pax_close_kernel();
16664+#endif
16665+
16666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16667+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16668+ unreachable();
16669+#else
16670 restart_lowmem(type);
16671+#endif
16672+
16673 }
16674 #ifdef CONFIG_APM_MODULE
16675 EXPORT_SYMBOL(machine_real_restart);
16676@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16677 * try to force a triple fault and then cycle between hitting the keyboard
16678 * controller and doing that
16679 */
16680-static void native_machine_emergency_restart(void)
16681+__noreturn static void native_machine_emergency_restart(void)
16682 {
16683 int i;
16684 int attempt = 0;
16685@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16686 #endif
16687 }
16688
16689-static void __machine_emergency_restart(int emergency)
16690+static __noreturn void __machine_emergency_restart(int emergency)
16691 {
16692 reboot_emergency = emergency;
16693 machine_ops.emergency_restart();
16694 }
16695
16696-static void native_machine_restart(char *__unused)
16697+static __noreturn void native_machine_restart(char *__unused)
16698 {
16699 printk("machine restart\n");
16700
16701@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16702 __machine_emergency_restart(0);
16703 }
16704
16705-static void native_machine_halt(void)
16706+static __noreturn void native_machine_halt(void)
16707 {
16708 /* stop other cpus and apics */
16709 machine_shutdown();
16710@@ -690,7 +720,7 @@ static void native_machine_halt(void)
16711 stop_this_cpu(NULL);
16712 }
16713
16714-static void native_machine_power_off(void)
16715+__noreturn static void native_machine_power_off(void)
16716 {
16717 if (pm_power_off) {
16718 if (!reboot_force)
16719@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16720 }
16721 /* a fallback in case there is no PM info available */
16722 tboot_shutdown(TB_SHUTDOWN_HALT);
16723+ unreachable();
16724 }
16725
16726 struct machine_ops machine_ops = {
16727diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16728index 7a6f3b3..bed145d7 100644
16729--- a/arch/x86/kernel/relocate_kernel_64.S
16730+++ b/arch/x86/kernel/relocate_kernel_64.S
16731@@ -11,6 +11,7 @@
16732 #include <asm/kexec.h>
16733 #include <asm/processor-flags.h>
16734 #include <asm/pgtable_types.h>
16735+#include <asm/alternative-asm.h>
16736
16737 /*
16738 * Must be relocatable PIC code callable as a C function
16739@@ -160,13 +161,14 @@ identity_mapped:
16740 xorq %rbp, %rbp
16741 xorq %r8, %r8
16742 xorq %r9, %r9
16743- xorq %r10, %r9
16744+ xorq %r10, %r10
16745 xorq %r11, %r11
16746 xorq %r12, %r12
16747 xorq %r13, %r13
16748 xorq %r14, %r14
16749 xorq %r15, %r15
16750
16751+ pax_force_retaddr 0, 1
16752 ret
16753
16754 1:
16755diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16756index cf0ef98..e3f780b 100644
16757--- a/arch/x86/kernel/setup.c
16758+++ b/arch/x86/kernel/setup.c
16759@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16760
16761 switch (data->type) {
16762 case SETUP_E820_EXT:
16763- parse_e820_ext(data);
16764+ parse_e820_ext((struct setup_data __force_kernel *)data);
16765 break;
16766 case SETUP_DTB:
16767 add_dtb(pa_data);
16768@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16769 * area (640->1Mb) as ram even though it is not.
16770 * take them out.
16771 */
16772- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16773+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16774 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16775 }
16776
16777@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16778
16779 if (!boot_params.hdr.root_flags)
16780 root_mountflags &= ~MS_RDONLY;
16781- init_mm.start_code = (unsigned long) _text;
16782- init_mm.end_code = (unsigned long) _etext;
16783+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16784+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16785 init_mm.end_data = (unsigned long) _edata;
16786 init_mm.brk = _brk_end;
16787
16788- code_resource.start = virt_to_phys(_text);
16789- code_resource.end = virt_to_phys(_etext)-1;
16790- data_resource.start = virt_to_phys(_etext);
16791+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16792+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16793+ data_resource.start = virt_to_phys(_sdata);
16794 data_resource.end = virt_to_phys(_edata)-1;
16795 bss_resource.start = virt_to_phys(&__bss_start);
16796 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16797diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16798index 71f4727..16dc9f7 100644
16799--- a/arch/x86/kernel/setup_percpu.c
16800+++ b/arch/x86/kernel/setup_percpu.c
16801@@ -21,19 +21,17 @@
16802 #include <asm/cpu.h>
16803 #include <asm/stackprotector.h>
16804
16805-DEFINE_PER_CPU(int, cpu_number);
16806+#ifdef CONFIG_SMP
16807+DEFINE_PER_CPU(unsigned int, cpu_number);
16808 EXPORT_PER_CPU_SYMBOL(cpu_number);
16809+#endif
16810
16811-#ifdef CONFIG_X86_64
16812 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16813-#else
16814-#define BOOT_PERCPU_OFFSET 0
16815-#endif
16816
16817 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16818 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16819
16820-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16821+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16822 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16823 };
16824 EXPORT_SYMBOL(__per_cpu_offset);
16825@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16826 {
16827 #ifdef CONFIG_X86_32
16828 struct desc_struct gdt;
16829+ unsigned long base = per_cpu_offset(cpu);
16830
16831- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16832- 0x2 | DESCTYPE_S, 0x8);
16833- gdt.s = 1;
16834+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16835+ 0x83 | DESCTYPE_S, 0xC);
16836 write_gdt_entry(get_cpu_gdt_table(cpu),
16837 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16838 #endif
16839@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16840 /* alrighty, percpu areas up and running */
16841 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16842 for_each_possible_cpu(cpu) {
16843+#ifdef CONFIG_CC_STACKPROTECTOR
16844+#ifdef CONFIG_X86_32
16845+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16846+#endif
16847+#endif
16848 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16849 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16850 per_cpu(cpu_number, cpu) = cpu;
16851@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16852 */
16853 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16854 #endif
16855+#ifdef CONFIG_CC_STACKPROTECTOR
16856+#ifdef CONFIG_X86_32
16857+ if (!cpu)
16858+ per_cpu(stack_canary.canary, cpu) = canary;
16859+#endif
16860+#endif
16861 /*
16862 * Up to this point, the boot CPU has been using .init.data
16863 * area. Reload any changed state for the boot CPU.
16864diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16865index 54ddaeb2..22c3bdc 100644
16866--- a/arch/x86/kernel/signal.c
16867+++ b/arch/x86/kernel/signal.c
16868@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16869 * Align the stack pointer according to the i386 ABI,
16870 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16871 */
16872- sp = ((sp + 4) & -16ul) - 4;
16873+ sp = ((sp - 12) & -16ul) - 4;
16874 #else /* !CONFIG_X86_32 */
16875 sp = round_down(sp, 16) - 8;
16876 #endif
16877@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16878 * Return an always-bogus address instead so we will die with SIGSEGV.
16879 */
16880 if (onsigstack && !likely(on_sig_stack(sp)))
16881- return (void __user *)-1L;
16882+ return (__force void __user *)-1L;
16883
16884 /* save i387 state */
16885 if (used_math() && save_i387_xstate(*fpstate) < 0)
16886- return (void __user *)-1L;
16887+ return (__force void __user *)-1L;
16888
16889 return (void __user *)sp;
16890 }
16891@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16892 }
16893
16894 if (current->mm->context.vdso)
16895- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16896+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16897 else
16898- restorer = &frame->retcode;
16899+ restorer = (void __user *)&frame->retcode;
16900 if (ka->sa.sa_flags & SA_RESTORER)
16901 restorer = ka->sa.sa_restorer;
16902
16903@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16904 * reasons and because gdb uses it as a signature to notice
16905 * signal handler stack frames.
16906 */
16907- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16908+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16909
16910 if (err)
16911 return -EFAULT;
16912@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16913 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16914
16915 /* Set up to return from userspace. */
16916- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16917+ if (current->mm->context.vdso)
16918+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16919+ else
16920+ restorer = (void __user *)&frame->retcode;
16921 if (ka->sa.sa_flags & SA_RESTORER)
16922 restorer = ka->sa.sa_restorer;
16923 put_user_ex(restorer, &frame->pretcode);
16924@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16925 * reasons and because gdb uses it as a signature to notice
16926 * signal handler stack frames.
16927 */
16928- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16929+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16930 } put_user_catch(err);
16931
16932 if (err)
16933@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16934 * X86_32: vm86 regs switched out by assembly code before reaching
16935 * here, so testing against kernel CS suffices.
16936 */
16937- if (!user_mode(regs))
16938+ if (!user_mode_novm(regs))
16939 return;
16940
16941 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16942diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16943index 9f548cb..caf76f7 100644
16944--- a/arch/x86/kernel/smpboot.c
16945+++ b/arch/x86/kernel/smpboot.c
16946@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16947 set_idle_for_cpu(cpu, c_idle.idle);
16948 do_rest:
16949 per_cpu(current_task, cpu) = c_idle.idle;
16950+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16951 #ifdef CONFIG_X86_32
16952 /* Stack for startup_32 can be just as for start_secondary onwards */
16953 irq_ctx_init(cpu);
16954 #else
16955 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16956 initial_gs = per_cpu_offset(cpu);
16957- per_cpu(kernel_stack, cpu) =
16958- (unsigned long)task_stack_page(c_idle.idle) -
16959- KERNEL_STACK_OFFSET + THREAD_SIZE;
16960+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16961 #endif
16962+
16963+ pax_open_kernel();
16964 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16965+ pax_close_kernel();
16966+
16967 initial_code = (unsigned long)start_secondary;
16968 stack_start = c_idle.idle->thread.sp;
16969
16970@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16971
16972 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16973
16974+#ifdef CONFIG_PAX_PER_CPU_PGD
16975+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16976+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16977+ KERNEL_PGD_PTRS);
16978+#endif
16979+
16980 err = do_boot_cpu(apicid, cpu);
16981 if (err) {
16982 pr_debug("do_boot_cpu failed %d\n", err);
16983diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16984index c346d11..d43b163 100644
16985--- a/arch/x86/kernel/step.c
16986+++ b/arch/x86/kernel/step.c
16987@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16988 struct desc_struct *desc;
16989 unsigned long base;
16990
16991- seg &= ~7UL;
16992+ seg >>= 3;
16993
16994 mutex_lock(&child->mm->context.lock);
16995- if (unlikely((seg >> 3) >= child->mm->context.size))
16996+ if (unlikely(seg >= child->mm->context.size))
16997 addr = -1L; /* bogus selector, access would fault */
16998 else {
16999 desc = child->mm->context.ldt + seg;
17000@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17001 addr += base;
17002 }
17003 mutex_unlock(&child->mm->context.lock);
17004- }
17005+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17006+ addr = ktla_ktva(addr);
17007
17008 return addr;
17009 }
17010@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17011 unsigned char opcode[15];
17012 unsigned long addr = convert_ip_to_linear(child, regs);
17013
17014+ if (addr == -EINVAL)
17015+ return 0;
17016+
17017 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17018 for (i = 0; i < copied; i++) {
17019 switch (opcode[i]) {
17020diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17021index 0b0cb5f..db6b9ed 100644
17022--- a/arch/x86/kernel/sys_i386_32.c
17023+++ b/arch/x86/kernel/sys_i386_32.c
17024@@ -24,17 +24,224 @@
17025
17026 #include <asm/syscalls.h>
17027
17028-/*
17029- * Do a system call from kernel instead of calling sys_execve so we
17030- * end up with proper pt_regs.
17031- */
17032-int kernel_execve(const char *filename,
17033- const char *const argv[],
17034- const char *const envp[])
17035+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17036 {
17037- long __res;
17038- asm volatile ("int $0x80"
17039- : "=a" (__res)
17040- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17041- return __res;
17042+ unsigned long pax_task_size = TASK_SIZE;
17043+
17044+#ifdef CONFIG_PAX_SEGMEXEC
17045+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17046+ pax_task_size = SEGMEXEC_TASK_SIZE;
17047+#endif
17048+
17049+ if (len > pax_task_size || addr > pax_task_size - len)
17050+ return -EINVAL;
17051+
17052+ return 0;
17053+}
17054+
17055+unsigned long
17056+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17057+ unsigned long len, unsigned long pgoff, unsigned long flags)
17058+{
17059+ struct mm_struct *mm = current->mm;
17060+ struct vm_area_struct *vma;
17061+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17062+
17063+#ifdef CONFIG_PAX_SEGMEXEC
17064+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17065+ pax_task_size = SEGMEXEC_TASK_SIZE;
17066+#endif
17067+
17068+ pax_task_size -= PAGE_SIZE;
17069+
17070+ if (len > pax_task_size)
17071+ return -ENOMEM;
17072+
17073+ if (flags & MAP_FIXED)
17074+ return addr;
17075+
17076+#ifdef CONFIG_PAX_RANDMMAP
17077+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17078+#endif
17079+
17080+ if (addr) {
17081+ addr = PAGE_ALIGN(addr);
17082+ if (pax_task_size - len >= addr) {
17083+ vma = find_vma(mm, addr);
17084+ if (check_heap_stack_gap(vma, addr, len))
17085+ return addr;
17086+ }
17087+ }
17088+ if (len > mm->cached_hole_size) {
17089+ start_addr = addr = mm->free_area_cache;
17090+ } else {
17091+ start_addr = addr = mm->mmap_base;
17092+ mm->cached_hole_size = 0;
17093+ }
17094+
17095+#ifdef CONFIG_PAX_PAGEEXEC
17096+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17097+ start_addr = 0x00110000UL;
17098+
17099+#ifdef CONFIG_PAX_RANDMMAP
17100+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17101+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17102+#endif
17103+
17104+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17105+ start_addr = addr = mm->mmap_base;
17106+ else
17107+ addr = start_addr;
17108+ }
17109+#endif
17110+
17111+full_search:
17112+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17113+ /* At this point: (!vma || addr < vma->vm_end). */
17114+ if (pax_task_size - len < addr) {
17115+ /*
17116+ * Start a new search - just in case we missed
17117+ * some holes.
17118+ */
17119+ if (start_addr != mm->mmap_base) {
17120+ start_addr = addr = mm->mmap_base;
17121+ mm->cached_hole_size = 0;
17122+ goto full_search;
17123+ }
17124+ return -ENOMEM;
17125+ }
17126+ if (check_heap_stack_gap(vma, addr, len))
17127+ break;
17128+ if (addr + mm->cached_hole_size < vma->vm_start)
17129+ mm->cached_hole_size = vma->vm_start - addr;
17130+ addr = vma->vm_end;
17131+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17132+ start_addr = addr = mm->mmap_base;
17133+ mm->cached_hole_size = 0;
17134+ goto full_search;
17135+ }
17136+ }
17137+
17138+ /*
17139+ * Remember the place where we stopped the search:
17140+ */
17141+ mm->free_area_cache = addr + len;
17142+ return addr;
17143+}
17144+
17145+unsigned long
17146+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17147+ const unsigned long len, const unsigned long pgoff,
17148+ const unsigned long flags)
17149+{
17150+ struct vm_area_struct *vma;
17151+ struct mm_struct *mm = current->mm;
17152+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17153+
17154+#ifdef CONFIG_PAX_SEGMEXEC
17155+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17156+ pax_task_size = SEGMEXEC_TASK_SIZE;
17157+#endif
17158+
17159+ pax_task_size -= PAGE_SIZE;
17160+
17161+ /* requested length too big for entire address space */
17162+ if (len > pax_task_size)
17163+ return -ENOMEM;
17164+
17165+ if (flags & MAP_FIXED)
17166+ return addr;
17167+
17168+#ifdef CONFIG_PAX_PAGEEXEC
17169+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17170+ goto bottomup;
17171+#endif
17172+
17173+#ifdef CONFIG_PAX_RANDMMAP
17174+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17175+#endif
17176+
17177+ /* requesting a specific address */
17178+ if (addr) {
17179+ addr = PAGE_ALIGN(addr);
17180+ if (pax_task_size - len >= addr) {
17181+ vma = find_vma(mm, addr);
17182+ if (check_heap_stack_gap(vma, addr, len))
17183+ return addr;
17184+ }
17185+ }
17186+
17187+ /* check if free_area_cache is useful for us */
17188+ if (len <= mm->cached_hole_size) {
17189+ mm->cached_hole_size = 0;
17190+ mm->free_area_cache = mm->mmap_base;
17191+ }
17192+
17193+ /* either no address requested or can't fit in requested address hole */
17194+ addr = mm->free_area_cache;
17195+
17196+ /* make sure it can fit in the remaining address space */
17197+ if (addr > len) {
17198+ vma = find_vma(mm, addr-len);
17199+ if (check_heap_stack_gap(vma, addr - len, len))
17200+ /* remember the address as a hint for next time */
17201+ return (mm->free_area_cache = addr-len);
17202+ }
17203+
17204+ if (mm->mmap_base < len)
17205+ goto bottomup;
17206+
17207+ addr = mm->mmap_base-len;
17208+
17209+ do {
17210+ /*
17211+ * Lookup failure means no vma is above this address,
17212+ * else if new region fits below vma->vm_start,
17213+ * return with success:
17214+ */
17215+ vma = find_vma(mm, addr);
17216+ if (check_heap_stack_gap(vma, addr, len))
17217+ /* remember the address as a hint for next time */
17218+ return (mm->free_area_cache = addr);
17219+
17220+ /* remember the largest hole we saw so far */
17221+ if (addr + mm->cached_hole_size < vma->vm_start)
17222+ mm->cached_hole_size = vma->vm_start - addr;
17223+
17224+ /* try just below the current vma->vm_start */
17225+ addr = skip_heap_stack_gap(vma, len);
17226+ } while (!IS_ERR_VALUE(addr));
17227+
17228+bottomup:
17229+ /*
17230+ * A failed mmap() very likely causes application failure,
17231+ * so fall back to the bottom-up function here. This scenario
17232+ * can happen with large stack limits and large mmap()
17233+ * allocations.
17234+ */
17235+
17236+#ifdef CONFIG_PAX_SEGMEXEC
17237+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17238+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17239+ else
17240+#endif
17241+
17242+ mm->mmap_base = TASK_UNMAPPED_BASE;
17243+
17244+#ifdef CONFIG_PAX_RANDMMAP
17245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17246+ mm->mmap_base += mm->delta_mmap;
17247+#endif
17248+
17249+ mm->free_area_cache = mm->mmap_base;
17250+ mm->cached_hole_size = ~0UL;
17251+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17252+ /*
17253+ * Restore the topdown base:
17254+ */
17255+ mm->mmap_base = base;
17256+ mm->free_area_cache = base;
17257+ mm->cached_hole_size = ~0UL;
17258+
17259+ return addr;
17260 }
17261diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17262index 0514890..3dbebce 100644
17263--- a/arch/x86/kernel/sys_x86_64.c
17264+++ b/arch/x86/kernel/sys_x86_64.c
17265@@ -95,8 +95,8 @@ out:
17266 return error;
17267 }
17268
17269-static void find_start_end(unsigned long flags, unsigned long *begin,
17270- unsigned long *end)
17271+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17272+ unsigned long *begin, unsigned long *end)
17273 {
17274 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17275 unsigned long new_begin;
17276@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17277 *begin = new_begin;
17278 }
17279 } else {
17280- *begin = TASK_UNMAPPED_BASE;
17281+ *begin = mm->mmap_base;
17282 *end = TASK_SIZE;
17283 }
17284 }
17285@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17286 if (flags & MAP_FIXED)
17287 return addr;
17288
17289- find_start_end(flags, &begin, &end);
17290+ find_start_end(mm, flags, &begin, &end);
17291
17292 if (len > end)
17293 return -ENOMEM;
17294
17295+#ifdef CONFIG_PAX_RANDMMAP
17296+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17297+#endif
17298+
17299 if (addr) {
17300 addr = PAGE_ALIGN(addr);
17301 vma = find_vma(mm, addr);
17302- if (end - len >= addr &&
17303- (!vma || addr + len <= vma->vm_start))
17304+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17305 return addr;
17306 }
17307 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17308@@ -172,7 +175,7 @@ full_search:
17309 }
17310 return -ENOMEM;
17311 }
17312- if (!vma || addr + len <= vma->vm_start) {
17313+ if (check_heap_stack_gap(vma, addr, len)) {
17314 /*
17315 * Remember the place where we stopped the search:
17316 */
17317@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17318 {
17319 struct vm_area_struct *vma;
17320 struct mm_struct *mm = current->mm;
17321- unsigned long addr = addr0;
17322+ unsigned long base = mm->mmap_base, addr = addr0;
17323
17324 /* requested length too big for entire address space */
17325 if (len > TASK_SIZE)
17326@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17327 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17328 goto bottomup;
17329
17330+#ifdef CONFIG_PAX_RANDMMAP
17331+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17332+#endif
17333+
17334 /* requesting a specific address */
17335 if (addr) {
17336 addr = PAGE_ALIGN(addr);
17337- vma = find_vma(mm, addr);
17338- if (TASK_SIZE - len >= addr &&
17339- (!vma || addr + len <= vma->vm_start))
17340- return addr;
17341+ if (TASK_SIZE - len >= addr) {
17342+ vma = find_vma(mm, addr);
17343+ if (check_heap_stack_gap(vma, addr, len))
17344+ return addr;
17345+ }
17346 }
17347
17348 /* check if free_area_cache is useful for us */
17349@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17350 ALIGN_TOPDOWN);
17351
17352 vma = find_vma(mm, tmp_addr);
17353- if (!vma || tmp_addr + len <= vma->vm_start)
17354+ if (check_heap_stack_gap(vma, tmp_addr, len))
17355 /* remember the address as a hint for next time */
17356 return mm->free_area_cache = tmp_addr;
17357 }
17358@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17359 * return with success:
17360 */
17361 vma = find_vma(mm, addr);
17362- if (!vma || addr+len <= vma->vm_start)
17363+ if (check_heap_stack_gap(vma, addr, len))
17364 /* remember the address as a hint for next time */
17365 return mm->free_area_cache = addr;
17366
17367@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17368 mm->cached_hole_size = vma->vm_start - addr;
17369
17370 /* try just below the current vma->vm_start */
17371- addr = vma->vm_start-len;
17372- } while (len < vma->vm_start);
17373+ addr = skip_heap_stack_gap(vma, len);
17374+ } while (!IS_ERR_VALUE(addr));
17375
17376 bottomup:
17377 /*
17378@@ -270,13 +278,21 @@ bottomup:
17379 * can happen with large stack limits and large mmap()
17380 * allocations.
17381 */
17382+ mm->mmap_base = TASK_UNMAPPED_BASE;
17383+
17384+#ifdef CONFIG_PAX_RANDMMAP
17385+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17386+ mm->mmap_base += mm->delta_mmap;
17387+#endif
17388+
17389+ mm->free_area_cache = mm->mmap_base;
17390 mm->cached_hole_size = ~0UL;
17391- mm->free_area_cache = TASK_UNMAPPED_BASE;
17392 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17393 /*
17394 * Restore the topdown base:
17395 */
17396- mm->free_area_cache = mm->mmap_base;
17397+ mm->mmap_base = base;
17398+ mm->free_area_cache = base;
17399 mm->cached_hole_size = ~0UL;
17400
17401 return addr;
17402diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17403index 9a0e312..e6f66f2 100644
17404--- a/arch/x86/kernel/syscall_table_32.S
17405+++ b/arch/x86/kernel/syscall_table_32.S
17406@@ -1,3 +1,4 @@
17407+.section .rodata,"a",@progbits
17408 ENTRY(sys_call_table)
17409 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17410 .long sys_exit
17411diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17412index e2410e2..4fe3fbc 100644
17413--- a/arch/x86/kernel/tboot.c
17414+++ b/arch/x86/kernel/tboot.c
17415@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17416
17417 void tboot_shutdown(u32 shutdown_type)
17418 {
17419- void (*shutdown)(void);
17420+ void (* __noreturn shutdown)(void);
17421
17422 if (!tboot_enabled())
17423 return;
17424@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17425
17426 switch_to_tboot_pt();
17427
17428- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17429+ shutdown = (void *)tboot->shutdown_entry;
17430 shutdown();
17431
17432 /* should not reach here */
17433@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17434 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17435 }
17436
17437-static atomic_t ap_wfs_count;
17438+static atomic_unchecked_t ap_wfs_count;
17439
17440 static int tboot_wait_for_aps(int num_aps)
17441 {
17442@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17443 {
17444 switch (action) {
17445 case CPU_DYING:
17446- atomic_inc(&ap_wfs_count);
17447+ atomic_inc_unchecked(&ap_wfs_count);
17448 if (num_online_cpus() == 1)
17449- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17450+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17451 return NOTIFY_BAD;
17452 break;
17453 }
17454@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17455
17456 tboot_create_trampoline();
17457
17458- atomic_set(&ap_wfs_count, 0);
17459+ atomic_set_unchecked(&ap_wfs_count, 0);
17460 register_hotcpu_notifier(&tboot_cpu_notifier);
17461 return 0;
17462 }
17463diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17464index dd5fbf4..b7f2232 100644
17465--- a/arch/x86/kernel/time.c
17466+++ b/arch/x86/kernel/time.c
17467@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17468 {
17469 unsigned long pc = instruction_pointer(regs);
17470
17471- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17472+ if (!user_mode(regs) && in_lock_functions(pc)) {
17473 #ifdef CONFIG_FRAME_POINTER
17474- return *(unsigned long *)(regs->bp + sizeof(long));
17475+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17476 #else
17477 unsigned long *sp =
17478 (unsigned long *)kernel_stack_pointer(regs);
17479@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17480 * or above a saved flags. Eflags has bits 22-31 zero,
17481 * kernel addresses don't.
17482 */
17483+
17484+#ifdef CONFIG_PAX_KERNEXEC
17485+ return ktla_ktva(sp[0]);
17486+#else
17487 if (sp[0] >> 22)
17488 return sp[0];
17489 if (sp[1] >> 22)
17490 return sp[1];
17491 #endif
17492+
17493+#endif
17494 }
17495 return pc;
17496 }
17497diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17498index 6bb7b85..dd853e1 100644
17499--- a/arch/x86/kernel/tls.c
17500+++ b/arch/x86/kernel/tls.c
17501@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17502 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17503 return -EINVAL;
17504
17505+#ifdef CONFIG_PAX_SEGMEXEC
17506+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17507+ return -EINVAL;
17508+#endif
17509+
17510 set_tls_desc(p, idx, &info, 1);
17511
17512 return 0;
17513diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17514index 451c0a7..e57f551 100644
17515--- a/arch/x86/kernel/trampoline_32.S
17516+++ b/arch/x86/kernel/trampoline_32.S
17517@@ -32,6 +32,12 @@
17518 #include <asm/segment.h>
17519 #include <asm/page_types.h>
17520
17521+#ifdef CONFIG_PAX_KERNEXEC
17522+#define ta(X) (X)
17523+#else
17524+#define ta(X) ((X) - __PAGE_OFFSET)
17525+#endif
17526+
17527 #ifdef CONFIG_SMP
17528
17529 .section ".x86_trampoline","a"
17530@@ -62,7 +68,7 @@ r_base = .
17531 inc %ax # protected mode (PE) bit
17532 lmsw %ax # into protected mode
17533 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17534- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17535+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17536
17537 # These need to be in the same 64K segment as the above;
17538 # hence we don't use the boot_gdt_descr defined in head.S
17539diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17540index 09ff517..df19fbff 100644
17541--- a/arch/x86/kernel/trampoline_64.S
17542+++ b/arch/x86/kernel/trampoline_64.S
17543@@ -90,7 +90,7 @@ startup_32:
17544 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17545 movl %eax, %ds
17546
17547- movl $X86_CR4_PAE, %eax
17548+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17549 movl %eax, %cr4 # Enable PAE mode
17550
17551 # Setup trampoline 4 level pagetables
17552@@ -138,7 +138,7 @@ tidt:
17553 # so the kernel can live anywhere
17554 .balign 4
17555 tgdt:
17556- .short tgdt_end - tgdt # gdt limit
17557+ .short tgdt_end - tgdt - 1 # gdt limit
17558 .long tgdt - r_base
17559 .short 0
17560 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17561diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17562index a8e3eb8..c9dbd7d 100644
17563--- a/arch/x86/kernel/traps.c
17564+++ b/arch/x86/kernel/traps.c
17565@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17566
17567 /* Do we ignore FPU interrupts ? */
17568 char ignore_fpu_irq;
17569-
17570-/*
17571- * The IDT has to be page-aligned to simplify the Pentium
17572- * F0 0F bug workaround.
17573- */
17574-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17575 #endif
17576
17577 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17578@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17579 }
17580
17581 static void __kprobes
17582-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17583+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17584 long error_code, siginfo_t *info)
17585 {
17586 struct task_struct *tsk = current;
17587
17588 #ifdef CONFIG_X86_32
17589- if (regs->flags & X86_VM_MASK) {
17590+ if (v8086_mode(regs)) {
17591 /*
17592 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17593 * On nmi (interrupt 2), do_trap should not be called.
17594@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17595 }
17596 #endif
17597
17598- if (!user_mode(regs))
17599+ if (!user_mode_novm(regs))
17600 goto kernel_trap;
17601
17602 #ifdef CONFIG_X86_32
17603@@ -148,7 +142,7 @@ trap_signal:
17604 printk_ratelimit()) {
17605 printk(KERN_INFO
17606 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17607- tsk->comm, tsk->pid, str,
17608+ tsk->comm, task_pid_nr(tsk), str,
17609 regs->ip, regs->sp, error_code);
17610 print_vma_addr(" in ", regs->ip);
17611 printk("\n");
17612@@ -165,8 +159,20 @@ kernel_trap:
17613 if (!fixup_exception(regs)) {
17614 tsk->thread.error_code = error_code;
17615 tsk->thread.trap_no = trapnr;
17616+
17617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17618+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17619+ str = "PAX: suspicious stack segment fault";
17620+#endif
17621+
17622 die(str, regs, error_code);
17623 }
17624+
17625+#ifdef CONFIG_PAX_REFCOUNT
17626+ if (trapnr == 4)
17627+ pax_report_refcount_overflow(regs);
17628+#endif
17629+
17630 return;
17631
17632 #ifdef CONFIG_X86_32
17633@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17634 conditional_sti(regs);
17635
17636 #ifdef CONFIG_X86_32
17637- if (regs->flags & X86_VM_MASK)
17638+ if (v8086_mode(regs))
17639 goto gp_in_vm86;
17640 #endif
17641
17642 tsk = current;
17643- if (!user_mode(regs))
17644+ if (!user_mode_novm(regs))
17645 goto gp_in_kernel;
17646
17647+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17648+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17649+ struct mm_struct *mm = tsk->mm;
17650+ unsigned long limit;
17651+
17652+ down_write(&mm->mmap_sem);
17653+ limit = mm->context.user_cs_limit;
17654+ if (limit < TASK_SIZE) {
17655+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17656+ up_write(&mm->mmap_sem);
17657+ return;
17658+ }
17659+ up_write(&mm->mmap_sem);
17660+ }
17661+#endif
17662+
17663 tsk->thread.error_code = error_code;
17664 tsk->thread.trap_no = 13;
17665
17666@@ -295,6 +317,13 @@ gp_in_kernel:
17667 if (notify_die(DIE_GPF, "general protection fault", regs,
17668 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17669 return;
17670+
17671+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17672+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17673+ die("PAX: suspicious general protection fault", regs, error_code);
17674+ else
17675+#endif
17676+
17677 die("general protection fault", regs, error_code);
17678 }
17679
17680@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17681 /* It's safe to allow irq's after DR6 has been saved */
17682 preempt_conditional_sti(regs);
17683
17684- if (regs->flags & X86_VM_MASK) {
17685+ if (v8086_mode(regs)) {
17686 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17687 error_code, 1);
17688 preempt_conditional_cli(regs);
17689@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17690 * We already checked v86 mode above, so we can check for kernel mode
17691 * by just checking the CPL of CS.
17692 */
17693- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17694+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17695 tsk->thread.debugreg6 &= ~DR_STEP;
17696 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17697 regs->flags &= ~X86_EFLAGS_TF;
17698@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17699 return;
17700 conditional_sti(regs);
17701
17702- if (!user_mode_vm(regs))
17703+ if (!user_mode(regs))
17704 {
17705 if (!fixup_exception(regs)) {
17706 task->thread.error_code = error_code;
17707@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17708 void __math_state_restore(void)
17709 {
17710 struct thread_info *thread = current_thread_info();
17711- struct task_struct *tsk = thread->task;
17712+ struct task_struct *tsk = current;
17713
17714 /*
17715 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17716@@ -595,8 +624,7 @@ void __math_state_restore(void)
17717 */
17718 asmlinkage void math_state_restore(void)
17719 {
17720- struct thread_info *thread = current_thread_info();
17721- struct task_struct *tsk = thread->task;
17722+ struct task_struct *tsk = current;
17723
17724 if (!tsk_used_math(tsk)) {
17725 local_irq_enable();
17726diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17727index b9242ba..50c5edd 100644
17728--- a/arch/x86/kernel/verify_cpu.S
17729+++ b/arch/x86/kernel/verify_cpu.S
17730@@ -20,6 +20,7 @@
17731 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17732 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17733 * arch/x86/kernel/head_32.S: processor startup
17734+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17735 *
17736 * verify_cpu, returns the status of longmode and SSE in register %eax.
17737 * 0: Success 1: Failure
17738diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17739index 863f875..4307295 100644
17740--- a/arch/x86/kernel/vm86_32.c
17741+++ b/arch/x86/kernel/vm86_32.c
17742@@ -41,6 +41,7 @@
17743 #include <linux/ptrace.h>
17744 #include <linux/audit.h>
17745 #include <linux/stddef.h>
17746+#include <linux/grsecurity.h>
17747
17748 #include <asm/uaccess.h>
17749 #include <asm/io.h>
17750@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17751 do_exit(SIGSEGV);
17752 }
17753
17754- tss = &per_cpu(init_tss, get_cpu());
17755+ tss = init_tss + get_cpu();
17756 current->thread.sp0 = current->thread.saved_sp0;
17757 current->thread.sysenter_cs = __KERNEL_CS;
17758 load_sp0(tss, &current->thread);
17759@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17760 struct task_struct *tsk;
17761 int tmp, ret = -EPERM;
17762
17763+#ifdef CONFIG_GRKERNSEC_VM86
17764+ if (!capable(CAP_SYS_RAWIO)) {
17765+ gr_handle_vm86();
17766+ goto out;
17767+ }
17768+#endif
17769+
17770 tsk = current;
17771 if (tsk->thread.saved_sp0)
17772 goto out;
17773@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17774 int tmp, ret;
17775 struct vm86plus_struct __user *v86;
17776
17777+#ifdef CONFIG_GRKERNSEC_VM86
17778+ if (!capable(CAP_SYS_RAWIO)) {
17779+ gr_handle_vm86();
17780+ ret = -EPERM;
17781+ goto out;
17782+ }
17783+#endif
17784+
17785 tsk = current;
17786 switch (cmd) {
17787 case VM86_REQUEST_IRQ:
17788@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17789 tsk->thread.saved_fs = info->regs32->fs;
17790 tsk->thread.saved_gs = get_user_gs(info->regs32);
17791
17792- tss = &per_cpu(init_tss, get_cpu());
17793+ tss = init_tss + get_cpu();
17794 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17795 if (cpu_has_sep)
17796 tsk->thread.sysenter_cs = 0;
17797@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17798 goto cannot_handle;
17799 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17800 goto cannot_handle;
17801- intr_ptr = (unsigned long __user *) (i << 2);
17802+ intr_ptr = (__force unsigned long __user *) (i << 2);
17803 if (get_user(segoffs, intr_ptr))
17804 goto cannot_handle;
17805 if ((segoffs >> 16) == BIOSSEG)
17806diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17807index 0f703f1..9e15f64 100644
17808--- a/arch/x86/kernel/vmlinux.lds.S
17809+++ b/arch/x86/kernel/vmlinux.lds.S
17810@@ -26,6 +26,13 @@
17811 #include <asm/page_types.h>
17812 #include <asm/cache.h>
17813 #include <asm/boot.h>
17814+#include <asm/segment.h>
17815+
17816+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17817+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17818+#else
17819+#define __KERNEL_TEXT_OFFSET 0
17820+#endif
17821
17822 #undef i386 /* in case the preprocessor is a 32bit one */
17823
17824@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17825
17826 PHDRS {
17827 text PT_LOAD FLAGS(5); /* R_E */
17828+#ifdef CONFIG_X86_32
17829+ module PT_LOAD FLAGS(5); /* R_E */
17830+#endif
17831+#ifdef CONFIG_XEN
17832+ rodata PT_LOAD FLAGS(5); /* R_E */
17833+#else
17834+ rodata PT_LOAD FLAGS(4); /* R__ */
17835+#endif
17836 data PT_LOAD FLAGS(6); /* RW_ */
17837-#ifdef CONFIG_X86_64
17838+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17839 #ifdef CONFIG_SMP
17840 percpu PT_LOAD FLAGS(6); /* RW_ */
17841 #endif
17842+ text.init PT_LOAD FLAGS(5); /* R_E */
17843+ text.exit PT_LOAD FLAGS(5); /* R_E */
17844 init PT_LOAD FLAGS(7); /* RWE */
17845-#endif
17846 note PT_NOTE FLAGS(0); /* ___ */
17847 }
17848
17849 SECTIONS
17850 {
17851 #ifdef CONFIG_X86_32
17852- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17853- phys_startup_32 = startup_32 - LOAD_OFFSET;
17854+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17855 #else
17856- . = __START_KERNEL;
17857- phys_startup_64 = startup_64 - LOAD_OFFSET;
17858+ . = __START_KERNEL;
17859 #endif
17860
17861 /* Text and read-only data */
17862- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17863- _text = .;
17864+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17865 /* bootstrapping code */
17866+#ifdef CONFIG_X86_32
17867+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17868+#else
17869+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17870+#endif
17871+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17872+ _text = .;
17873 HEAD_TEXT
17874 #ifdef CONFIG_X86_32
17875 . = ALIGN(PAGE_SIZE);
17876@@ -108,13 +128,47 @@ SECTIONS
17877 IRQENTRY_TEXT
17878 *(.fixup)
17879 *(.gnu.warning)
17880- /* End of text section */
17881- _etext = .;
17882 } :text = 0x9090
17883
17884- NOTES :text :note
17885+ . += __KERNEL_TEXT_OFFSET;
17886
17887- EXCEPTION_TABLE(16) :text = 0x9090
17888+#ifdef CONFIG_X86_32
17889+ . = ALIGN(PAGE_SIZE);
17890+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17891+
17892+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17893+ MODULES_EXEC_VADDR = .;
17894+ BYTE(0)
17895+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17896+ . = ALIGN(HPAGE_SIZE);
17897+ MODULES_EXEC_END = . - 1;
17898+#endif
17899+
17900+ } :module
17901+#endif
17902+
17903+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17904+ /* End of text section */
17905+ _etext = . - __KERNEL_TEXT_OFFSET;
17906+ }
17907+
17908+#ifdef CONFIG_X86_32
17909+ . = ALIGN(PAGE_SIZE);
17910+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17911+ *(.idt)
17912+ . = ALIGN(PAGE_SIZE);
17913+ *(.empty_zero_page)
17914+ *(.initial_pg_fixmap)
17915+ *(.initial_pg_pmd)
17916+ *(.initial_page_table)
17917+ *(.swapper_pg_dir)
17918+ } :rodata
17919+#endif
17920+
17921+ . = ALIGN(PAGE_SIZE);
17922+ NOTES :rodata :note
17923+
17924+ EXCEPTION_TABLE(16) :rodata
17925
17926 #if defined(CONFIG_DEBUG_RODATA)
17927 /* .text should occupy whole number of pages */
17928@@ -126,16 +180,20 @@ SECTIONS
17929
17930 /* Data */
17931 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17932+
17933+#ifdef CONFIG_PAX_KERNEXEC
17934+ . = ALIGN(HPAGE_SIZE);
17935+#else
17936+ . = ALIGN(PAGE_SIZE);
17937+#endif
17938+
17939 /* Start of data section */
17940 _sdata = .;
17941
17942 /* init_task */
17943 INIT_TASK_DATA(THREAD_SIZE)
17944
17945-#ifdef CONFIG_X86_32
17946- /* 32 bit has nosave before _edata */
17947 NOSAVE_DATA
17948-#endif
17949
17950 PAGE_ALIGNED_DATA(PAGE_SIZE)
17951
17952@@ -176,12 +234,19 @@ SECTIONS
17953 #endif /* CONFIG_X86_64 */
17954
17955 /* Init code and data - will be freed after init */
17956- . = ALIGN(PAGE_SIZE);
17957 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17958+ BYTE(0)
17959+
17960+#ifdef CONFIG_PAX_KERNEXEC
17961+ . = ALIGN(HPAGE_SIZE);
17962+#else
17963+ . = ALIGN(PAGE_SIZE);
17964+#endif
17965+
17966 __init_begin = .; /* paired with __init_end */
17967- }
17968+ } :init.begin
17969
17970-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17971+#ifdef CONFIG_SMP
17972 /*
17973 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17974 * output PHDR, so the next output section - .init.text - should
17975@@ -190,12 +255,27 @@ SECTIONS
17976 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17977 #endif
17978
17979- INIT_TEXT_SECTION(PAGE_SIZE)
17980-#ifdef CONFIG_X86_64
17981- :init
17982-#endif
17983+ . = ALIGN(PAGE_SIZE);
17984+ init_begin = .;
17985+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17986+ VMLINUX_SYMBOL(_sinittext) = .;
17987+ INIT_TEXT
17988+ VMLINUX_SYMBOL(_einittext) = .;
17989+ . = ALIGN(PAGE_SIZE);
17990+ } :text.init
17991
17992- INIT_DATA_SECTION(16)
17993+ /*
17994+ * .exit.text is discard at runtime, not link time, to deal with
17995+ * references from .altinstructions and .eh_frame
17996+ */
17997+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17998+ EXIT_TEXT
17999+ . = ALIGN(16);
18000+ } :text.exit
18001+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18002+
18003+ . = ALIGN(PAGE_SIZE);
18004+ INIT_DATA_SECTION(16) :init
18005
18006 /*
18007 * Code and data for a variety of lowlevel trampolines, to be
18008@@ -269,19 +349,12 @@ SECTIONS
18009 }
18010
18011 . = ALIGN(8);
18012- /*
18013- * .exit.text is discard at runtime, not link time, to deal with
18014- * references from .altinstructions and .eh_frame
18015- */
18016- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18017- EXIT_TEXT
18018- }
18019
18020 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18021 EXIT_DATA
18022 }
18023
18024-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18025+#ifndef CONFIG_SMP
18026 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18027 #endif
18028
18029@@ -300,16 +373,10 @@ SECTIONS
18030 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18031 __smp_locks = .;
18032 *(.smp_locks)
18033- . = ALIGN(PAGE_SIZE);
18034 __smp_locks_end = .;
18035+ . = ALIGN(PAGE_SIZE);
18036 }
18037
18038-#ifdef CONFIG_X86_64
18039- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18040- NOSAVE_DATA
18041- }
18042-#endif
18043-
18044 /* BSS */
18045 . = ALIGN(PAGE_SIZE);
18046 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18047@@ -325,6 +392,7 @@ SECTIONS
18048 __brk_base = .;
18049 . += 64 * 1024; /* 64k alignment slop space */
18050 *(.brk_reservation) /* areas brk users have reserved */
18051+ . = ALIGN(HPAGE_SIZE);
18052 __brk_limit = .;
18053 }
18054
18055@@ -351,13 +419,12 @@ SECTIONS
18056 * for the boot processor.
18057 */
18058 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18059-INIT_PER_CPU(gdt_page);
18060 INIT_PER_CPU(irq_stack_union);
18061
18062 /*
18063 * Build-time check on the image size:
18064 */
18065-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18066+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18067 "kernel image bigger than KERNEL_IMAGE_SIZE");
18068
18069 #ifdef CONFIG_SMP
18070diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18071index e4d4a22..47ee71f 100644
18072--- a/arch/x86/kernel/vsyscall_64.c
18073+++ b/arch/x86/kernel/vsyscall_64.c
18074@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18075 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18076 };
18077
18078-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18079+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18080
18081 static int __init vsyscall_setup(char *str)
18082 {
18083 if (str) {
18084 if (!strcmp("emulate", str))
18085 vsyscall_mode = EMULATE;
18086- else if (!strcmp("native", str))
18087- vsyscall_mode = NATIVE;
18088 else if (!strcmp("none", str))
18089 vsyscall_mode = NONE;
18090 else
18091@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18092
18093 tsk = current;
18094 if (seccomp_mode(&tsk->seccomp))
18095- do_exit(SIGKILL);
18096+ do_group_exit(SIGKILL);
18097
18098 switch (vsyscall_nr) {
18099 case 0:
18100@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18101 return true;
18102
18103 sigsegv:
18104- force_sig(SIGSEGV, current);
18105- return true;
18106+ do_group_exit(SIGKILL);
18107 }
18108
18109 /*
18110@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18111 extern char __vvar_page;
18112 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18113
18114- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18115- vsyscall_mode == NATIVE
18116- ? PAGE_KERNEL_VSYSCALL
18117- : PAGE_KERNEL_VVAR);
18118+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18119 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18120 (unsigned long)VSYSCALL_START);
18121
18122diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18123index 9796c2f..f686fbf 100644
18124--- a/arch/x86/kernel/x8664_ksyms_64.c
18125+++ b/arch/x86/kernel/x8664_ksyms_64.c
18126@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18127 EXPORT_SYMBOL(copy_user_generic_string);
18128 EXPORT_SYMBOL(copy_user_generic_unrolled);
18129 EXPORT_SYMBOL(__copy_user_nocache);
18130-EXPORT_SYMBOL(_copy_from_user);
18131-EXPORT_SYMBOL(_copy_to_user);
18132
18133 EXPORT_SYMBOL(copy_page);
18134 EXPORT_SYMBOL(clear_page);
18135diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18136index a391134..d0b63b6e 100644
18137--- a/arch/x86/kernel/xsave.c
18138+++ b/arch/x86/kernel/xsave.c
18139@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18140 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18141 return -EINVAL;
18142
18143- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18144+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18145 fx_sw_user->extended_size -
18146 FP_XSTATE_MAGIC2_SIZE));
18147 if (err)
18148@@ -267,7 +267,7 @@ fx_only:
18149 * the other extended state.
18150 */
18151 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18152- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18153+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18154 }
18155
18156 /*
18157@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18158 if (use_xsave())
18159 err = restore_user_xstate(buf);
18160 else
18161- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18162+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18163 buf);
18164 if (unlikely(err)) {
18165 /*
18166diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18167index f1e3be1..588efc8 100644
18168--- a/arch/x86/kvm/emulate.c
18169+++ b/arch/x86/kvm/emulate.c
18170@@ -249,6 +249,7 @@ struct gprefix {
18171
18172 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18173 do { \
18174+ unsigned long _tmp; \
18175 __asm__ __volatile__ ( \
18176 _PRE_EFLAGS("0", "4", "2") \
18177 _op _suffix " %"_x"3,%1; " \
18178@@ -263,8 +264,6 @@ struct gprefix {
18179 /* Raw emulation: instruction has two explicit operands. */
18180 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18181 do { \
18182- unsigned long _tmp; \
18183- \
18184 switch ((ctxt)->dst.bytes) { \
18185 case 2: \
18186 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18187@@ -280,7 +279,6 @@ struct gprefix {
18188
18189 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18190 do { \
18191- unsigned long _tmp; \
18192 switch ((ctxt)->dst.bytes) { \
18193 case 1: \
18194 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18195diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18196index 54abb40..a192606 100644
18197--- a/arch/x86/kvm/lapic.c
18198+++ b/arch/x86/kvm/lapic.c
18199@@ -53,7 +53,7 @@
18200 #define APIC_BUS_CYCLE_NS 1
18201
18202 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18203-#define apic_debug(fmt, arg...)
18204+#define apic_debug(fmt, arg...) do {} while (0)
18205
18206 #define APIC_LVT_NUM 6
18207 /* 14 is the version for Xeon and Pentium 8.4.8*/
18208diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18209index f1b36cf..af8a124 100644
18210--- a/arch/x86/kvm/mmu.c
18211+++ b/arch/x86/kvm/mmu.c
18212@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18213
18214 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18215
18216- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18217+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18218
18219 /*
18220 * Assume that the pte write on a page table of the same type
18221@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18222 }
18223
18224 spin_lock(&vcpu->kvm->mmu_lock);
18225- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18226+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18227 gentry = 0;
18228 kvm_mmu_free_some_pages(vcpu);
18229 ++vcpu->kvm->stat.mmu_pte_write;
18230diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18231index 9299410..ade2f9b 100644
18232--- a/arch/x86/kvm/paging_tmpl.h
18233+++ b/arch/x86/kvm/paging_tmpl.h
18234@@ -197,7 +197,7 @@ retry_walk:
18235 if (unlikely(kvm_is_error_hva(host_addr)))
18236 goto error;
18237
18238- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18239+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18240 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18241 goto error;
18242
18243@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18244 if (need_flush)
18245 kvm_flush_remote_tlbs(vcpu->kvm);
18246
18247- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18248+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18249
18250 spin_unlock(&vcpu->kvm->mmu_lock);
18251
18252diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18253index e32243e..a6e6172 100644
18254--- a/arch/x86/kvm/svm.c
18255+++ b/arch/x86/kvm/svm.c
18256@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18257 int cpu = raw_smp_processor_id();
18258
18259 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18260+
18261+ pax_open_kernel();
18262 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18263+ pax_close_kernel();
18264+
18265 load_TR_desc();
18266 }
18267
18268@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18269 #endif
18270 #endif
18271
18272+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18273+ __set_fs(current_thread_info()->addr_limit);
18274+#endif
18275+
18276 reload_tss(vcpu);
18277
18278 local_irq_disable();
18279diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18280index 579a0b5..ed7bbf9 100644
18281--- a/arch/x86/kvm/vmx.c
18282+++ b/arch/x86/kvm/vmx.c
18283@@ -1305,7 +1305,11 @@ static void reload_tss(void)
18284 struct desc_struct *descs;
18285
18286 descs = (void *)gdt->address;
18287+
18288+ pax_open_kernel();
18289 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18290+ pax_close_kernel();
18291+
18292 load_TR_desc();
18293 }
18294
18295@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18296 if (!cpu_has_vmx_flexpriority())
18297 flexpriority_enabled = 0;
18298
18299- if (!cpu_has_vmx_tpr_shadow())
18300- kvm_x86_ops->update_cr8_intercept = NULL;
18301+ if (!cpu_has_vmx_tpr_shadow()) {
18302+ pax_open_kernel();
18303+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18304+ pax_close_kernel();
18305+ }
18306
18307 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18308 kvm_disable_largepages();
18309@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18310 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18311
18312 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18313- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18314+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18315
18316 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18317 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18318@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18319 "jmp .Lkvm_vmx_return \n\t"
18320 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18321 ".Lkvm_vmx_return: "
18322+
18323+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18324+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18325+ ".Lkvm_vmx_return2: "
18326+#endif
18327+
18328 /* Save guest registers, load host registers, keep flags */
18329 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18330 "pop %0 \n\t"
18331@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18332 #endif
18333 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18334 [wordsize]"i"(sizeof(ulong))
18335+
18336+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18337+ ,[cs]"i"(__KERNEL_CS)
18338+#endif
18339+
18340 : "cc", "memory"
18341 , R"ax", R"bx", R"di", R"si"
18342 #ifdef CONFIG_X86_64
18343@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18344 }
18345 }
18346
18347- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18348+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18349+
18350+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18351+ loadsegment(fs, __KERNEL_PERCPU);
18352+#endif
18353+
18354+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18355+ __set_fs(current_thread_info()->addr_limit);
18356+#endif
18357+
18358 vmx->loaded_vmcs->launched = 1;
18359
18360 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18361diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18362index 4c938da..4ddef65 100644
18363--- a/arch/x86/kvm/x86.c
18364+++ b/arch/x86/kvm/x86.c
18365@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18366 {
18367 struct kvm *kvm = vcpu->kvm;
18368 int lm = is_long_mode(vcpu);
18369- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18370- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18371+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18372+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18373 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18374 : kvm->arch.xen_hvm_config.blob_size_32;
18375 u32 page_num = data & ~PAGE_MASK;
18376@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18377 if (n < msr_list.nmsrs)
18378 goto out;
18379 r = -EFAULT;
18380+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18381+ goto out;
18382 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18383 num_msrs_to_save * sizeof(u32)))
18384 goto out;
18385@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18386 struct kvm_cpuid2 *cpuid,
18387 struct kvm_cpuid_entry2 __user *entries)
18388 {
18389- int r;
18390+ int r, i;
18391
18392 r = -E2BIG;
18393 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18394 goto out;
18395 r = -EFAULT;
18396- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18397- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18398+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18399 goto out;
18400+ for (i = 0; i < cpuid->nent; ++i) {
18401+ struct kvm_cpuid_entry2 cpuid_entry;
18402+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18403+ goto out;
18404+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18405+ }
18406 vcpu->arch.cpuid_nent = cpuid->nent;
18407 kvm_apic_set_version(vcpu);
18408 kvm_x86_ops->cpuid_update(vcpu);
18409@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18410 struct kvm_cpuid2 *cpuid,
18411 struct kvm_cpuid_entry2 __user *entries)
18412 {
18413- int r;
18414+ int r, i;
18415
18416 r = -E2BIG;
18417 if (cpuid->nent < vcpu->arch.cpuid_nent)
18418 goto out;
18419 r = -EFAULT;
18420- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18421- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18422+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18423 goto out;
18424+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18425+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18426+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18427+ goto out;
18428+ }
18429 return 0;
18430
18431 out:
18432@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18433 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18434 struct kvm_interrupt *irq)
18435 {
18436- if (irq->irq < 0 || irq->irq >= 256)
18437+ if (irq->irq >= 256)
18438 return -EINVAL;
18439 if (irqchip_in_kernel(vcpu->kvm))
18440 return -ENXIO;
18441@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18442 kvm_mmu_set_mmio_spte_mask(mask);
18443 }
18444
18445-int kvm_arch_init(void *opaque)
18446+int kvm_arch_init(const void *opaque)
18447 {
18448 int r;
18449 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18450diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18451index cf4603b..7cdde38 100644
18452--- a/arch/x86/lguest/boot.c
18453+++ b/arch/x86/lguest/boot.c
18454@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18455 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18456 * Launcher to reboot us.
18457 */
18458-static void lguest_restart(char *reason)
18459+static __noreturn void lguest_restart(char *reason)
18460 {
18461 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18462+ BUG();
18463 }
18464
18465 /*G:050
18466diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18467index 042f682..c92afb6 100644
18468--- a/arch/x86/lib/atomic64_32.c
18469+++ b/arch/x86/lib/atomic64_32.c
18470@@ -8,18 +8,30 @@
18471
18472 long long atomic64_read_cx8(long long, const atomic64_t *v);
18473 EXPORT_SYMBOL(atomic64_read_cx8);
18474+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18475+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18476 long long atomic64_set_cx8(long long, const atomic64_t *v);
18477 EXPORT_SYMBOL(atomic64_set_cx8);
18478+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18479+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18480 long long atomic64_xchg_cx8(long long, unsigned high);
18481 EXPORT_SYMBOL(atomic64_xchg_cx8);
18482 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18483 EXPORT_SYMBOL(atomic64_add_return_cx8);
18484+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18485+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18486 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18487 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18488+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18489+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18490 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18491 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18492+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18493+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18494 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18495 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18496+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18497+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18498 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18499 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18500 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18501@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18502 #ifndef CONFIG_X86_CMPXCHG64
18503 long long atomic64_read_386(long long, const atomic64_t *v);
18504 EXPORT_SYMBOL(atomic64_read_386);
18505+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18506+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18507 long long atomic64_set_386(long long, const atomic64_t *v);
18508 EXPORT_SYMBOL(atomic64_set_386);
18509+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18510+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18511 long long atomic64_xchg_386(long long, unsigned high);
18512 EXPORT_SYMBOL(atomic64_xchg_386);
18513 long long atomic64_add_return_386(long long a, atomic64_t *v);
18514 EXPORT_SYMBOL(atomic64_add_return_386);
18515+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18516+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18517 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18518 EXPORT_SYMBOL(atomic64_sub_return_386);
18519+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18520+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18521 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18522 EXPORT_SYMBOL(atomic64_inc_return_386);
18523+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18524+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18525 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18526 EXPORT_SYMBOL(atomic64_dec_return_386);
18527+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18528+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18529 long long atomic64_add_386(long long a, atomic64_t *v);
18530 EXPORT_SYMBOL(atomic64_add_386);
18531+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18532+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18533 long long atomic64_sub_386(long long a, atomic64_t *v);
18534 EXPORT_SYMBOL(atomic64_sub_386);
18535+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18536+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18537 long long atomic64_inc_386(long long a, atomic64_t *v);
18538 EXPORT_SYMBOL(atomic64_inc_386);
18539+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18540+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18541 long long atomic64_dec_386(long long a, atomic64_t *v);
18542 EXPORT_SYMBOL(atomic64_dec_386);
18543+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18544+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18545 long long atomic64_dec_if_positive_386(atomic64_t *v);
18546 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18547 int atomic64_inc_not_zero_386(atomic64_t *v);
18548diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18549index e8e7e0d..56fd1b0 100644
18550--- a/arch/x86/lib/atomic64_386_32.S
18551+++ b/arch/x86/lib/atomic64_386_32.S
18552@@ -48,6 +48,10 @@ BEGIN(read)
18553 movl (v), %eax
18554 movl 4(v), %edx
18555 RET_ENDP
18556+BEGIN(read_unchecked)
18557+ movl (v), %eax
18558+ movl 4(v), %edx
18559+RET_ENDP
18560 #undef v
18561
18562 #define v %esi
18563@@ -55,6 +59,10 @@ BEGIN(set)
18564 movl %ebx, (v)
18565 movl %ecx, 4(v)
18566 RET_ENDP
18567+BEGIN(set_unchecked)
18568+ movl %ebx, (v)
18569+ movl %ecx, 4(v)
18570+RET_ENDP
18571 #undef v
18572
18573 #define v %esi
18574@@ -70,6 +78,20 @@ RET_ENDP
18575 BEGIN(add)
18576 addl %eax, (v)
18577 adcl %edx, 4(v)
18578+
18579+#ifdef CONFIG_PAX_REFCOUNT
18580+ jno 0f
18581+ subl %eax, (v)
18582+ sbbl %edx, 4(v)
18583+ int $4
18584+0:
18585+ _ASM_EXTABLE(0b, 0b)
18586+#endif
18587+
18588+RET_ENDP
18589+BEGIN(add_unchecked)
18590+ addl %eax, (v)
18591+ adcl %edx, 4(v)
18592 RET_ENDP
18593 #undef v
18594
18595@@ -77,6 +99,24 @@ RET_ENDP
18596 BEGIN(add_return)
18597 addl (v), %eax
18598 adcl 4(v), %edx
18599+
18600+#ifdef CONFIG_PAX_REFCOUNT
18601+ into
18602+1234:
18603+ _ASM_EXTABLE(1234b, 2f)
18604+#endif
18605+
18606+ movl %eax, (v)
18607+ movl %edx, 4(v)
18608+
18609+#ifdef CONFIG_PAX_REFCOUNT
18610+2:
18611+#endif
18612+
18613+RET_ENDP
18614+BEGIN(add_return_unchecked)
18615+ addl (v), %eax
18616+ adcl 4(v), %edx
18617 movl %eax, (v)
18618 movl %edx, 4(v)
18619 RET_ENDP
18620@@ -86,6 +126,20 @@ RET_ENDP
18621 BEGIN(sub)
18622 subl %eax, (v)
18623 sbbl %edx, 4(v)
18624+
18625+#ifdef CONFIG_PAX_REFCOUNT
18626+ jno 0f
18627+ addl %eax, (v)
18628+ adcl %edx, 4(v)
18629+ int $4
18630+0:
18631+ _ASM_EXTABLE(0b, 0b)
18632+#endif
18633+
18634+RET_ENDP
18635+BEGIN(sub_unchecked)
18636+ subl %eax, (v)
18637+ sbbl %edx, 4(v)
18638 RET_ENDP
18639 #undef v
18640
18641@@ -96,6 +150,27 @@ BEGIN(sub_return)
18642 sbbl $0, %edx
18643 addl (v), %eax
18644 adcl 4(v), %edx
18645+
18646+#ifdef CONFIG_PAX_REFCOUNT
18647+ into
18648+1234:
18649+ _ASM_EXTABLE(1234b, 2f)
18650+#endif
18651+
18652+ movl %eax, (v)
18653+ movl %edx, 4(v)
18654+
18655+#ifdef CONFIG_PAX_REFCOUNT
18656+2:
18657+#endif
18658+
18659+RET_ENDP
18660+BEGIN(sub_return_unchecked)
18661+ negl %edx
18662+ negl %eax
18663+ sbbl $0, %edx
18664+ addl (v), %eax
18665+ adcl 4(v), %edx
18666 movl %eax, (v)
18667 movl %edx, 4(v)
18668 RET_ENDP
18669@@ -105,6 +180,20 @@ RET_ENDP
18670 BEGIN(inc)
18671 addl $1, (v)
18672 adcl $0, 4(v)
18673+
18674+#ifdef CONFIG_PAX_REFCOUNT
18675+ jno 0f
18676+ subl $1, (v)
18677+ sbbl $0, 4(v)
18678+ int $4
18679+0:
18680+ _ASM_EXTABLE(0b, 0b)
18681+#endif
18682+
18683+RET_ENDP
18684+BEGIN(inc_unchecked)
18685+ addl $1, (v)
18686+ adcl $0, 4(v)
18687 RET_ENDP
18688 #undef v
18689
18690@@ -114,6 +203,26 @@ BEGIN(inc_return)
18691 movl 4(v), %edx
18692 addl $1, %eax
18693 adcl $0, %edx
18694+
18695+#ifdef CONFIG_PAX_REFCOUNT
18696+ into
18697+1234:
18698+ _ASM_EXTABLE(1234b, 2f)
18699+#endif
18700+
18701+ movl %eax, (v)
18702+ movl %edx, 4(v)
18703+
18704+#ifdef CONFIG_PAX_REFCOUNT
18705+2:
18706+#endif
18707+
18708+RET_ENDP
18709+BEGIN(inc_return_unchecked)
18710+ movl (v), %eax
18711+ movl 4(v), %edx
18712+ addl $1, %eax
18713+ adcl $0, %edx
18714 movl %eax, (v)
18715 movl %edx, 4(v)
18716 RET_ENDP
18717@@ -123,6 +232,20 @@ RET_ENDP
18718 BEGIN(dec)
18719 subl $1, (v)
18720 sbbl $0, 4(v)
18721+
18722+#ifdef CONFIG_PAX_REFCOUNT
18723+ jno 0f
18724+ addl $1, (v)
18725+ adcl $0, 4(v)
18726+ int $4
18727+0:
18728+ _ASM_EXTABLE(0b, 0b)
18729+#endif
18730+
18731+RET_ENDP
18732+BEGIN(dec_unchecked)
18733+ subl $1, (v)
18734+ sbbl $0, 4(v)
18735 RET_ENDP
18736 #undef v
18737
18738@@ -132,6 +255,26 @@ BEGIN(dec_return)
18739 movl 4(v), %edx
18740 subl $1, %eax
18741 sbbl $0, %edx
18742+
18743+#ifdef CONFIG_PAX_REFCOUNT
18744+ into
18745+1234:
18746+ _ASM_EXTABLE(1234b, 2f)
18747+#endif
18748+
18749+ movl %eax, (v)
18750+ movl %edx, 4(v)
18751+
18752+#ifdef CONFIG_PAX_REFCOUNT
18753+2:
18754+#endif
18755+
18756+RET_ENDP
18757+BEGIN(dec_return_unchecked)
18758+ movl (v), %eax
18759+ movl 4(v), %edx
18760+ subl $1, %eax
18761+ sbbl $0, %edx
18762 movl %eax, (v)
18763 movl %edx, 4(v)
18764 RET_ENDP
18765@@ -143,6 +286,13 @@ BEGIN(add_unless)
18766 adcl %edx, %edi
18767 addl (v), %eax
18768 adcl 4(v), %edx
18769+
18770+#ifdef CONFIG_PAX_REFCOUNT
18771+ into
18772+1234:
18773+ _ASM_EXTABLE(1234b, 2f)
18774+#endif
18775+
18776 cmpl %eax, %esi
18777 je 3f
18778 1:
18779@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18780 1:
18781 addl $1, %eax
18782 adcl $0, %edx
18783+
18784+#ifdef CONFIG_PAX_REFCOUNT
18785+ into
18786+1234:
18787+ _ASM_EXTABLE(1234b, 2f)
18788+#endif
18789+
18790 movl %eax, (v)
18791 movl %edx, 4(v)
18792 movl $1, %eax
18793@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18794 movl 4(v), %edx
18795 subl $1, %eax
18796 sbbl $0, %edx
18797+
18798+#ifdef CONFIG_PAX_REFCOUNT
18799+ into
18800+1234:
18801+ _ASM_EXTABLE(1234b, 1f)
18802+#endif
18803+
18804 js 1f
18805 movl %eax, (v)
18806 movl %edx, 4(v)
18807diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18808index 391a083..d658e9f 100644
18809--- a/arch/x86/lib/atomic64_cx8_32.S
18810+++ b/arch/x86/lib/atomic64_cx8_32.S
18811@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18812 CFI_STARTPROC
18813
18814 read64 %ecx
18815+ pax_force_retaddr
18816 ret
18817 CFI_ENDPROC
18818 ENDPROC(atomic64_read_cx8)
18819
18820+ENTRY(atomic64_read_unchecked_cx8)
18821+ CFI_STARTPROC
18822+
18823+ read64 %ecx
18824+ pax_force_retaddr
18825+ ret
18826+ CFI_ENDPROC
18827+ENDPROC(atomic64_read_unchecked_cx8)
18828+
18829 ENTRY(atomic64_set_cx8)
18830 CFI_STARTPROC
18831
18832@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18833 cmpxchg8b (%esi)
18834 jne 1b
18835
18836+ pax_force_retaddr
18837 ret
18838 CFI_ENDPROC
18839 ENDPROC(atomic64_set_cx8)
18840
18841+ENTRY(atomic64_set_unchecked_cx8)
18842+ CFI_STARTPROC
18843+
18844+1:
18845+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18846+ * are atomic on 586 and newer */
18847+ cmpxchg8b (%esi)
18848+ jne 1b
18849+
18850+ pax_force_retaddr
18851+ ret
18852+ CFI_ENDPROC
18853+ENDPROC(atomic64_set_unchecked_cx8)
18854+
18855 ENTRY(atomic64_xchg_cx8)
18856 CFI_STARTPROC
18857
18858@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18859 cmpxchg8b (%esi)
18860 jne 1b
18861
18862+ pax_force_retaddr
18863 ret
18864 CFI_ENDPROC
18865 ENDPROC(atomic64_xchg_cx8)
18866
18867-.macro addsub_return func ins insc
18868-ENTRY(atomic64_\func\()_return_cx8)
18869+.macro addsub_return func ins insc unchecked=""
18870+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18871 CFI_STARTPROC
18872 SAVE ebp
18873 SAVE ebx
18874@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18875 movl %edx, %ecx
18876 \ins\()l %esi, %ebx
18877 \insc\()l %edi, %ecx
18878+
18879+.ifb \unchecked
18880+#ifdef CONFIG_PAX_REFCOUNT
18881+ into
18882+2:
18883+ _ASM_EXTABLE(2b, 3f)
18884+#endif
18885+.endif
18886+
18887 LOCK_PREFIX
18888 cmpxchg8b (%ebp)
18889 jne 1b
18890-
18891-10:
18892 movl %ebx, %eax
18893 movl %ecx, %edx
18894+
18895+.ifb \unchecked
18896+#ifdef CONFIG_PAX_REFCOUNT
18897+3:
18898+#endif
18899+.endif
18900+
18901 RESTORE edi
18902 RESTORE esi
18903 RESTORE ebx
18904 RESTORE ebp
18905+ pax_force_retaddr
18906 ret
18907 CFI_ENDPROC
18908-ENDPROC(atomic64_\func\()_return_cx8)
18909+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18910 .endm
18911
18912 addsub_return add add adc
18913 addsub_return sub sub sbb
18914+addsub_return add add adc _unchecked
18915+addsub_return sub sub sbb _unchecked
18916
18917-.macro incdec_return func ins insc
18918-ENTRY(atomic64_\func\()_return_cx8)
18919+.macro incdec_return func ins insc unchecked
18920+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18921 CFI_STARTPROC
18922 SAVE ebx
18923
18924@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18925 movl %edx, %ecx
18926 \ins\()l $1, %ebx
18927 \insc\()l $0, %ecx
18928+
18929+.ifb \unchecked
18930+#ifdef CONFIG_PAX_REFCOUNT
18931+ into
18932+2:
18933+ _ASM_EXTABLE(2b, 3f)
18934+#endif
18935+.endif
18936+
18937 LOCK_PREFIX
18938 cmpxchg8b (%esi)
18939 jne 1b
18940
18941-10:
18942 movl %ebx, %eax
18943 movl %ecx, %edx
18944+
18945+.ifb \unchecked
18946+#ifdef CONFIG_PAX_REFCOUNT
18947+3:
18948+#endif
18949+.endif
18950+
18951 RESTORE ebx
18952+ pax_force_retaddr
18953 ret
18954 CFI_ENDPROC
18955-ENDPROC(atomic64_\func\()_return_cx8)
18956+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18957 .endm
18958
18959 incdec_return inc add adc
18960 incdec_return dec sub sbb
18961+incdec_return inc add adc _unchecked
18962+incdec_return dec sub sbb _unchecked
18963
18964 ENTRY(atomic64_dec_if_positive_cx8)
18965 CFI_STARTPROC
18966@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18967 movl %edx, %ecx
18968 subl $1, %ebx
18969 sbb $0, %ecx
18970+
18971+#ifdef CONFIG_PAX_REFCOUNT
18972+ into
18973+1234:
18974+ _ASM_EXTABLE(1234b, 2f)
18975+#endif
18976+
18977 js 2f
18978 LOCK_PREFIX
18979 cmpxchg8b (%esi)
18980@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18981 movl %ebx, %eax
18982 movl %ecx, %edx
18983 RESTORE ebx
18984+ pax_force_retaddr
18985 ret
18986 CFI_ENDPROC
18987 ENDPROC(atomic64_dec_if_positive_cx8)
18988@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18989 movl %edx, %ecx
18990 addl %esi, %ebx
18991 adcl %edi, %ecx
18992+
18993+#ifdef CONFIG_PAX_REFCOUNT
18994+ into
18995+1234:
18996+ _ASM_EXTABLE(1234b, 3f)
18997+#endif
18998+
18999 LOCK_PREFIX
19000 cmpxchg8b (%ebp)
19001 jne 1b
19002@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19003 CFI_ADJUST_CFA_OFFSET -8
19004 RESTORE ebx
19005 RESTORE ebp
19006+ pax_force_retaddr
19007 ret
19008 4:
19009 cmpl %edx, 4(%esp)
19010@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19011 movl %edx, %ecx
19012 addl $1, %ebx
19013 adcl $0, %ecx
19014+
19015+#ifdef CONFIG_PAX_REFCOUNT
19016+ into
19017+1234:
19018+ _ASM_EXTABLE(1234b, 3f)
19019+#endif
19020+
19021 LOCK_PREFIX
19022 cmpxchg8b (%esi)
19023 jne 1b
19024@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19025 movl $1, %eax
19026 3:
19027 RESTORE ebx
19028+ pax_force_retaddr
19029 ret
19030 4:
19031 testl %edx, %edx
19032diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19033index 78d16a5..fbcf666 100644
19034--- a/arch/x86/lib/checksum_32.S
19035+++ b/arch/x86/lib/checksum_32.S
19036@@ -28,7 +28,8 @@
19037 #include <linux/linkage.h>
19038 #include <asm/dwarf2.h>
19039 #include <asm/errno.h>
19040-
19041+#include <asm/segment.h>
19042+
19043 /*
19044 * computes a partial checksum, e.g. for TCP/UDP fragments
19045 */
19046@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19047
19048 #define ARGBASE 16
19049 #define FP 12
19050-
19051-ENTRY(csum_partial_copy_generic)
19052+
19053+ENTRY(csum_partial_copy_generic_to_user)
19054 CFI_STARTPROC
19055+
19056+#ifdef CONFIG_PAX_MEMORY_UDEREF
19057+ pushl_cfi %gs
19058+ popl_cfi %es
19059+ jmp csum_partial_copy_generic
19060+#endif
19061+
19062+ENTRY(csum_partial_copy_generic_from_user)
19063+
19064+#ifdef CONFIG_PAX_MEMORY_UDEREF
19065+ pushl_cfi %gs
19066+ popl_cfi %ds
19067+#endif
19068+
19069+ENTRY(csum_partial_copy_generic)
19070 subl $4,%esp
19071 CFI_ADJUST_CFA_OFFSET 4
19072 pushl_cfi %edi
19073@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19074 jmp 4f
19075 SRC(1: movw (%esi), %bx )
19076 addl $2, %esi
19077-DST( movw %bx, (%edi) )
19078+DST( movw %bx, %es:(%edi) )
19079 addl $2, %edi
19080 addw %bx, %ax
19081 adcl $0, %eax
19082@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19083 SRC(1: movl (%esi), %ebx )
19084 SRC( movl 4(%esi), %edx )
19085 adcl %ebx, %eax
19086-DST( movl %ebx, (%edi) )
19087+DST( movl %ebx, %es:(%edi) )
19088 adcl %edx, %eax
19089-DST( movl %edx, 4(%edi) )
19090+DST( movl %edx, %es:4(%edi) )
19091
19092 SRC( movl 8(%esi), %ebx )
19093 SRC( movl 12(%esi), %edx )
19094 adcl %ebx, %eax
19095-DST( movl %ebx, 8(%edi) )
19096+DST( movl %ebx, %es:8(%edi) )
19097 adcl %edx, %eax
19098-DST( movl %edx, 12(%edi) )
19099+DST( movl %edx, %es:12(%edi) )
19100
19101 SRC( movl 16(%esi), %ebx )
19102 SRC( movl 20(%esi), %edx )
19103 adcl %ebx, %eax
19104-DST( movl %ebx, 16(%edi) )
19105+DST( movl %ebx, %es:16(%edi) )
19106 adcl %edx, %eax
19107-DST( movl %edx, 20(%edi) )
19108+DST( movl %edx, %es:20(%edi) )
19109
19110 SRC( movl 24(%esi), %ebx )
19111 SRC( movl 28(%esi), %edx )
19112 adcl %ebx, %eax
19113-DST( movl %ebx, 24(%edi) )
19114+DST( movl %ebx, %es:24(%edi) )
19115 adcl %edx, %eax
19116-DST( movl %edx, 28(%edi) )
19117+DST( movl %edx, %es:28(%edi) )
19118
19119 lea 32(%esi), %esi
19120 lea 32(%edi), %edi
19121@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19122 shrl $2, %edx # This clears CF
19123 SRC(3: movl (%esi), %ebx )
19124 adcl %ebx, %eax
19125-DST( movl %ebx, (%edi) )
19126+DST( movl %ebx, %es:(%edi) )
19127 lea 4(%esi), %esi
19128 lea 4(%edi), %edi
19129 dec %edx
19130@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19131 jb 5f
19132 SRC( movw (%esi), %cx )
19133 leal 2(%esi), %esi
19134-DST( movw %cx, (%edi) )
19135+DST( movw %cx, %es:(%edi) )
19136 leal 2(%edi), %edi
19137 je 6f
19138 shll $16,%ecx
19139 SRC(5: movb (%esi), %cl )
19140-DST( movb %cl, (%edi) )
19141+DST( movb %cl, %es:(%edi) )
19142 6: addl %ecx, %eax
19143 adcl $0, %eax
19144 7:
19145@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19146
19147 6001:
19148 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19149- movl $-EFAULT, (%ebx)
19150+ movl $-EFAULT, %ss:(%ebx)
19151
19152 # zero the complete destination - computing the rest
19153 # is too much work
19154@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19155
19156 6002:
19157 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19158- movl $-EFAULT,(%ebx)
19159+ movl $-EFAULT,%ss:(%ebx)
19160 jmp 5000b
19161
19162 .previous
19163
19164+ pushl_cfi %ss
19165+ popl_cfi %ds
19166+ pushl_cfi %ss
19167+ popl_cfi %es
19168 popl_cfi %ebx
19169 CFI_RESTORE ebx
19170 popl_cfi %esi
19171@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19172 popl_cfi %ecx # equivalent to addl $4,%esp
19173 ret
19174 CFI_ENDPROC
19175-ENDPROC(csum_partial_copy_generic)
19176+ENDPROC(csum_partial_copy_generic_to_user)
19177
19178 #else
19179
19180 /* Version for PentiumII/PPro */
19181
19182 #define ROUND1(x) \
19183+ nop; nop; nop; \
19184 SRC(movl x(%esi), %ebx ) ; \
19185 addl %ebx, %eax ; \
19186- DST(movl %ebx, x(%edi) ) ;
19187+ DST(movl %ebx, %es:x(%edi)) ;
19188
19189 #define ROUND(x) \
19190+ nop; nop; nop; \
19191 SRC(movl x(%esi), %ebx ) ; \
19192 adcl %ebx, %eax ; \
19193- DST(movl %ebx, x(%edi) ) ;
19194+ DST(movl %ebx, %es:x(%edi)) ;
19195
19196 #define ARGBASE 12
19197-
19198-ENTRY(csum_partial_copy_generic)
19199+
19200+ENTRY(csum_partial_copy_generic_to_user)
19201 CFI_STARTPROC
19202+
19203+#ifdef CONFIG_PAX_MEMORY_UDEREF
19204+ pushl_cfi %gs
19205+ popl_cfi %es
19206+ jmp csum_partial_copy_generic
19207+#endif
19208+
19209+ENTRY(csum_partial_copy_generic_from_user)
19210+
19211+#ifdef CONFIG_PAX_MEMORY_UDEREF
19212+ pushl_cfi %gs
19213+ popl_cfi %ds
19214+#endif
19215+
19216+ENTRY(csum_partial_copy_generic)
19217 pushl_cfi %ebx
19218 CFI_REL_OFFSET ebx, 0
19219 pushl_cfi %edi
19220@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19221 subl %ebx, %edi
19222 lea -1(%esi),%edx
19223 andl $-32,%edx
19224- lea 3f(%ebx,%ebx), %ebx
19225+ lea 3f(%ebx,%ebx,2), %ebx
19226 testl %esi, %esi
19227 jmp *%ebx
19228 1: addl $64,%esi
19229@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19230 jb 5f
19231 SRC( movw (%esi), %dx )
19232 leal 2(%esi), %esi
19233-DST( movw %dx, (%edi) )
19234+DST( movw %dx, %es:(%edi) )
19235 leal 2(%edi), %edi
19236 je 6f
19237 shll $16,%edx
19238 5:
19239 SRC( movb (%esi), %dl )
19240-DST( movb %dl, (%edi) )
19241+DST( movb %dl, %es:(%edi) )
19242 6: addl %edx, %eax
19243 adcl $0, %eax
19244 7:
19245 .section .fixup, "ax"
19246 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19247- movl $-EFAULT, (%ebx)
19248+ movl $-EFAULT, %ss:(%ebx)
19249 # zero the complete destination (computing the rest is too much work)
19250 movl ARGBASE+8(%esp),%edi # dst
19251 movl ARGBASE+12(%esp),%ecx # len
19252@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19253 rep; stosb
19254 jmp 7b
19255 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19256- movl $-EFAULT, (%ebx)
19257+ movl $-EFAULT, %ss:(%ebx)
19258 jmp 7b
19259 .previous
19260
19261+#ifdef CONFIG_PAX_MEMORY_UDEREF
19262+ pushl_cfi %ss
19263+ popl_cfi %ds
19264+ pushl_cfi %ss
19265+ popl_cfi %es
19266+#endif
19267+
19268 popl_cfi %esi
19269 CFI_RESTORE esi
19270 popl_cfi %edi
19271@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19272 CFI_RESTORE ebx
19273 ret
19274 CFI_ENDPROC
19275-ENDPROC(csum_partial_copy_generic)
19276+ENDPROC(csum_partial_copy_generic_to_user)
19277
19278 #undef ROUND
19279 #undef ROUND1
19280diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19281index f2145cf..cea889d 100644
19282--- a/arch/x86/lib/clear_page_64.S
19283+++ b/arch/x86/lib/clear_page_64.S
19284@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19285 movl $4096/8,%ecx
19286 xorl %eax,%eax
19287 rep stosq
19288+ pax_force_retaddr
19289 ret
19290 CFI_ENDPROC
19291 ENDPROC(clear_page_c)
19292@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19293 movl $4096,%ecx
19294 xorl %eax,%eax
19295 rep stosb
19296+ pax_force_retaddr
19297 ret
19298 CFI_ENDPROC
19299 ENDPROC(clear_page_c_e)
19300@@ -43,6 +45,7 @@ ENTRY(clear_page)
19301 leaq 64(%rdi),%rdi
19302 jnz .Lloop
19303 nop
19304+ pax_force_retaddr
19305 ret
19306 CFI_ENDPROC
19307 .Lclear_page_end:
19308@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19309
19310 #include <asm/cpufeature.h>
19311
19312- .section .altinstr_replacement,"ax"
19313+ .section .altinstr_replacement,"a"
19314 1: .byte 0xeb /* jmp <disp8> */
19315 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19316 2: .byte 0xeb /* jmp <disp8> */
19317diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19318index 1e572c5..2a162cd 100644
19319--- a/arch/x86/lib/cmpxchg16b_emu.S
19320+++ b/arch/x86/lib/cmpxchg16b_emu.S
19321@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19322
19323 popf
19324 mov $1, %al
19325+ pax_force_retaddr
19326 ret
19327
19328 not_same:
19329 popf
19330 xor %al,%al
19331+ pax_force_retaddr
19332 ret
19333
19334 CFI_ENDPROC
19335diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19336index 01c805b..dccb07f 100644
19337--- a/arch/x86/lib/copy_page_64.S
19338+++ b/arch/x86/lib/copy_page_64.S
19339@@ -9,6 +9,7 @@ copy_page_c:
19340 CFI_STARTPROC
19341 movl $4096/8,%ecx
19342 rep movsq
19343+ pax_force_retaddr
19344 ret
19345 CFI_ENDPROC
19346 ENDPROC(copy_page_c)
19347@@ -39,7 +40,7 @@ ENTRY(copy_page)
19348 movq 16 (%rsi), %rdx
19349 movq 24 (%rsi), %r8
19350 movq 32 (%rsi), %r9
19351- movq 40 (%rsi), %r10
19352+ movq 40 (%rsi), %r13
19353 movq 48 (%rsi), %r11
19354 movq 56 (%rsi), %r12
19355
19356@@ -50,7 +51,7 @@ ENTRY(copy_page)
19357 movq %rdx, 16 (%rdi)
19358 movq %r8, 24 (%rdi)
19359 movq %r9, 32 (%rdi)
19360- movq %r10, 40 (%rdi)
19361+ movq %r13, 40 (%rdi)
19362 movq %r11, 48 (%rdi)
19363 movq %r12, 56 (%rdi)
19364
19365@@ -69,7 +70,7 @@ ENTRY(copy_page)
19366 movq 16 (%rsi), %rdx
19367 movq 24 (%rsi), %r8
19368 movq 32 (%rsi), %r9
19369- movq 40 (%rsi), %r10
19370+ movq 40 (%rsi), %r13
19371 movq 48 (%rsi), %r11
19372 movq 56 (%rsi), %r12
19373
19374@@ -78,7 +79,7 @@ ENTRY(copy_page)
19375 movq %rdx, 16 (%rdi)
19376 movq %r8, 24 (%rdi)
19377 movq %r9, 32 (%rdi)
19378- movq %r10, 40 (%rdi)
19379+ movq %r13, 40 (%rdi)
19380 movq %r11, 48 (%rdi)
19381 movq %r12, 56 (%rdi)
19382
19383@@ -95,6 +96,7 @@ ENTRY(copy_page)
19384 CFI_RESTORE r13
19385 addq $3*8,%rsp
19386 CFI_ADJUST_CFA_OFFSET -3*8
19387+ pax_force_retaddr
19388 ret
19389 .Lcopy_page_end:
19390 CFI_ENDPROC
19391@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19392
19393 #include <asm/cpufeature.h>
19394
19395- .section .altinstr_replacement,"ax"
19396+ .section .altinstr_replacement,"a"
19397 1: .byte 0xeb /* jmp <disp8> */
19398 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19399 2:
19400diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19401index 0248402..821c786 100644
19402--- a/arch/x86/lib/copy_user_64.S
19403+++ b/arch/x86/lib/copy_user_64.S
19404@@ -16,6 +16,7 @@
19405 #include <asm/thread_info.h>
19406 #include <asm/cpufeature.h>
19407 #include <asm/alternative-asm.h>
19408+#include <asm/pgtable.h>
19409
19410 /*
19411 * By placing feature2 after feature1 in altinstructions section, we logically
19412@@ -29,7 +30,7 @@
19413 .byte 0xe9 /* 32bit jump */
19414 .long \orig-1f /* by default jump to orig */
19415 1:
19416- .section .altinstr_replacement,"ax"
19417+ .section .altinstr_replacement,"a"
19418 2: .byte 0xe9 /* near jump with 32bit immediate */
19419 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19420 3: .byte 0xe9 /* near jump with 32bit immediate */
19421@@ -71,47 +72,20 @@
19422 #endif
19423 .endm
19424
19425-/* Standard copy_to_user with segment limit checking */
19426-ENTRY(_copy_to_user)
19427- CFI_STARTPROC
19428- GET_THREAD_INFO(%rax)
19429- movq %rdi,%rcx
19430- addq %rdx,%rcx
19431- jc bad_to_user
19432- cmpq TI_addr_limit(%rax),%rcx
19433- ja bad_to_user
19434- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19435- copy_user_generic_unrolled,copy_user_generic_string, \
19436- copy_user_enhanced_fast_string
19437- CFI_ENDPROC
19438-ENDPROC(_copy_to_user)
19439-
19440-/* Standard copy_from_user with segment limit checking */
19441-ENTRY(_copy_from_user)
19442- CFI_STARTPROC
19443- GET_THREAD_INFO(%rax)
19444- movq %rsi,%rcx
19445- addq %rdx,%rcx
19446- jc bad_from_user
19447- cmpq TI_addr_limit(%rax),%rcx
19448- ja bad_from_user
19449- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19450- copy_user_generic_unrolled,copy_user_generic_string, \
19451- copy_user_enhanced_fast_string
19452- CFI_ENDPROC
19453-ENDPROC(_copy_from_user)
19454-
19455 .section .fixup,"ax"
19456 /* must zero dest */
19457 ENTRY(bad_from_user)
19458 bad_from_user:
19459 CFI_STARTPROC
19460+ testl %edx,%edx
19461+ js bad_to_user
19462 movl %edx,%ecx
19463 xorl %eax,%eax
19464 rep
19465 stosb
19466 bad_to_user:
19467 movl %edx,%eax
19468+ pax_force_retaddr
19469 ret
19470 CFI_ENDPROC
19471 ENDPROC(bad_from_user)
19472@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19473 jz 17f
19474 1: movq (%rsi),%r8
19475 2: movq 1*8(%rsi),%r9
19476-3: movq 2*8(%rsi),%r10
19477+3: movq 2*8(%rsi),%rax
19478 4: movq 3*8(%rsi),%r11
19479 5: movq %r8,(%rdi)
19480 6: movq %r9,1*8(%rdi)
19481-7: movq %r10,2*8(%rdi)
19482+7: movq %rax,2*8(%rdi)
19483 8: movq %r11,3*8(%rdi)
19484 9: movq 4*8(%rsi),%r8
19485 10: movq 5*8(%rsi),%r9
19486-11: movq 6*8(%rsi),%r10
19487+11: movq 6*8(%rsi),%rax
19488 12: movq 7*8(%rsi),%r11
19489 13: movq %r8,4*8(%rdi)
19490 14: movq %r9,5*8(%rdi)
19491-15: movq %r10,6*8(%rdi)
19492+15: movq %rax,6*8(%rdi)
19493 16: movq %r11,7*8(%rdi)
19494 leaq 64(%rsi),%rsi
19495 leaq 64(%rdi),%rdi
19496@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19497 decl %ecx
19498 jnz 21b
19499 23: xor %eax,%eax
19500+ pax_force_retaddr
19501 ret
19502
19503 .section .fixup,"ax"
19504@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19505 3: rep
19506 movsb
19507 4: xorl %eax,%eax
19508+ pax_force_retaddr
19509 ret
19510
19511 .section .fixup,"ax"
19512@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19513 1: rep
19514 movsb
19515 2: xorl %eax,%eax
19516+ pax_force_retaddr
19517 ret
19518
19519 .section .fixup,"ax"
19520diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19521index cb0c112..e3a6895 100644
19522--- a/arch/x86/lib/copy_user_nocache_64.S
19523+++ b/arch/x86/lib/copy_user_nocache_64.S
19524@@ -8,12 +8,14 @@
19525
19526 #include <linux/linkage.h>
19527 #include <asm/dwarf2.h>
19528+#include <asm/alternative-asm.h>
19529
19530 #define FIX_ALIGNMENT 1
19531
19532 #include <asm/current.h>
19533 #include <asm/asm-offsets.h>
19534 #include <asm/thread_info.h>
19535+#include <asm/pgtable.h>
19536
19537 .macro ALIGN_DESTINATION
19538 #ifdef FIX_ALIGNMENT
19539@@ -50,6 +52,15 @@
19540 */
19541 ENTRY(__copy_user_nocache)
19542 CFI_STARTPROC
19543+
19544+#ifdef CONFIG_PAX_MEMORY_UDEREF
19545+ mov $PAX_USER_SHADOW_BASE,%rcx
19546+ cmp %rcx,%rsi
19547+ jae 1f
19548+ add %rcx,%rsi
19549+1:
19550+#endif
19551+
19552 cmpl $8,%edx
19553 jb 20f /* less then 8 bytes, go to byte copy loop */
19554 ALIGN_DESTINATION
19555@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19556 jz 17f
19557 1: movq (%rsi),%r8
19558 2: movq 1*8(%rsi),%r9
19559-3: movq 2*8(%rsi),%r10
19560+3: movq 2*8(%rsi),%rax
19561 4: movq 3*8(%rsi),%r11
19562 5: movnti %r8,(%rdi)
19563 6: movnti %r9,1*8(%rdi)
19564-7: movnti %r10,2*8(%rdi)
19565+7: movnti %rax,2*8(%rdi)
19566 8: movnti %r11,3*8(%rdi)
19567 9: movq 4*8(%rsi),%r8
19568 10: movq 5*8(%rsi),%r9
19569-11: movq 6*8(%rsi),%r10
19570+11: movq 6*8(%rsi),%rax
19571 12: movq 7*8(%rsi),%r11
19572 13: movnti %r8,4*8(%rdi)
19573 14: movnti %r9,5*8(%rdi)
19574-15: movnti %r10,6*8(%rdi)
19575+15: movnti %rax,6*8(%rdi)
19576 16: movnti %r11,7*8(%rdi)
19577 leaq 64(%rsi),%rsi
19578 leaq 64(%rdi),%rdi
19579@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19580 jnz 21b
19581 23: xorl %eax,%eax
19582 sfence
19583+ pax_force_retaddr
19584 ret
19585
19586 .section .fixup,"ax"
19587diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19588index fb903b7..c92b7f7 100644
19589--- a/arch/x86/lib/csum-copy_64.S
19590+++ b/arch/x86/lib/csum-copy_64.S
19591@@ -8,6 +8,7 @@
19592 #include <linux/linkage.h>
19593 #include <asm/dwarf2.h>
19594 #include <asm/errno.h>
19595+#include <asm/alternative-asm.h>
19596
19597 /*
19598 * Checksum copy with exception handling.
19599@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19600 CFI_RESTORE rbp
19601 addq $7*8, %rsp
19602 CFI_ADJUST_CFA_OFFSET -7*8
19603+ pax_force_retaddr 0, 1
19604 ret
19605 CFI_RESTORE_STATE
19606
19607diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19608index 459b58a..9570bc7 100644
19609--- a/arch/x86/lib/csum-wrappers_64.c
19610+++ b/arch/x86/lib/csum-wrappers_64.c
19611@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19612 len -= 2;
19613 }
19614 }
19615- isum = csum_partial_copy_generic((__force const void *)src,
19616+
19617+#ifdef CONFIG_PAX_MEMORY_UDEREF
19618+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19619+ src += PAX_USER_SHADOW_BASE;
19620+#endif
19621+
19622+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19623 dst, len, isum, errp, NULL);
19624 if (unlikely(*errp))
19625 goto out_err;
19626@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19627 }
19628
19629 *errp = 0;
19630- return csum_partial_copy_generic(src, (void __force *)dst,
19631+
19632+#ifdef CONFIG_PAX_MEMORY_UDEREF
19633+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19634+ dst += PAX_USER_SHADOW_BASE;
19635+#endif
19636+
19637+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19638 len, isum, NULL, errp);
19639 }
19640 EXPORT_SYMBOL(csum_partial_copy_to_user);
19641diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19642index 51f1504..ddac4c1 100644
19643--- a/arch/x86/lib/getuser.S
19644+++ b/arch/x86/lib/getuser.S
19645@@ -33,15 +33,38 @@
19646 #include <asm/asm-offsets.h>
19647 #include <asm/thread_info.h>
19648 #include <asm/asm.h>
19649+#include <asm/segment.h>
19650+#include <asm/pgtable.h>
19651+#include <asm/alternative-asm.h>
19652+
19653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19654+#define __copyuser_seg gs;
19655+#else
19656+#define __copyuser_seg
19657+#endif
19658
19659 .text
19660 ENTRY(__get_user_1)
19661 CFI_STARTPROC
19662+
19663+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19664 GET_THREAD_INFO(%_ASM_DX)
19665 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19666 jae bad_get_user
19667-1: movzb (%_ASM_AX),%edx
19668+
19669+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19670+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19671+ cmp %_ASM_DX,%_ASM_AX
19672+ jae 1234f
19673+ add %_ASM_DX,%_ASM_AX
19674+1234:
19675+#endif
19676+
19677+#endif
19678+
19679+1: __copyuser_seg movzb (%_ASM_AX),%edx
19680 xor %eax,%eax
19681+ pax_force_retaddr
19682 ret
19683 CFI_ENDPROC
19684 ENDPROC(__get_user_1)
19685@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19686 ENTRY(__get_user_2)
19687 CFI_STARTPROC
19688 add $1,%_ASM_AX
19689+
19690+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19691 jc bad_get_user
19692 GET_THREAD_INFO(%_ASM_DX)
19693 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19694 jae bad_get_user
19695-2: movzwl -1(%_ASM_AX),%edx
19696+
19697+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19698+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19699+ cmp %_ASM_DX,%_ASM_AX
19700+ jae 1234f
19701+ add %_ASM_DX,%_ASM_AX
19702+1234:
19703+#endif
19704+
19705+#endif
19706+
19707+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19708 xor %eax,%eax
19709+ pax_force_retaddr
19710 ret
19711 CFI_ENDPROC
19712 ENDPROC(__get_user_2)
19713@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19714 ENTRY(__get_user_4)
19715 CFI_STARTPROC
19716 add $3,%_ASM_AX
19717+
19718+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19719 jc bad_get_user
19720 GET_THREAD_INFO(%_ASM_DX)
19721 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19722 jae bad_get_user
19723-3: mov -3(%_ASM_AX),%edx
19724+
19725+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19726+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19727+ cmp %_ASM_DX,%_ASM_AX
19728+ jae 1234f
19729+ add %_ASM_DX,%_ASM_AX
19730+1234:
19731+#endif
19732+
19733+#endif
19734+
19735+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19736 xor %eax,%eax
19737+ pax_force_retaddr
19738 ret
19739 CFI_ENDPROC
19740 ENDPROC(__get_user_4)
19741@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19742 GET_THREAD_INFO(%_ASM_DX)
19743 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19744 jae bad_get_user
19745+
19746+#ifdef CONFIG_PAX_MEMORY_UDEREF
19747+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19748+ cmp %_ASM_DX,%_ASM_AX
19749+ jae 1234f
19750+ add %_ASM_DX,%_ASM_AX
19751+1234:
19752+#endif
19753+
19754 4: movq -7(%_ASM_AX),%_ASM_DX
19755 xor %eax,%eax
19756+ pax_force_retaddr
19757 ret
19758 CFI_ENDPROC
19759 ENDPROC(__get_user_8)
19760@@ -91,6 +152,7 @@ bad_get_user:
19761 CFI_STARTPROC
19762 xor %edx,%edx
19763 mov $(-EFAULT),%_ASM_AX
19764+ pax_force_retaddr
19765 ret
19766 CFI_ENDPROC
19767 END(bad_get_user)
19768diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19769index 374562e..a75830b 100644
19770--- a/arch/x86/lib/insn.c
19771+++ b/arch/x86/lib/insn.c
19772@@ -21,6 +21,11 @@
19773 #include <linux/string.h>
19774 #include <asm/inat.h>
19775 #include <asm/insn.h>
19776+#ifdef __KERNEL__
19777+#include <asm/pgtable_types.h>
19778+#else
19779+#define ktla_ktva(addr) addr
19780+#endif
19781
19782 /* Verify next sizeof(t) bytes can be on the same instruction */
19783 #define validate_next(t, insn, n) \
19784@@ -49,8 +54,8 @@
19785 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19786 {
19787 memset(insn, 0, sizeof(*insn));
19788- insn->kaddr = kaddr;
19789- insn->next_byte = kaddr;
19790+ insn->kaddr = ktla_ktva(kaddr);
19791+ insn->next_byte = ktla_ktva(kaddr);
19792 insn->x86_64 = x86_64 ? 1 : 0;
19793 insn->opnd_bytes = 4;
19794 if (x86_64)
19795diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19796index 05a95e7..326f2fa 100644
19797--- a/arch/x86/lib/iomap_copy_64.S
19798+++ b/arch/x86/lib/iomap_copy_64.S
19799@@ -17,6 +17,7 @@
19800
19801 #include <linux/linkage.h>
19802 #include <asm/dwarf2.h>
19803+#include <asm/alternative-asm.h>
19804
19805 /*
19806 * override generic version in lib/iomap_copy.c
19807@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19808 CFI_STARTPROC
19809 movl %edx,%ecx
19810 rep movsd
19811+ pax_force_retaddr
19812 ret
19813 CFI_ENDPROC
19814 ENDPROC(__iowrite32_copy)
19815diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19816index efbf2a0..8893637 100644
19817--- a/arch/x86/lib/memcpy_64.S
19818+++ b/arch/x86/lib/memcpy_64.S
19819@@ -34,6 +34,7 @@
19820 rep movsq
19821 movl %edx, %ecx
19822 rep movsb
19823+ pax_force_retaddr
19824 ret
19825 .Lmemcpy_e:
19826 .previous
19827@@ -51,6 +52,7 @@
19828
19829 movl %edx, %ecx
19830 rep movsb
19831+ pax_force_retaddr
19832 ret
19833 .Lmemcpy_e_e:
19834 .previous
19835@@ -81,13 +83,13 @@ ENTRY(memcpy)
19836 */
19837 movq 0*8(%rsi), %r8
19838 movq 1*8(%rsi), %r9
19839- movq 2*8(%rsi), %r10
19840+ movq 2*8(%rsi), %rcx
19841 movq 3*8(%rsi), %r11
19842 leaq 4*8(%rsi), %rsi
19843
19844 movq %r8, 0*8(%rdi)
19845 movq %r9, 1*8(%rdi)
19846- movq %r10, 2*8(%rdi)
19847+ movq %rcx, 2*8(%rdi)
19848 movq %r11, 3*8(%rdi)
19849 leaq 4*8(%rdi), %rdi
19850 jae .Lcopy_forward_loop
19851@@ -110,12 +112,12 @@ ENTRY(memcpy)
19852 subq $0x20, %rdx
19853 movq -1*8(%rsi), %r8
19854 movq -2*8(%rsi), %r9
19855- movq -3*8(%rsi), %r10
19856+ movq -3*8(%rsi), %rcx
19857 movq -4*8(%rsi), %r11
19858 leaq -4*8(%rsi), %rsi
19859 movq %r8, -1*8(%rdi)
19860 movq %r9, -2*8(%rdi)
19861- movq %r10, -3*8(%rdi)
19862+ movq %rcx, -3*8(%rdi)
19863 movq %r11, -4*8(%rdi)
19864 leaq -4*8(%rdi), %rdi
19865 jae .Lcopy_backward_loop
19866@@ -135,12 +137,13 @@ ENTRY(memcpy)
19867 */
19868 movq 0*8(%rsi), %r8
19869 movq 1*8(%rsi), %r9
19870- movq -2*8(%rsi, %rdx), %r10
19871+ movq -2*8(%rsi, %rdx), %rcx
19872 movq -1*8(%rsi, %rdx), %r11
19873 movq %r8, 0*8(%rdi)
19874 movq %r9, 1*8(%rdi)
19875- movq %r10, -2*8(%rdi, %rdx)
19876+ movq %rcx, -2*8(%rdi, %rdx)
19877 movq %r11, -1*8(%rdi, %rdx)
19878+ pax_force_retaddr
19879 retq
19880 .p2align 4
19881 .Lless_16bytes:
19882@@ -153,6 +156,7 @@ ENTRY(memcpy)
19883 movq -1*8(%rsi, %rdx), %r9
19884 movq %r8, 0*8(%rdi)
19885 movq %r9, -1*8(%rdi, %rdx)
19886+ pax_force_retaddr
19887 retq
19888 .p2align 4
19889 .Lless_8bytes:
19890@@ -166,6 +170,7 @@ ENTRY(memcpy)
19891 movl -4(%rsi, %rdx), %r8d
19892 movl %ecx, (%rdi)
19893 movl %r8d, -4(%rdi, %rdx)
19894+ pax_force_retaddr
19895 retq
19896 .p2align 4
19897 .Lless_3bytes:
19898@@ -183,6 +188,7 @@ ENTRY(memcpy)
19899 jnz .Lloop_1
19900
19901 .Lend:
19902+ pax_force_retaddr
19903 retq
19904 CFI_ENDPROC
19905 ENDPROC(memcpy)
19906diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19907index ee16461..c39c199 100644
19908--- a/arch/x86/lib/memmove_64.S
19909+++ b/arch/x86/lib/memmove_64.S
19910@@ -61,13 +61,13 @@ ENTRY(memmove)
19911 5:
19912 sub $0x20, %rdx
19913 movq 0*8(%rsi), %r11
19914- movq 1*8(%rsi), %r10
19915+ movq 1*8(%rsi), %rcx
19916 movq 2*8(%rsi), %r9
19917 movq 3*8(%rsi), %r8
19918 leaq 4*8(%rsi), %rsi
19919
19920 movq %r11, 0*8(%rdi)
19921- movq %r10, 1*8(%rdi)
19922+ movq %rcx, 1*8(%rdi)
19923 movq %r9, 2*8(%rdi)
19924 movq %r8, 3*8(%rdi)
19925 leaq 4*8(%rdi), %rdi
19926@@ -81,10 +81,10 @@ ENTRY(memmove)
19927 4:
19928 movq %rdx, %rcx
19929 movq -8(%rsi, %rdx), %r11
19930- lea -8(%rdi, %rdx), %r10
19931+ lea -8(%rdi, %rdx), %r9
19932 shrq $3, %rcx
19933 rep movsq
19934- movq %r11, (%r10)
19935+ movq %r11, (%r9)
19936 jmp 13f
19937 .Lmemmove_end_forward:
19938
19939@@ -95,14 +95,14 @@ ENTRY(memmove)
19940 7:
19941 movq %rdx, %rcx
19942 movq (%rsi), %r11
19943- movq %rdi, %r10
19944+ movq %rdi, %r9
19945 leaq -8(%rsi, %rdx), %rsi
19946 leaq -8(%rdi, %rdx), %rdi
19947 shrq $3, %rcx
19948 std
19949 rep movsq
19950 cld
19951- movq %r11, (%r10)
19952+ movq %r11, (%r9)
19953 jmp 13f
19954
19955 /*
19956@@ -127,13 +127,13 @@ ENTRY(memmove)
19957 8:
19958 subq $0x20, %rdx
19959 movq -1*8(%rsi), %r11
19960- movq -2*8(%rsi), %r10
19961+ movq -2*8(%rsi), %rcx
19962 movq -3*8(%rsi), %r9
19963 movq -4*8(%rsi), %r8
19964 leaq -4*8(%rsi), %rsi
19965
19966 movq %r11, -1*8(%rdi)
19967- movq %r10, -2*8(%rdi)
19968+ movq %rcx, -2*8(%rdi)
19969 movq %r9, -3*8(%rdi)
19970 movq %r8, -4*8(%rdi)
19971 leaq -4*8(%rdi), %rdi
19972@@ -151,11 +151,11 @@ ENTRY(memmove)
19973 * Move data from 16 bytes to 31 bytes.
19974 */
19975 movq 0*8(%rsi), %r11
19976- movq 1*8(%rsi), %r10
19977+ movq 1*8(%rsi), %rcx
19978 movq -2*8(%rsi, %rdx), %r9
19979 movq -1*8(%rsi, %rdx), %r8
19980 movq %r11, 0*8(%rdi)
19981- movq %r10, 1*8(%rdi)
19982+ movq %rcx, 1*8(%rdi)
19983 movq %r9, -2*8(%rdi, %rdx)
19984 movq %r8, -1*8(%rdi, %rdx)
19985 jmp 13f
19986@@ -167,9 +167,9 @@ ENTRY(memmove)
19987 * Move data from 8 bytes to 15 bytes.
19988 */
19989 movq 0*8(%rsi), %r11
19990- movq -1*8(%rsi, %rdx), %r10
19991+ movq -1*8(%rsi, %rdx), %r9
19992 movq %r11, 0*8(%rdi)
19993- movq %r10, -1*8(%rdi, %rdx)
19994+ movq %r9, -1*8(%rdi, %rdx)
19995 jmp 13f
19996 10:
19997 cmpq $4, %rdx
19998@@ -178,9 +178,9 @@ ENTRY(memmove)
19999 * Move data from 4 bytes to 7 bytes.
20000 */
20001 movl (%rsi), %r11d
20002- movl -4(%rsi, %rdx), %r10d
20003+ movl -4(%rsi, %rdx), %r9d
20004 movl %r11d, (%rdi)
20005- movl %r10d, -4(%rdi, %rdx)
20006+ movl %r9d, -4(%rdi, %rdx)
20007 jmp 13f
20008 11:
20009 cmp $2, %rdx
20010@@ -189,9 +189,9 @@ ENTRY(memmove)
20011 * Move data from 2 bytes to 3 bytes.
20012 */
20013 movw (%rsi), %r11w
20014- movw -2(%rsi, %rdx), %r10w
20015+ movw -2(%rsi, %rdx), %r9w
20016 movw %r11w, (%rdi)
20017- movw %r10w, -2(%rdi, %rdx)
20018+ movw %r9w, -2(%rdi, %rdx)
20019 jmp 13f
20020 12:
20021 cmp $1, %rdx
20022@@ -202,6 +202,7 @@ ENTRY(memmove)
20023 movb (%rsi), %r11b
20024 movb %r11b, (%rdi)
20025 13:
20026+ pax_force_retaddr
20027 retq
20028 CFI_ENDPROC
20029
20030@@ -210,6 +211,7 @@ ENTRY(memmove)
20031 /* Forward moving data. */
20032 movq %rdx, %rcx
20033 rep movsb
20034+ pax_force_retaddr
20035 retq
20036 .Lmemmove_end_forward_efs:
20037 .previous
20038diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20039index 79bd454..dff325a 100644
20040--- a/arch/x86/lib/memset_64.S
20041+++ b/arch/x86/lib/memset_64.S
20042@@ -31,6 +31,7 @@
20043 movl %r8d,%ecx
20044 rep stosb
20045 movq %r9,%rax
20046+ pax_force_retaddr
20047 ret
20048 .Lmemset_e:
20049 .previous
20050@@ -53,6 +54,7 @@
20051 movl %edx,%ecx
20052 rep stosb
20053 movq %r9,%rax
20054+ pax_force_retaddr
20055 ret
20056 .Lmemset_e_e:
20057 .previous
20058@@ -60,13 +62,13 @@
20059 ENTRY(memset)
20060 ENTRY(__memset)
20061 CFI_STARTPROC
20062- movq %rdi,%r10
20063 movq %rdx,%r11
20064
20065 /* expand byte value */
20066 movzbl %sil,%ecx
20067 movabs $0x0101010101010101,%rax
20068 mul %rcx /* with rax, clobbers rdx */
20069+ movq %rdi,%rdx
20070
20071 /* align dst */
20072 movl %edi,%r9d
20073@@ -120,7 +122,8 @@ ENTRY(__memset)
20074 jnz .Lloop_1
20075
20076 .Lende:
20077- movq %r10,%rax
20078+ movq %rdx,%rax
20079+ pax_force_retaddr
20080 ret
20081
20082 CFI_RESTORE_STATE
20083diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20084index c9f2d9b..e7fd2c0 100644
20085--- a/arch/x86/lib/mmx_32.c
20086+++ b/arch/x86/lib/mmx_32.c
20087@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20088 {
20089 void *p;
20090 int i;
20091+ unsigned long cr0;
20092
20093 if (unlikely(in_interrupt()))
20094 return __memcpy(to, from, len);
20095@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20096 kernel_fpu_begin();
20097
20098 __asm__ __volatile__ (
20099- "1: prefetch (%0)\n" /* This set is 28 bytes */
20100- " prefetch 64(%0)\n"
20101- " prefetch 128(%0)\n"
20102- " prefetch 192(%0)\n"
20103- " prefetch 256(%0)\n"
20104+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20105+ " prefetch 64(%1)\n"
20106+ " prefetch 128(%1)\n"
20107+ " prefetch 192(%1)\n"
20108+ " prefetch 256(%1)\n"
20109 "2: \n"
20110 ".section .fixup, \"ax\"\n"
20111- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20112+ "3: \n"
20113+
20114+#ifdef CONFIG_PAX_KERNEXEC
20115+ " movl %%cr0, %0\n"
20116+ " movl %0, %%eax\n"
20117+ " andl $0xFFFEFFFF, %%eax\n"
20118+ " movl %%eax, %%cr0\n"
20119+#endif
20120+
20121+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20122+
20123+#ifdef CONFIG_PAX_KERNEXEC
20124+ " movl %0, %%cr0\n"
20125+#endif
20126+
20127 " jmp 2b\n"
20128 ".previous\n"
20129 _ASM_EXTABLE(1b, 3b)
20130- : : "r" (from));
20131+ : "=&r" (cr0) : "r" (from) : "ax");
20132
20133 for ( ; i > 5; i--) {
20134 __asm__ __volatile__ (
20135- "1: prefetch 320(%0)\n"
20136- "2: movq (%0), %%mm0\n"
20137- " movq 8(%0), %%mm1\n"
20138- " movq 16(%0), %%mm2\n"
20139- " movq 24(%0), %%mm3\n"
20140- " movq %%mm0, (%1)\n"
20141- " movq %%mm1, 8(%1)\n"
20142- " movq %%mm2, 16(%1)\n"
20143- " movq %%mm3, 24(%1)\n"
20144- " movq 32(%0), %%mm0\n"
20145- " movq 40(%0), %%mm1\n"
20146- " movq 48(%0), %%mm2\n"
20147- " movq 56(%0), %%mm3\n"
20148- " movq %%mm0, 32(%1)\n"
20149- " movq %%mm1, 40(%1)\n"
20150- " movq %%mm2, 48(%1)\n"
20151- " movq %%mm3, 56(%1)\n"
20152+ "1: prefetch 320(%1)\n"
20153+ "2: movq (%1), %%mm0\n"
20154+ " movq 8(%1), %%mm1\n"
20155+ " movq 16(%1), %%mm2\n"
20156+ " movq 24(%1), %%mm3\n"
20157+ " movq %%mm0, (%2)\n"
20158+ " movq %%mm1, 8(%2)\n"
20159+ " movq %%mm2, 16(%2)\n"
20160+ " movq %%mm3, 24(%2)\n"
20161+ " movq 32(%1), %%mm0\n"
20162+ " movq 40(%1), %%mm1\n"
20163+ " movq 48(%1), %%mm2\n"
20164+ " movq 56(%1), %%mm3\n"
20165+ " movq %%mm0, 32(%2)\n"
20166+ " movq %%mm1, 40(%2)\n"
20167+ " movq %%mm2, 48(%2)\n"
20168+ " movq %%mm3, 56(%2)\n"
20169 ".section .fixup, \"ax\"\n"
20170- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20171+ "3:\n"
20172+
20173+#ifdef CONFIG_PAX_KERNEXEC
20174+ " movl %%cr0, %0\n"
20175+ " movl %0, %%eax\n"
20176+ " andl $0xFFFEFFFF, %%eax\n"
20177+ " movl %%eax, %%cr0\n"
20178+#endif
20179+
20180+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20181+
20182+#ifdef CONFIG_PAX_KERNEXEC
20183+ " movl %0, %%cr0\n"
20184+#endif
20185+
20186 " jmp 2b\n"
20187 ".previous\n"
20188 _ASM_EXTABLE(1b, 3b)
20189- : : "r" (from), "r" (to) : "memory");
20190+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20191
20192 from += 64;
20193 to += 64;
20194@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20195 static void fast_copy_page(void *to, void *from)
20196 {
20197 int i;
20198+ unsigned long cr0;
20199
20200 kernel_fpu_begin();
20201
20202@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20203 * but that is for later. -AV
20204 */
20205 __asm__ __volatile__(
20206- "1: prefetch (%0)\n"
20207- " prefetch 64(%0)\n"
20208- " prefetch 128(%0)\n"
20209- " prefetch 192(%0)\n"
20210- " prefetch 256(%0)\n"
20211+ "1: prefetch (%1)\n"
20212+ " prefetch 64(%1)\n"
20213+ " prefetch 128(%1)\n"
20214+ " prefetch 192(%1)\n"
20215+ " prefetch 256(%1)\n"
20216 "2: \n"
20217 ".section .fixup, \"ax\"\n"
20218- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20219+ "3: \n"
20220+
20221+#ifdef CONFIG_PAX_KERNEXEC
20222+ " movl %%cr0, %0\n"
20223+ " movl %0, %%eax\n"
20224+ " andl $0xFFFEFFFF, %%eax\n"
20225+ " movl %%eax, %%cr0\n"
20226+#endif
20227+
20228+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20229+
20230+#ifdef CONFIG_PAX_KERNEXEC
20231+ " movl %0, %%cr0\n"
20232+#endif
20233+
20234 " jmp 2b\n"
20235 ".previous\n"
20236- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20237+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20238
20239 for (i = 0; i < (4096-320)/64; i++) {
20240 __asm__ __volatile__ (
20241- "1: prefetch 320(%0)\n"
20242- "2: movq (%0), %%mm0\n"
20243- " movntq %%mm0, (%1)\n"
20244- " movq 8(%0), %%mm1\n"
20245- " movntq %%mm1, 8(%1)\n"
20246- " movq 16(%0), %%mm2\n"
20247- " movntq %%mm2, 16(%1)\n"
20248- " movq 24(%0), %%mm3\n"
20249- " movntq %%mm3, 24(%1)\n"
20250- " movq 32(%0), %%mm4\n"
20251- " movntq %%mm4, 32(%1)\n"
20252- " movq 40(%0), %%mm5\n"
20253- " movntq %%mm5, 40(%1)\n"
20254- " movq 48(%0), %%mm6\n"
20255- " movntq %%mm6, 48(%1)\n"
20256- " movq 56(%0), %%mm7\n"
20257- " movntq %%mm7, 56(%1)\n"
20258+ "1: prefetch 320(%1)\n"
20259+ "2: movq (%1), %%mm0\n"
20260+ " movntq %%mm0, (%2)\n"
20261+ " movq 8(%1), %%mm1\n"
20262+ " movntq %%mm1, 8(%2)\n"
20263+ " movq 16(%1), %%mm2\n"
20264+ " movntq %%mm2, 16(%2)\n"
20265+ " movq 24(%1), %%mm3\n"
20266+ " movntq %%mm3, 24(%2)\n"
20267+ " movq 32(%1), %%mm4\n"
20268+ " movntq %%mm4, 32(%2)\n"
20269+ " movq 40(%1), %%mm5\n"
20270+ " movntq %%mm5, 40(%2)\n"
20271+ " movq 48(%1), %%mm6\n"
20272+ " movntq %%mm6, 48(%2)\n"
20273+ " movq 56(%1), %%mm7\n"
20274+ " movntq %%mm7, 56(%2)\n"
20275 ".section .fixup, \"ax\"\n"
20276- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20277+ "3:\n"
20278+
20279+#ifdef CONFIG_PAX_KERNEXEC
20280+ " movl %%cr0, %0\n"
20281+ " movl %0, %%eax\n"
20282+ " andl $0xFFFEFFFF, %%eax\n"
20283+ " movl %%eax, %%cr0\n"
20284+#endif
20285+
20286+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20287+
20288+#ifdef CONFIG_PAX_KERNEXEC
20289+ " movl %0, %%cr0\n"
20290+#endif
20291+
20292 " jmp 2b\n"
20293 ".previous\n"
20294- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20295+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20296
20297 from += 64;
20298 to += 64;
20299@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20300 static void fast_copy_page(void *to, void *from)
20301 {
20302 int i;
20303+ unsigned long cr0;
20304
20305 kernel_fpu_begin();
20306
20307 __asm__ __volatile__ (
20308- "1: prefetch (%0)\n"
20309- " prefetch 64(%0)\n"
20310- " prefetch 128(%0)\n"
20311- " prefetch 192(%0)\n"
20312- " prefetch 256(%0)\n"
20313+ "1: prefetch (%1)\n"
20314+ " prefetch 64(%1)\n"
20315+ " prefetch 128(%1)\n"
20316+ " prefetch 192(%1)\n"
20317+ " prefetch 256(%1)\n"
20318 "2: \n"
20319 ".section .fixup, \"ax\"\n"
20320- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20321+ "3: \n"
20322+
20323+#ifdef CONFIG_PAX_KERNEXEC
20324+ " movl %%cr0, %0\n"
20325+ " movl %0, %%eax\n"
20326+ " andl $0xFFFEFFFF, %%eax\n"
20327+ " movl %%eax, %%cr0\n"
20328+#endif
20329+
20330+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20331+
20332+#ifdef CONFIG_PAX_KERNEXEC
20333+ " movl %0, %%cr0\n"
20334+#endif
20335+
20336 " jmp 2b\n"
20337 ".previous\n"
20338- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20339+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20340
20341 for (i = 0; i < 4096/64; i++) {
20342 __asm__ __volatile__ (
20343- "1: prefetch 320(%0)\n"
20344- "2: movq (%0), %%mm0\n"
20345- " movq 8(%0), %%mm1\n"
20346- " movq 16(%0), %%mm2\n"
20347- " movq 24(%0), %%mm3\n"
20348- " movq %%mm0, (%1)\n"
20349- " movq %%mm1, 8(%1)\n"
20350- " movq %%mm2, 16(%1)\n"
20351- " movq %%mm3, 24(%1)\n"
20352- " movq 32(%0), %%mm0\n"
20353- " movq 40(%0), %%mm1\n"
20354- " movq 48(%0), %%mm2\n"
20355- " movq 56(%0), %%mm3\n"
20356- " movq %%mm0, 32(%1)\n"
20357- " movq %%mm1, 40(%1)\n"
20358- " movq %%mm2, 48(%1)\n"
20359- " movq %%mm3, 56(%1)\n"
20360+ "1: prefetch 320(%1)\n"
20361+ "2: movq (%1), %%mm0\n"
20362+ " movq 8(%1), %%mm1\n"
20363+ " movq 16(%1), %%mm2\n"
20364+ " movq 24(%1), %%mm3\n"
20365+ " movq %%mm0, (%2)\n"
20366+ " movq %%mm1, 8(%2)\n"
20367+ " movq %%mm2, 16(%2)\n"
20368+ " movq %%mm3, 24(%2)\n"
20369+ " movq 32(%1), %%mm0\n"
20370+ " movq 40(%1), %%mm1\n"
20371+ " movq 48(%1), %%mm2\n"
20372+ " movq 56(%1), %%mm3\n"
20373+ " movq %%mm0, 32(%2)\n"
20374+ " movq %%mm1, 40(%2)\n"
20375+ " movq %%mm2, 48(%2)\n"
20376+ " movq %%mm3, 56(%2)\n"
20377 ".section .fixup, \"ax\"\n"
20378- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20379+ "3:\n"
20380+
20381+#ifdef CONFIG_PAX_KERNEXEC
20382+ " movl %%cr0, %0\n"
20383+ " movl %0, %%eax\n"
20384+ " andl $0xFFFEFFFF, %%eax\n"
20385+ " movl %%eax, %%cr0\n"
20386+#endif
20387+
20388+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20389+
20390+#ifdef CONFIG_PAX_KERNEXEC
20391+ " movl %0, %%cr0\n"
20392+#endif
20393+
20394 " jmp 2b\n"
20395 ".previous\n"
20396 _ASM_EXTABLE(1b, 3b)
20397- : : "r" (from), "r" (to) : "memory");
20398+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20399
20400 from += 64;
20401 to += 64;
20402diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20403index 69fa106..adda88b 100644
20404--- a/arch/x86/lib/msr-reg.S
20405+++ b/arch/x86/lib/msr-reg.S
20406@@ -3,6 +3,7 @@
20407 #include <asm/dwarf2.h>
20408 #include <asm/asm.h>
20409 #include <asm/msr.h>
20410+#include <asm/alternative-asm.h>
20411
20412 #ifdef CONFIG_X86_64
20413 /*
20414@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20415 CFI_STARTPROC
20416 pushq_cfi %rbx
20417 pushq_cfi %rbp
20418- movq %rdi, %r10 /* Save pointer */
20419+ movq %rdi, %r9 /* Save pointer */
20420 xorl %r11d, %r11d /* Return value */
20421 movl (%rdi), %eax
20422 movl 4(%rdi), %ecx
20423@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20424 movl 28(%rdi), %edi
20425 CFI_REMEMBER_STATE
20426 1: \op
20427-2: movl %eax, (%r10)
20428+2: movl %eax, (%r9)
20429 movl %r11d, %eax /* Return value */
20430- movl %ecx, 4(%r10)
20431- movl %edx, 8(%r10)
20432- movl %ebx, 12(%r10)
20433- movl %ebp, 20(%r10)
20434- movl %esi, 24(%r10)
20435- movl %edi, 28(%r10)
20436+ movl %ecx, 4(%r9)
20437+ movl %edx, 8(%r9)
20438+ movl %ebx, 12(%r9)
20439+ movl %ebp, 20(%r9)
20440+ movl %esi, 24(%r9)
20441+ movl %edi, 28(%r9)
20442 popq_cfi %rbp
20443 popq_cfi %rbx
20444+ pax_force_retaddr
20445 ret
20446 3:
20447 CFI_RESTORE_STATE
20448diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20449index 36b0d15..d381858 100644
20450--- a/arch/x86/lib/putuser.S
20451+++ b/arch/x86/lib/putuser.S
20452@@ -15,7 +15,9 @@
20453 #include <asm/thread_info.h>
20454 #include <asm/errno.h>
20455 #include <asm/asm.h>
20456-
20457+#include <asm/segment.h>
20458+#include <asm/pgtable.h>
20459+#include <asm/alternative-asm.h>
20460
20461 /*
20462 * __put_user_X
20463@@ -29,52 +31,119 @@
20464 * as they get called from within inline assembly.
20465 */
20466
20467-#define ENTER CFI_STARTPROC ; \
20468- GET_THREAD_INFO(%_ASM_BX)
20469-#define EXIT ret ; \
20470+#define ENTER CFI_STARTPROC
20471+#define EXIT pax_force_retaddr; ret ; \
20472 CFI_ENDPROC
20473
20474+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20475+#define _DEST %_ASM_CX,%_ASM_BX
20476+#else
20477+#define _DEST %_ASM_CX
20478+#endif
20479+
20480+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20481+#define __copyuser_seg gs;
20482+#else
20483+#define __copyuser_seg
20484+#endif
20485+
20486 .text
20487 ENTRY(__put_user_1)
20488 ENTER
20489+
20490+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20491+ GET_THREAD_INFO(%_ASM_BX)
20492 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20493 jae bad_put_user
20494-1: movb %al,(%_ASM_CX)
20495+
20496+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20497+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20498+ cmp %_ASM_BX,%_ASM_CX
20499+ jb 1234f
20500+ xor %ebx,%ebx
20501+1234:
20502+#endif
20503+
20504+#endif
20505+
20506+1: __copyuser_seg movb %al,(_DEST)
20507 xor %eax,%eax
20508 EXIT
20509 ENDPROC(__put_user_1)
20510
20511 ENTRY(__put_user_2)
20512 ENTER
20513+
20514+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20515+ GET_THREAD_INFO(%_ASM_BX)
20516 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20517 sub $1,%_ASM_BX
20518 cmp %_ASM_BX,%_ASM_CX
20519 jae bad_put_user
20520-2: movw %ax,(%_ASM_CX)
20521+
20522+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20523+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20524+ cmp %_ASM_BX,%_ASM_CX
20525+ jb 1234f
20526+ xor %ebx,%ebx
20527+1234:
20528+#endif
20529+
20530+#endif
20531+
20532+2: __copyuser_seg movw %ax,(_DEST)
20533 xor %eax,%eax
20534 EXIT
20535 ENDPROC(__put_user_2)
20536
20537 ENTRY(__put_user_4)
20538 ENTER
20539+
20540+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20541+ GET_THREAD_INFO(%_ASM_BX)
20542 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20543 sub $3,%_ASM_BX
20544 cmp %_ASM_BX,%_ASM_CX
20545 jae bad_put_user
20546-3: movl %eax,(%_ASM_CX)
20547+
20548+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20549+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20550+ cmp %_ASM_BX,%_ASM_CX
20551+ jb 1234f
20552+ xor %ebx,%ebx
20553+1234:
20554+#endif
20555+
20556+#endif
20557+
20558+3: __copyuser_seg movl %eax,(_DEST)
20559 xor %eax,%eax
20560 EXIT
20561 ENDPROC(__put_user_4)
20562
20563 ENTRY(__put_user_8)
20564 ENTER
20565+
20566+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20567+ GET_THREAD_INFO(%_ASM_BX)
20568 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20569 sub $7,%_ASM_BX
20570 cmp %_ASM_BX,%_ASM_CX
20571 jae bad_put_user
20572-4: mov %_ASM_AX,(%_ASM_CX)
20573+
20574+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20575+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20576+ cmp %_ASM_BX,%_ASM_CX
20577+ jb 1234f
20578+ xor %ebx,%ebx
20579+1234:
20580+#endif
20581+
20582+#endif
20583+
20584+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20585 #ifdef CONFIG_X86_32
20586-5: movl %edx,4(%_ASM_CX)
20587+5: __copyuser_seg movl %edx,4(_DEST)
20588 #endif
20589 xor %eax,%eax
20590 EXIT
20591diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20592index 1cad221..de671ee 100644
20593--- a/arch/x86/lib/rwlock.S
20594+++ b/arch/x86/lib/rwlock.S
20595@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20596 FRAME
20597 0: LOCK_PREFIX
20598 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20599+
20600+#ifdef CONFIG_PAX_REFCOUNT
20601+ jno 1234f
20602+ LOCK_PREFIX
20603+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20604+ int $4
20605+1234:
20606+ _ASM_EXTABLE(1234b, 1234b)
20607+#endif
20608+
20609 1: rep; nop
20610 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20611 jne 1b
20612 LOCK_PREFIX
20613 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20614+
20615+#ifdef CONFIG_PAX_REFCOUNT
20616+ jno 1234f
20617+ LOCK_PREFIX
20618+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20619+ int $4
20620+1234:
20621+ _ASM_EXTABLE(1234b, 1234b)
20622+#endif
20623+
20624 jnz 0b
20625 ENDFRAME
20626+ pax_force_retaddr
20627 ret
20628 CFI_ENDPROC
20629 END(__write_lock_failed)
20630@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20631 FRAME
20632 0: LOCK_PREFIX
20633 READ_LOCK_SIZE(inc) (%__lock_ptr)
20634+
20635+#ifdef CONFIG_PAX_REFCOUNT
20636+ jno 1234f
20637+ LOCK_PREFIX
20638+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20639+ int $4
20640+1234:
20641+ _ASM_EXTABLE(1234b, 1234b)
20642+#endif
20643+
20644 1: rep; nop
20645 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20646 js 1b
20647 LOCK_PREFIX
20648 READ_LOCK_SIZE(dec) (%__lock_ptr)
20649+
20650+#ifdef CONFIG_PAX_REFCOUNT
20651+ jno 1234f
20652+ LOCK_PREFIX
20653+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20654+ int $4
20655+1234:
20656+ _ASM_EXTABLE(1234b, 1234b)
20657+#endif
20658+
20659 js 0b
20660 ENDFRAME
20661+ pax_force_retaddr
20662 ret
20663 CFI_ENDPROC
20664 END(__read_lock_failed)
20665diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20666index 5dff5f0..cadebf4 100644
20667--- a/arch/x86/lib/rwsem.S
20668+++ b/arch/x86/lib/rwsem.S
20669@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20670 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20671 CFI_RESTORE __ASM_REG(dx)
20672 restore_common_regs
20673+ pax_force_retaddr
20674 ret
20675 CFI_ENDPROC
20676 ENDPROC(call_rwsem_down_read_failed)
20677@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20678 movq %rax,%rdi
20679 call rwsem_down_write_failed
20680 restore_common_regs
20681+ pax_force_retaddr
20682 ret
20683 CFI_ENDPROC
20684 ENDPROC(call_rwsem_down_write_failed)
20685@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20686 movq %rax,%rdi
20687 call rwsem_wake
20688 restore_common_regs
20689-1: ret
20690+1: pax_force_retaddr
20691+ ret
20692 CFI_ENDPROC
20693 ENDPROC(call_rwsem_wake)
20694
20695@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20696 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20697 CFI_RESTORE __ASM_REG(dx)
20698 restore_common_regs
20699+ pax_force_retaddr
20700 ret
20701 CFI_ENDPROC
20702 ENDPROC(call_rwsem_downgrade_wake)
20703diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20704index a63efd6..ccecad8 100644
20705--- a/arch/x86/lib/thunk_64.S
20706+++ b/arch/x86/lib/thunk_64.S
20707@@ -8,6 +8,7 @@
20708 #include <linux/linkage.h>
20709 #include <asm/dwarf2.h>
20710 #include <asm/calling.h>
20711+#include <asm/alternative-asm.h>
20712
20713 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20714 .macro THUNK name, func, put_ret_addr_in_rdi=0
20715@@ -41,5 +42,6 @@
20716 SAVE_ARGS
20717 restore:
20718 RESTORE_ARGS
20719+ pax_force_retaddr
20720 ret
20721 CFI_ENDPROC
20722diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20723index e218d5d..35679b4 100644
20724--- a/arch/x86/lib/usercopy_32.c
20725+++ b/arch/x86/lib/usercopy_32.c
20726@@ -43,7 +43,7 @@ do { \
20727 __asm__ __volatile__( \
20728 " testl %1,%1\n" \
20729 " jz 2f\n" \
20730- "0: lodsb\n" \
20731+ "0: "__copyuser_seg"lodsb\n" \
20732 " stosb\n" \
20733 " testb %%al,%%al\n" \
20734 " jz 1f\n" \
20735@@ -128,10 +128,12 @@ do { \
20736 int __d0; \
20737 might_fault(); \
20738 __asm__ __volatile__( \
20739+ __COPYUSER_SET_ES \
20740 "0: rep; stosl\n" \
20741 " movl %2,%0\n" \
20742 "1: rep; stosb\n" \
20743 "2:\n" \
20744+ __COPYUSER_RESTORE_ES \
20745 ".section .fixup,\"ax\"\n" \
20746 "3: lea 0(%2,%0,4),%0\n" \
20747 " jmp 2b\n" \
20748@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20749 might_fault();
20750
20751 __asm__ __volatile__(
20752+ __COPYUSER_SET_ES
20753 " testl %0, %0\n"
20754 " jz 3f\n"
20755 " andl %0,%%ecx\n"
20756@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20757 " subl %%ecx,%0\n"
20758 " addl %0,%%eax\n"
20759 "1:\n"
20760+ __COPYUSER_RESTORE_ES
20761 ".section .fixup,\"ax\"\n"
20762 "2: xorl %%eax,%%eax\n"
20763 " jmp 1b\n"
20764@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20765
20766 #ifdef CONFIG_X86_INTEL_USERCOPY
20767 static unsigned long
20768-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20769+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20770 {
20771 int d0, d1;
20772 __asm__ __volatile__(
20773@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20774 " .align 2,0x90\n"
20775 "3: movl 0(%4), %%eax\n"
20776 "4: movl 4(%4), %%edx\n"
20777- "5: movl %%eax, 0(%3)\n"
20778- "6: movl %%edx, 4(%3)\n"
20779+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20780+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20781 "7: movl 8(%4), %%eax\n"
20782 "8: movl 12(%4),%%edx\n"
20783- "9: movl %%eax, 8(%3)\n"
20784- "10: movl %%edx, 12(%3)\n"
20785+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20786+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20787 "11: movl 16(%4), %%eax\n"
20788 "12: movl 20(%4), %%edx\n"
20789- "13: movl %%eax, 16(%3)\n"
20790- "14: movl %%edx, 20(%3)\n"
20791+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20792+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20793 "15: movl 24(%4), %%eax\n"
20794 "16: movl 28(%4), %%edx\n"
20795- "17: movl %%eax, 24(%3)\n"
20796- "18: movl %%edx, 28(%3)\n"
20797+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20798+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20799 "19: movl 32(%4), %%eax\n"
20800 "20: movl 36(%4), %%edx\n"
20801- "21: movl %%eax, 32(%3)\n"
20802- "22: movl %%edx, 36(%3)\n"
20803+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20804+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20805 "23: movl 40(%4), %%eax\n"
20806 "24: movl 44(%4), %%edx\n"
20807- "25: movl %%eax, 40(%3)\n"
20808- "26: movl %%edx, 44(%3)\n"
20809+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20810+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20811 "27: movl 48(%4), %%eax\n"
20812 "28: movl 52(%4), %%edx\n"
20813- "29: movl %%eax, 48(%3)\n"
20814- "30: movl %%edx, 52(%3)\n"
20815+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20816+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20817 "31: movl 56(%4), %%eax\n"
20818 "32: movl 60(%4), %%edx\n"
20819- "33: movl %%eax, 56(%3)\n"
20820- "34: movl %%edx, 60(%3)\n"
20821+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20822+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20823 " addl $-64, %0\n"
20824 " addl $64, %4\n"
20825 " addl $64, %3\n"
20826@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20827 " shrl $2, %0\n"
20828 " andl $3, %%eax\n"
20829 " cld\n"
20830+ __COPYUSER_SET_ES
20831 "99: rep; movsl\n"
20832 "36: movl %%eax, %0\n"
20833 "37: rep; movsb\n"
20834 "100:\n"
20835+ __COPYUSER_RESTORE_ES
20836+ ".section .fixup,\"ax\"\n"
20837+ "101: lea 0(%%eax,%0,4),%0\n"
20838+ " jmp 100b\n"
20839+ ".previous\n"
20840+ ".section __ex_table,\"a\"\n"
20841+ " .align 4\n"
20842+ " .long 1b,100b\n"
20843+ " .long 2b,100b\n"
20844+ " .long 3b,100b\n"
20845+ " .long 4b,100b\n"
20846+ " .long 5b,100b\n"
20847+ " .long 6b,100b\n"
20848+ " .long 7b,100b\n"
20849+ " .long 8b,100b\n"
20850+ " .long 9b,100b\n"
20851+ " .long 10b,100b\n"
20852+ " .long 11b,100b\n"
20853+ " .long 12b,100b\n"
20854+ " .long 13b,100b\n"
20855+ " .long 14b,100b\n"
20856+ " .long 15b,100b\n"
20857+ " .long 16b,100b\n"
20858+ " .long 17b,100b\n"
20859+ " .long 18b,100b\n"
20860+ " .long 19b,100b\n"
20861+ " .long 20b,100b\n"
20862+ " .long 21b,100b\n"
20863+ " .long 22b,100b\n"
20864+ " .long 23b,100b\n"
20865+ " .long 24b,100b\n"
20866+ " .long 25b,100b\n"
20867+ " .long 26b,100b\n"
20868+ " .long 27b,100b\n"
20869+ " .long 28b,100b\n"
20870+ " .long 29b,100b\n"
20871+ " .long 30b,100b\n"
20872+ " .long 31b,100b\n"
20873+ " .long 32b,100b\n"
20874+ " .long 33b,100b\n"
20875+ " .long 34b,100b\n"
20876+ " .long 35b,100b\n"
20877+ " .long 36b,100b\n"
20878+ " .long 37b,100b\n"
20879+ " .long 99b,101b\n"
20880+ ".previous"
20881+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20882+ : "1"(to), "2"(from), "0"(size)
20883+ : "eax", "edx", "memory");
20884+ return size;
20885+}
20886+
20887+static unsigned long
20888+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20889+{
20890+ int d0, d1;
20891+ __asm__ __volatile__(
20892+ " .align 2,0x90\n"
20893+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20894+ " cmpl $67, %0\n"
20895+ " jbe 3f\n"
20896+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20897+ " .align 2,0x90\n"
20898+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20899+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20900+ "5: movl %%eax, 0(%3)\n"
20901+ "6: movl %%edx, 4(%3)\n"
20902+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20903+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20904+ "9: movl %%eax, 8(%3)\n"
20905+ "10: movl %%edx, 12(%3)\n"
20906+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20907+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20908+ "13: movl %%eax, 16(%3)\n"
20909+ "14: movl %%edx, 20(%3)\n"
20910+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20911+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20912+ "17: movl %%eax, 24(%3)\n"
20913+ "18: movl %%edx, 28(%3)\n"
20914+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20915+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20916+ "21: movl %%eax, 32(%3)\n"
20917+ "22: movl %%edx, 36(%3)\n"
20918+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20919+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20920+ "25: movl %%eax, 40(%3)\n"
20921+ "26: movl %%edx, 44(%3)\n"
20922+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20923+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20924+ "29: movl %%eax, 48(%3)\n"
20925+ "30: movl %%edx, 52(%3)\n"
20926+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20927+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20928+ "33: movl %%eax, 56(%3)\n"
20929+ "34: movl %%edx, 60(%3)\n"
20930+ " addl $-64, %0\n"
20931+ " addl $64, %4\n"
20932+ " addl $64, %3\n"
20933+ " cmpl $63, %0\n"
20934+ " ja 1b\n"
20935+ "35: movl %0, %%eax\n"
20936+ " shrl $2, %0\n"
20937+ " andl $3, %%eax\n"
20938+ " cld\n"
20939+ "99: rep; "__copyuser_seg" movsl\n"
20940+ "36: movl %%eax, %0\n"
20941+ "37: rep; "__copyuser_seg" movsb\n"
20942+ "100:\n"
20943 ".section .fixup,\"ax\"\n"
20944 "101: lea 0(%%eax,%0,4),%0\n"
20945 " jmp 100b\n"
20946@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20947 int d0, d1;
20948 __asm__ __volatile__(
20949 " .align 2,0x90\n"
20950- "0: movl 32(%4), %%eax\n"
20951+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20952 " cmpl $67, %0\n"
20953 " jbe 2f\n"
20954- "1: movl 64(%4), %%eax\n"
20955+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20956 " .align 2,0x90\n"
20957- "2: movl 0(%4), %%eax\n"
20958- "21: movl 4(%4), %%edx\n"
20959+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20960+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20961 " movl %%eax, 0(%3)\n"
20962 " movl %%edx, 4(%3)\n"
20963- "3: movl 8(%4), %%eax\n"
20964- "31: movl 12(%4),%%edx\n"
20965+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20966+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20967 " movl %%eax, 8(%3)\n"
20968 " movl %%edx, 12(%3)\n"
20969- "4: movl 16(%4), %%eax\n"
20970- "41: movl 20(%4), %%edx\n"
20971+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20972+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20973 " movl %%eax, 16(%3)\n"
20974 " movl %%edx, 20(%3)\n"
20975- "10: movl 24(%4), %%eax\n"
20976- "51: movl 28(%4), %%edx\n"
20977+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20978+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20979 " movl %%eax, 24(%3)\n"
20980 " movl %%edx, 28(%3)\n"
20981- "11: movl 32(%4), %%eax\n"
20982- "61: movl 36(%4), %%edx\n"
20983+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20984+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20985 " movl %%eax, 32(%3)\n"
20986 " movl %%edx, 36(%3)\n"
20987- "12: movl 40(%4), %%eax\n"
20988- "71: movl 44(%4), %%edx\n"
20989+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20990+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20991 " movl %%eax, 40(%3)\n"
20992 " movl %%edx, 44(%3)\n"
20993- "13: movl 48(%4), %%eax\n"
20994- "81: movl 52(%4), %%edx\n"
20995+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20996+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20997 " movl %%eax, 48(%3)\n"
20998 " movl %%edx, 52(%3)\n"
20999- "14: movl 56(%4), %%eax\n"
21000- "91: movl 60(%4), %%edx\n"
21001+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21002+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21003 " movl %%eax, 56(%3)\n"
21004 " movl %%edx, 60(%3)\n"
21005 " addl $-64, %0\n"
21006@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21007 " shrl $2, %0\n"
21008 " andl $3, %%eax\n"
21009 " cld\n"
21010- "6: rep; movsl\n"
21011+ "6: rep; "__copyuser_seg" movsl\n"
21012 " movl %%eax,%0\n"
21013- "7: rep; movsb\n"
21014+ "7: rep; "__copyuser_seg" movsb\n"
21015 "8:\n"
21016 ".section .fixup,\"ax\"\n"
21017 "9: lea 0(%%eax,%0,4),%0\n"
21018@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21019
21020 __asm__ __volatile__(
21021 " .align 2,0x90\n"
21022- "0: movl 32(%4), %%eax\n"
21023+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21024 " cmpl $67, %0\n"
21025 " jbe 2f\n"
21026- "1: movl 64(%4), %%eax\n"
21027+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21028 " .align 2,0x90\n"
21029- "2: movl 0(%4), %%eax\n"
21030- "21: movl 4(%4), %%edx\n"
21031+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21032+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21033 " movnti %%eax, 0(%3)\n"
21034 " movnti %%edx, 4(%3)\n"
21035- "3: movl 8(%4), %%eax\n"
21036- "31: movl 12(%4),%%edx\n"
21037+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21038+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21039 " movnti %%eax, 8(%3)\n"
21040 " movnti %%edx, 12(%3)\n"
21041- "4: movl 16(%4), %%eax\n"
21042- "41: movl 20(%4), %%edx\n"
21043+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21044+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21045 " movnti %%eax, 16(%3)\n"
21046 " movnti %%edx, 20(%3)\n"
21047- "10: movl 24(%4), %%eax\n"
21048- "51: movl 28(%4), %%edx\n"
21049+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21050+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21051 " movnti %%eax, 24(%3)\n"
21052 " movnti %%edx, 28(%3)\n"
21053- "11: movl 32(%4), %%eax\n"
21054- "61: movl 36(%4), %%edx\n"
21055+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21056+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21057 " movnti %%eax, 32(%3)\n"
21058 " movnti %%edx, 36(%3)\n"
21059- "12: movl 40(%4), %%eax\n"
21060- "71: movl 44(%4), %%edx\n"
21061+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21062+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21063 " movnti %%eax, 40(%3)\n"
21064 " movnti %%edx, 44(%3)\n"
21065- "13: movl 48(%4), %%eax\n"
21066- "81: movl 52(%4), %%edx\n"
21067+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21068+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21069 " movnti %%eax, 48(%3)\n"
21070 " movnti %%edx, 52(%3)\n"
21071- "14: movl 56(%4), %%eax\n"
21072- "91: movl 60(%4), %%edx\n"
21073+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21074+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21075 " movnti %%eax, 56(%3)\n"
21076 " movnti %%edx, 60(%3)\n"
21077 " addl $-64, %0\n"
21078@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21079 " shrl $2, %0\n"
21080 " andl $3, %%eax\n"
21081 " cld\n"
21082- "6: rep; movsl\n"
21083+ "6: rep; "__copyuser_seg" movsl\n"
21084 " movl %%eax,%0\n"
21085- "7: rep; movsb\n"
21086+ "7: rep; "__copyuser_seg" movsb\n"
21087 "8:\n"
21088 ".section .fixup,\"ax\"\n"
21089 "9: lea 0(%%eax,%0,4),%0\n"
21090@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21091
21092 __asm__ __volatile__(
21093 " .align 2,0x90\n"
21094- "0: movl 32(%4), %%eax\n"
21095+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21096 " cmpl $67, %0\n"
21097 " jbe 2f\n"
21098- "1: movl 64(%4), %%eax\n"
21099+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21100 " .align 2,0x90\n"
21101- "2: movl 0(%4), %%eax\n"
21102- "21: movl 4(%4), %%edx\n"
21103+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21104+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21105 " movnti %%eax, 0(%3)\n"
21106 " movnti %%edx, 4(%3)\n"
21107- "3: movl 8(%4), %%eax\n"
21108- "31: movl 12(%4),%%edx\n"
21109+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21110+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21111 " movnti %%eax, 8(%3)\n"
21112 " movnti %%edx, 12(%3)\n"
21113- "4: movl 16(%4), %%eax\n"
21114- "41: movl 20(%4), %%edx\n"
21115+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21116+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21117 " movnti %%eax, 16(%3)\n"
21118 " movnti %%edx, 20(%3)\n"
21119- "10: movl 24(%4), %%eax\n"
21120- "51: movl 28(%4), %%edx\n"
21121+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21122+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21123 " movnti %%eax, 24(%3)\n"
21124 " movnti %%edx, 28(%3)\n"
21125- "11: movl 32(%4), %%eax\n"
21126- "61: movl 36(%4), %%edx\n"
21127+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21128+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21129 " movnti %%eax, 32(%3)\n"
21130 " movnti %%edx, 36(%3)\n"
21131- "12: movl 40(%4), %%eax\n"
21132- "71: movl 44(%4), %%edx\n"
21133+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21134+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21135 " movnti %%eax, 40(%3)\n"
21136 " movnti %%edx, 44(%3)\n"
21137- "13: movl 48(%4), %%eax\n"
21138- "81: movl 52(%4), %%edx\n"
21139+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21140+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21141 " movnti %%eax, 48(%3)\n"
21142 " movnti %%edx, 52(%3)\n"
21143- "14: movl 56(%4), %%eax\n"
21144- "91: movl 60(%4), %%edx\n"
21145+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21146+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21147 " movnti %%eax, 56(%3)\n"
21148 " movnti %%edx, 60(%3)\n"
21149 " addl $-64, %0\n"
21150@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21151 " shrl $2, %0\n"
21152 " andl $3, %%eax\n"
21153 " cld\n"
21154- "6: rep; movsl\n"
21155+ "6: rep; "__copyuser_seg" movsl\n"
21156 " movl %%eax,%0\n"
21157- "7: rep; movsb\n"
21158+ "7: rep; "__copyuser_seg" movsb\n"
21159 "8:\n"
21160 ".section .fixup,\"ax\"\n"
21161 "9: lea 0(%%eax,%0,4),%0\n"
21162@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21163 */
21164 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21165 unsigned long size);
21166-unsigned long __copy_user_intel(void __user *to, const void *from,
21167+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21168+ unsigned long size);
21169+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21170 unsigned long size);
21171 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21172 const void __user *from, unsigned long size);
21173 #endif /* CONFIG_X86_INTEL_USERCOPY */
21174
21175 /* Generic arbitrary sized copy. */
21176-#define __copy_user(to, from, size) \
21177+#define __copy_user(to, from, size, prefix, set, restore) \
21178 do { \
21179 int __d0, __d1, __d2; \
21180 __asm__ __volatile__( \
21181+ set \
21182 " cmp $7,%0\n" \
21183 " jbe 1f\n" \
21184 " movl %1,%0\n" \
21185 " negl %0\n" \
21186 " andl $7,%0\n" \
21187 " subl %0,%3\n" \
21188- "4: rep; movsb\n" \
21189+ "4: rep; "prefix"movsb\n" \
21190 " movl %3,%0\n" \
21191 " shrl $2,%0\n" \
21192 " andl $3,%3\n" \
21193 " .align 2,0x90\n" \
21194- "0: rep; movsl\n" \
21195+ "0: rep; "prefix"movsl\n" \
21196 " movl %3,%0\n" \
21197- "1: rep; movsb\n" \
21198+ "1: rep; "prefix"movsb\n" \
21199 "2:\n" \
21200+ restore \
21201 ".section .fixup,\"ax\"\n" \
21202 "5: addl %3,%0\n" \
21203 " jmp 2b\n" \
21204@@ -682,14 +799,14 @@ do { \
21205 " negl %0\n" \
21206 " andl $7,%0\n" \
21207 " subl %0,%3\n" \
21208- "4: rep; movsb\n" \
21209+ "4: rep; "__copyuser_seg"movsb\n" \
21210 " movl %3,%0\n" \
21211 " shrl $2,%0\n" \
21212 " andl $3,%3\n" \
21213 " .align 2,0x90\n" \
21214- "0: rep; movsl\n" \
21215+ "0: rep; "__copyuser_seg"movsl\n" \
21216 " movl %3,%0\n" \
21217- "1: rep; movsb\n" \
21218+ "1: rep; "__copyuser_seg"movsb\n" \
21219 "2:\n" \
21220 ".section .fixup,\"ax\"\n" \
21221 "5: addl %3,%0\n" \
21222@@ -775,9 +892,9 @@ survive:
21223 }
21224 #endif
21225 if (movsl_is_ok(to, from, n))
21226- __copy_user(to, from, n);
21227+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21228 else
21229- n = __copy_user_intel(to, from, n);
21230+ n = __generic_copy_to_user_intel(to, from, n);
21231 return n;
21232 }
21233 EXPORT_SYMBOL(__copy_to_user_ll);
21234@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21235 unsigned long n)
21236 {
21237 if (movsl_is_ok(to, from, n))
21238- __copy_user(to, from, n);
21239+ __copy_user(to, from, n, __copyuser_seg, "", "");
21240 else
21241- n = __copy_user_intel((void __user *)to,
21242- (const void *)from, n);
21243+ n = __generic_copy_from_user_intel(to, from, n);
21244 return n;
21245 }
21246 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21247@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21248 if (n > 64 && cpu_has_xmm2)
21249 n = __copy_user_intel_nocache(to, from, n);
21250 else
21251- __copy_user(to, from, n);
21252+ __copy_user(to, from, n, __copyuser_seg, "", "");
21253 #else
21254- __copy_user(to, from, n);
21255+ __copy_user(to, from, n, __copyuser_seg, "", "");
21256 #endif
21257 return n;
21258 }
21259 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21260
21261-/**
21262- * copy_to_user: - Copy a block of data into user space.
21263- * @to: Destination address, in user space.
21264- * @from: Source address, in kernel space.
21265- * @n: Number of bytes to copy.
21266- *
21267- * Context: User context only. This function may sleep.
21268- *
21269- * Copy data from kernel space to user space.
21270- *
21271- * Returns number of bytes that could not be copied.
21272- * On success, this will be zero.
21273- */
21274-unsigned long
21275-copy_to_user(void __user *to, const void *from, unsigned long n)
21276-{
21277- if (access_ok(VERIFY_WRITE, to, n))
21278- n = __copy_to_user(to, from, n);
21279- return n;
21280-}
21281-EXPORT_SYMBOL(copy_to_user);
21282-
21283-/**
21284- * copy_from_user: - Copy a block of data from user space.
21285- * @to: Destination address, in kernel space.
21286- * @from: Source address, in user space.
21287- * @n: Number of bytes to copy.
21288- *
21289- * Context: User context only. This function may sleep.
21290- *
21291- * Copy data from user space to kernel space.
21292- *
21293- * Returns number of bytes that could not be copied.
21294- * On success, this will be zero.
21295- *
21296- * If some data could not be copied, this function will pad the copied
21297- * data to the requested size using zero bytes.
21298- */
21299-unsigned long
21300-_copy_from_user(void *to, const void __user *from, unsigned long n)
21301-{
21302- if (access_ok(VERIFY_READ, from, n))
21303- n = __copy_from_user(to, from, n);
21304- else
21305- memset(to, 0, n);
21306- return n;
21307-}
21308-EXPORT_SYMBOL(_copy_from_user);
21309-
21310 void copy_from_user_overflow(void)
21311 {
21312 WARN(1, "Buffer overflow detected!\n");
21313 }
21314 EXPORT_SYMBOL(copy_from_user_overflow);
21315+
21316+void copy_to_user_overflow(void)
21317+{
21318+ WARN(1, "Buffer overflow detected!\n");
21319+}
21320+EXPORT_SYMBOL(copy_to_user_overflow);
21321+
21322+#ifdef CONFIG_PAX_MEMORY_UDEREF
21323+void __set_fs(mm_segment_t x)
21324+{
21325+ switch (x.seg) {
21326+ case 0:
21327+ loadsegment(gs, 0);
21328+ break;
21329+ case TASK_SIZE_MAX:
21330+ loadsegment(gs, __USER_DS);
21331+ break;
21332+ case -1UL:
21333+ loadsegment(gs, __KERNEL_DS);
21334+ break;
21335+ default:
21336+ BUG();
21337+ }
21338+ return;
21339+}
21340+EXPORT_SYMBOL(__set_fs);
21341+
21342+void set_fs(mm_segment_t x)
21343+{
21344+ current_thread_info()->addr_limit = x;
21345+ __set_fs(x);
21346+}
21347+EXPORT_SYMBOL(set_fs);
21348+#endif
21349diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21350index b7c2849..8633ad8 100644
21351--- a/arch/x86/lib/usercopy_64.c
21352+++ b/arch/x86/lib/usercopy_64.c
21353@@ -42,6 +42,12 @@ long
21354 __strncpy_from_user(char *dst, const char __user *src, long count)
21355 {
21356 long res;
21357+
21358+#ifdef CONFIG_PAX_MEMORY_UDEREF
21359+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21360+ src += PAX_USER_SHADOW_BASE;
21361+#endif
21362+
21363 __do_strncpy_from_user(dst, src, count, res);
21364 return res;
21365 }
21366@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21367 {
21368 long __d0;
21369 might_fault();
21370+
21371+#ifdef CONFIG_PAX_MEMORY_UDEREF
21372+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21373+ addr += PAX_USER_SHADOW_BASE;
21374+#endif
21375+
21376 /* no memory constraint because it doesn't change any memory gcc knows
21377 about */
21378 asm volatile(
21379@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21380 }
21381 EXPORT_SYMBOL(strlen_user);
21382
21383-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21384+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21385 {
21386- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21387- return copy_user_generic((__force void *)to, (__force void *)from, len);
21388- }
21389- return len;
21390+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21391+
21392+#ifdef CONFIG_PAX_MEMORY_UDEREF
21393+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21394+ to += PAX_USER_SHADOW_BASE;
21395+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21396+ from += PAX_USER_SHADOW_BASE;
21397+#endif
21398+
21399+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21400+ }
21401+ return len;
21402 }
21403 EXPORT_SYMBOL(copy_in_user);
21404
21405@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21406 * it is not necessary to optimize tail handling.
21407 */
21408 unsigned long
21409-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21410+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21411 {
21412 char c;
21413 unsigned zero_len;
21414diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21415index d0474ad..36e9257 100644
21416--- a/arch/x86/mm/extable.c
21417+++ b/arch/x86/mm/extable.c
21418@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21419 const struct exception_table_entry *fixup;
21420
21421 #ifdef CONFIG_PNPBIOS
21422- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21423+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21424 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21425 extern u32 pnp_bios_is_utter_crap;
21426 pnp_bios_is_utter_crap = 1;
21427diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21428index 5db0490..13bd09c 100644
21429--- a/arch/x86/mm/fault.c
21430+++ b/arch/x86/mm/fault.c
21431@@ -13,11 +13,18 @@
21432 #include <linux/perf_event.h> /* perf_sw_event */
21433 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21434 #include <linux/prefetch.h> /* prefetchw */
21435+#include <linux/unistd.h>
21436+#include <linux/compiler.h>
21437
21438 #include <asm/traps.h> /* dotraplinkage, ... */
21439 #include <asm/pgalloc.h> /* pgd_*(), ... */
21440 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21441 #include <asm/fixmap.h> /* VSYSCALL_START */
21442+#include <asm/tlbflush.h>
21443+
21444+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21445+#include <asm/stacktrace.h>
21446+#endif
21447
21448 /*
21449 * Page fault error code bits:
21450@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21451 int ret = 0;
21452
21453 /* kprobe_running() needs smp_processor_id() */
21454- if (kprobes_built_in() && !user_mode_vm(regs)) {
21455+ if (kprobes_built_in() && !user_mode(regs)) {
21456 preempt_disable();
21457 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21458 ret = 1;
21459@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21460 return !instr_lo || (instr_lo>>1) == 1;
21461 case 0x00:
21462 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21463- if (probe_kernel_address(instr, opcode))
21464+ if (user_mode(regs)) {
21465+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21466+ return 0;
21467+ } else if (probe_kernel_address(instr, opcode))
21468 return 0;
21469
21470 *prefetch = (instr_lo == 0xF) &&
21471@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21472 while (instr < max_instr) {
21473 unsigned char opcode;
21474
21475- if (probe_kernel_address(instr, opcode))
21476+ if (user_mode(regs)) {
21477+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21478+ break;
21479+ } else if (probe_kernel_address(instr, opcode))
21480 break;
21481
21482 instr++;
21483@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21484 force_sig_info(si_signo, &info, tsk);
21485 }
21486
21487+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21488+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21489+#endif
21490+
21491+#ifdef CONFIG_PAX_EMUTRAMP
21492+static int pax_handle_fetch_fault(struct pt_regs *regs);
21493+#endif
21494+
21495+#ifdef CONFIG_PAX_PAGEEXEC
21496+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21497+{
21498+ pgd_t *pgd;
21499+ pud_t *pud;
21500+ pmd_t *pmd;
21501+
21502+ pgd = pgd_offset(mm, address);
21503+ if (!pgd_present(*pgd))
21504+ return NULL;
21505+ pud = pud_offset(pgd, address);
21506+ if (!pud_present(*pud))
21507+ return NULL;
21508+ pmd = pmd_offset(pud, address);
21509+ if (!pmd_present(*pmd))
21510+ return NULL;
21511+ return pmd;
21512+}
21513+#endif
21514+
21515 DEFINE_SPINLOCK(pgd_lock);
21516 LIST_HEAD(pgd_list);
21517
21518@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21519 for (address = VMALLOC_START & PMD_MASK;
21520 address >= TASK_SIZE && address < FIXADDR_TOP;
21521 address += PMD_SIZE) {
21522+
21523+#ifdef CONFIG_PAX_PER_CPU_PGD
21524+ unsigned long cpu;
21525+#else
21526 struct page *page;
21527+#endif
21528
21529 spin_lock(&pgd_lock);
21530+
21531+#ifdef CONFIG_PAX_PER_CPU_PGD
21532+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21533+ pgd_t *pgd = get_cpu_pgd(cpu);
21534+ pmd_t *ret;
21535+#else
21536 list_for_each_entry(page, &pgd_list, lru) {
21537+ pgd_t *pgd = page_address(page);
21538 spinlock_t *pgt_lock;
21539 pmd_t *ret;
21540
21541@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21542 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21543
21544 spin_lock(pgt_lock);
21545- ret = vmalloc_sync_one(page_address(page), address);
21546+#endif
21547+
21548+ ret = vmalloc_sync_one(pgd, address);
21549+
21550+#ifndef CONFIG_PAX_PER_CPU_PGD
21551 spin_unlock(pgt_lock);
21552+#endif
21553
21554 if (!ret)
21555 break;
21556@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21557 * an interrupt in the middle of a task switch..
21558 */
21559 pgd_paddr = read_cr3();
21560+
21561+#ifdef CONFIG_PAX_PER_CPU_PGD
21562+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21563+#endif
21564+
21565 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21566 if (!pmd_k)
21567 return -1;
21568@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21569 * happen within a race in page table update. In the later
21570 * case just flush:
21571 */
21572+
21573+#ifdef CONFIG_PAX_PER_CPU_PGD
21574+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21575+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21576+#else
21577 pgd = pgd_offset(current->active_mm, address);
21578+#endif
21579+
21580 pgd_ref = pgd_offset_k(address);
21581 if (pgd_none(*pgd_ref))
21582 return -1;
21583@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21584 static int is_errata100(struct pt_regs *regs, unsigned long address)
21585 {
21586 #ifdef CONFIG_X86_64
21587- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21588+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21589 return 1;
21590 #endif
21591 return 0;
21592@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21593 }
21594
21595 static const char nx_warning[] = KERN_CRIT
21596-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21597+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21598
21599 static void
21600 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21601@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21602 if (!oops_may_print())
21603 return;
21604
21605- if (error_code & PF_INSTR) {
21606+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21607 unsigned int level;
21608
21609 pte_t *pte = lookup_address(address, &level);
21610
21611 if (pte && pte_present(*pte) && !pte_exec(*pte))
21612- printk(nx_warning, current_uid());
21613+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21614 }
21615
21616+#ifdef CONFIG_PAX_KERNEXEC
21617+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21618+ if (current->signal->curr_ip)
21619+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21620+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21621+ else
21622+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21623+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21624+ }
21625+#endif
21626+
21627 printk(KERN_ALERT "BUG: unable to handle kernel ");
21628 if (address < PAGE_SIZE)
21629 printk(KERN_CONT "NULL pointer dereference");
21630@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21631 }
21632 #endif
21633
21634+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21635+ if (pax_is_fetch_fault(regs, error_code, address)) {
21636+
21637+#ifdef CONFIG_PAX_EMUTRAMP
21638+ switch (pax_handle_fetch_fault(regs)) {
21639+ case 2:
21640+ return;
21641+ }
21642+#endif
21643+
21644+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21645+ do_group_exit(SIGKILL);
21646+ }
21647+#endif
21648+
21649 if (unlikely(show_unhandled_signals))
21650 show_signal_msg(regs, error_code, address, tsk);
21651
21652@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21653 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21654 printk(KERN_ERR
21655 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21656- tsk->comm, tsk->pid, address);
21657+ tsk->comm, task_pid_nr(tsk), address);
21658 code = BUS_MCEERR_AR;
21659 }
21660 #endif
21661@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21662 return 1;
21663 }
21664
21665+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21666+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21667+{
21668+ pte_t *pte;
21669+ pmd_t *pmd;
21670+ spinlock_t *ptl;
21671+ unsigned char pte_mask;
21672+
21673+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21674+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21675+ return 0;
21676+
21677+ /* PaX: it's our fault, let's handle it if we can */
21678+
21679+ /* PaX: take a look at read faults before acquiring any locks */
21680+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21681+ /* instruction fetch attempt from a protected page in user mode */
21682+ up_read(&mm->mmap_sem);
21683+
21684+#ifdef CONFIG_PAX_EMUTRAMP
21685+ switch (pax_handle_fetch_fault(regs)) {
21686+ case 2:
21687+ return 1;
21688+ }
21689+#endif
21690+
21691+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21692+ do_group_exit(SIGKILL);
21693+ }
21694+
21695+ pmd = pax_get_pmd(mm, address);
21696+ if (unlikely(!pmd))
21697+ return 0;
21698+
21699+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21700+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21701+ pte_unmap_unlock(pte, ptl);
21702+ return 0;
21703+ }
21704+
21705+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21706+ /* write attempt to a protected page in user mode */
21707+ pte_unmap_unlock(pte, ptl);
21708+ return 0;
21709+ }
21710+
21711+#ifdef CONFIG_SMP
21712+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21713+#else
21714+ if (likely(address > get_limit(regs->cs)))
21715+#endif
21716+ {
21717+ set_pte(pte, pte_mkread(*pte));
21718+ __flush_tlb_one(address);
21719+ pte_unmap_unlock(pte, ptl);
21720+ up_read(&mm->mmap_sem);
21721+ return 1;
21722+ }
21723+
21724+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21725+
21726+ /*
21727+ * PaX: fill DTLB with user rights and retry
21728+ */
21729+ __asm__ __volatile__ (
21730+ "orb %2,(%1)\n"
21731+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21732+/*
21733+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21734+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21735+ * page fault when examined during a TLB load attempt. this is true not only
21736+ * for PTEs holding a non-present entry but also present entries that will
21737+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21738+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21739+ * for our target pages since their PTEs are simply not in the TLBs at all.
21740+
21741+ * the best thing in omitting it is that we gain around 15-20% speed in the
21742+ * fast path of the page fault handler and can get rid of tracing since we
21743+ * can no longer flush unintended entries.
21744+ */
21745+ "invlpg (%0)\n"
21746+#endif
21747+ __copyuser_seg"testb $0,(%0)\n"
21748+ "xorb %3,(%1)\n"
21749+ :
21750+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21751+ : "memory", "cc");
21752+ pte_unmap_unlock(pte, ptl);
21753+ up_read(&mm->mmap_sem);
21754+ return 1;
21755+}
21756+#endif
21757+
21758 /*
21759 * Handle a spurious fault caused by a stale TLB entry.
21760 *
21761@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21762 static inline int
21763 access_error(unsigned long error_code, struct vm_area_struct *vma)
21764 {
21765+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21766+ return 1;
21767+
21768 if (error_code & PF_WRITE) {
21769 /* write, present and write, not present: */
21770 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21771@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21772 {
21773 struct vm_area_struct *vma;
21774 struct task_struct *tsk;
21775- unsigned long address;
21776 struct mm_struct *mm;
21777 int fault;
21778 int write = error_code & PF_WRITE;
21779 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21780 (write ? FAULT_FLAG_WRITE : 0);
21781
21782- tsk = current;
21783- mm = tsk->mm;
21784-
21785 /* Get the faulting address: */
21786- address = read_cr2();
21787+ unsigned long address = read_cr2();
21788+
21789+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21790+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21791+ if (!search_exception_tables(regs->ip)) {
21792+ bad_area_nosemaphore(regs, error_code, address);
21793+ return;
21794+ }
21795+ if (address < PAX_USER_SHADOW_BASE) {
21796+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21797+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21798+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21799+ } else
21800+ address -= PAX_USER_SHADOW_BASE;
21801+ }
21802+#endif
21803+
21804+ tsk = current;
21805+ mm = tsk->mm;
21806
21807 /*
21808 * Detect and handle instructions that would cause a page fault for
21809@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21810 * User-mode registers count as a user access even for any
21811 * potential system fault or CPU buglet:
21812 */
21813- if (user_mode_vm(regs)) {
21814+ if (user_mode(regs)) {
21815 local_irq_enable();
21816 error_code |= PF_USER;
21817 } else {
21818@@ -1122,6 +1328,11 @@ retry:
21819 might_sleep();
21820 }
21821
21822+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21823+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21824+ return;
21825+#endif
21826+
21827 vma = find_vma(mm, address);
21828 if (unlikely(!vma)) {
21829 bad_area(regs, error_code, address);
21830@@ -1133,18 +1344,24 @@ retry:
21831 bad_area(regs, error_code, address);
21832 return;
21833 }
21834- if (error_code & PF_USER) {
21835- /*
21836- * Accessing the stack below %sp is always a bug.
21837- * The large cushion allows instructions like enter
21838- * and pusha to work. ("enter $65535, $31" pushes
21839- * 32 pointers and then decrements %sp by 65535.)
21840- */
21841- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21842- bad_area(regs, error_code, address);
21843- return;
21844- }
21845+ /*
21846+ * Accessing the stack below %sp is always a bug.
21847+ * The large cushion allows instructions like enter
21848+ * and pusha to work. ("enter $65535, $31" pushes
21849+ * 32 pointers and then decrements %sp by 65535.)
21850+ */
21851+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21852+ bad_area(regs, error_code, address);
21853+ return;
21854 }
21855+
21856+#ifdef CONFIG_PAX_SEGMEXEC
21857+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21858+ bad_area(regs, error_code, address);
21859+ return;
21860+ }
21861+#endif
21862+
21863 if (unlikely(expand_stack(vma, address))) {
21864 bad_area(regs, error_code, address);
21865 return;
21866@@ -1199,3 +1416,292 @@ good_area:
21867
21868 up_read(&mm->mmap_sem);
21869 }
21870+
21871+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21872+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21873+{
21874+ struct mm_struct *mm = current->mm;
21875+ unsigned long ip = regs->ip;
21876+
21877+ if (v8086_mode(regs))
21878+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21879+
21880+#ifdef CONFIG_PAX_PAGEEXEC
21881+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21882+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21883+ return true;
21884+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21885+ return true;
21886+ return false;
21887+ }
21888+#endif
21889+
21890+#ifdef CONFIG_PAX_SEGMEXEC
21891+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21892+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21893+ return true;
21894+ return false;
21895+ }
21896+#endif
21897+
21898+ return false;
21899+}
21900+#endif
21901+
21902+#ifdef CONFIG_PAX_EMUTRAMP
21903+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21904+{
21905+ int err;
21906+
21907+ do { /* PaX: libffi trampoline emulation */
21908+ unsigned char mov, jmp;
21909+ unsigned int addr1, addr2;
21910+
21911+#ifdef CONFIG_X86_64
21912+ if ((regs->ip + 9) >> 32)
21913+ break;
21914+#endif
21915+
21916+ err = get_user(mov, (unsigned char __user *)regs->ip);
21917+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21918+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21919+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21920+
21921+ if (err)
21922+ break;
21923+
21924+ if (mov == 0xB8 && jmp == 0xE9) {
21925+ regs->ax = addr1;
21926+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21927+ return 2;
21928+ }
21929+ } while (0);
21930+
21931+ do { /* PaX: gcc trampoline emulation #1 */
21932+ unsigned char mov1, mov2;
21933+ unsigned short jmp;
21934+ unsigned int addr1, addr2;
21935+
21936+#ifdef CONFIG_X86_64
21937+ if ((regs->ip + 11) >> 32)
21938+ break;
21939+#endif
21940+
21941+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21942+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21943+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21944+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21945+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21946+
21947+ if (err)
21948+ break;
21949+
21950+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21951+ regs->cx = addr1;
21952+ regs->ax = addr2;
21953+ regs->ip = addr2;
21954+ return 2;
21955+ }
21956+ } while (0);
21957+
21958+ do { /* PaX: gcc trampoline emulation #2 */
21959+ unsigned char mov, jmp;
21960+ unsigned int addr1, addr2;
21961+
21962+#ifdef CONFIG_X86_64
21963+ if ((regs->ip + 9) >> 32)
21964+ break;
21965+#endif
21966+
21967+ err = get_user(mov, (unsigned char __user *)regs->ip);
21968+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21969+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21970+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21971+
21972+ if (err)
21973+ break;
21974+
21975+ if (mov == 0xB9 && jmp == 0xE9) {
21976+ regs->cx = addr1;
21977+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21978+ return 2;
21979+ }
21980+ } while (0);
21981+
21982+ return 1; /* PaX in action */
21983+}
21984+
21985+#ifdef CONFIG_X86_64
21986+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21987+{
21988+ int err;
21989+
21990+ do { /* PaX: libffi trampoline emulation */
21991+ unsigned short mov1, mov2, jmp1;
21992+ unsigned char stcclc, jmp2;
21993+ unsigned long addr1, addr2;
21994+
21995+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21996+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21997+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21998+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21999+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22000+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22001+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22002+
22003+ if (err)
22004+ break;
22005+
22006+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22007+ regs->r11 = addr1;
22008+ regs->r10 = addr2;
22009+ if (stcclc == 0xF8)
22010+ regs->flags &= ~X86_EFLAGS_CF;
22011+ else
22012+ regs->flags |= X86_EFLAGS_CF;
22013+ regs->ip = addr1;
22014+ return 2;
22015+ }
22016+ } while (0);
22017+
22018+ do { /* PaX: gcc trampoline emulation #1 */
22019+ unsigned short mov1, mov2, jmp1;
22020+ unsigned char jmp2;
22021+ unsigned int addr1;
22022+ unsigned long addr2;
22023+
22024+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22025+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22026+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22027+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22028+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22029+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22030+
22031+ if (err)
22032+ break;
22033+
22034+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22035+ regs->r11 = addr1;
22036+ regs->r10 = addr2;
22037+ regs->ip = addr1;
22038+ return 2;
22039+ }
22040+ } while (0);
22041+
22042+ do { /* PaX: gcc trampoline emulation #2 */
22043+ unsigned short mov1, mov2, jmp1;
22044+ unsigned char jmp2;
22045+ unsigned long addr1, addr2;
22046+
22047+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22048+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22049+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22050+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22051+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22052+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22053+
22054+ if (err)
22055+ break;
22056+
22057+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22058+ regs->r11 = addr1;
22059+ regs->r10 = addr2;
22060+ regs->ip = addr1;
22061+ return 2;
22062+ }
22063+ } while (0);
22064+
22065+ return 1; /* PaX in action */
22066+}
22067+#endif
22068+
22069+/*
22070+ * PaX: decide what to do with offenders (regs->ip = fault address)
22071+ *
22072+ * returns 1 when task should be killed
22073+ * 2 when gcc trampoline was detected
22074+ */
22075+static int pax_handle_fetch_fault(struct pt_regs *regs)
22076+{
22077+ if (v8086_mode(regs))
22078+ return 1;
22079+
22080+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22081+ return 1;
22082+
22083+#ifdef CONFIG_X86_32
22084+ return pax_handle_fetch_fault_32(regs);
22085+#else
22086+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22087+ return pax_handle_fetch_fault_32(regs);
22088+ else
22089+ return pax_handle_fetch_fault_64(regs);
22090+#endif
22091+}
22092+#endif
22093+
22094+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22095+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22096+{
22097+ long i;
22098+
22099+ printk(KERN_ERR "PAX: bytes at PC: ");
22100+ for (i = 0; i < 20; i++) {
22101+ unsigned char c;
22102+ if (get_user(c, (unsigned char __force_user *)pc+i))
22103+ printk(KERN_CONT "?? ");
22104+ else
22105+ printk(KERN_CONT "%02x ", c);
22106+ }
22107+ printk("\n");
22108+
22109+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22110+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22111+ unsigned long c;
22112+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22113+#ifdef CONFIG_X86_32
22114+ printk(KERN_CONT "???????? ");
22115+#else
22116+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22117+ printk(KERN_CONT "???????? ???????? ");
22118+ else
22119+ printk(KERN_CONT "???????????????? ");
22120+#endif
22121+ } else {
22122+#ifdef CONFIG_X86_64
22123+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22124+ printk(KERN_CONT "%08x ", (unsigned int)c);
22125+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22126+ } else
22127+#endif
22128+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22129+ }
22130+ }
22131+ printk("\n");
22132+}
22133+#endif
22134+
22135+/**
22136+ * probe_kernel_write(): safely attempt to write to a location
22137+ * @dst: address to write to
22138+ * @src: pointer to the data that shall be written
22139+ * @size: size of the data chunk
22140+ *
22141+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22142+ * happens, handle that and return -EFAULT.
22143+ */
22144+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22145+{
22146+ long ret;
22147+ mm_segment_t old_fs = get_fs();
22148+
22149+ set_fs(KERNEL_DS);
22150+ pagefault_disable();
22151+ pax_open_kernel();
22152+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22153+ pax_close_kernel();
22154+ pagefault_enable();
22155+ set_fs(old_fs);
22156+
22157+ return ret ? -EFAULT : 0;
22158+}
22159diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22160index dd74e46..7d26398 100644
22161--- a/arch/x86/mm/gup.c
22162+++ b/arch/x86/mm/gup.c
22163@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22164 addr = start;
22165 len = (unsigned long) nr_pages << PAGE_SHIFT;
22166 end = start + len;
22167- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22168+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22169 (void __user *)start, len)))
22170 return 0;
22171
22172diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22173index f4f29b1..5cac4fb 100644
22174--- a/arch/x86/mm/highmem_32.c
22175+++ b/arch/x86/mm/highmem_32.c
22176@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22177 idx = type + KM_TYPE_NR*smp_processor_id();
22178 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22179 BUG_ON(!pte_none(*(kmap_pte-idx)));
22180+
22181+ pax_open_kernel();
22182 set_pte(kmap_pte-idx, mk_pte(page, prot));
22183+ pax_close_kernel();
22184+
22185 arch_flush_lazy_mmu_mode();
22186
22187 return (void *)vaddr;
22188diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22189index f581a18..29efd37 100644
22190--- a/arch/x86/mm/hugetlbpage.c
22191+++ b/arch/x86/mm/hugetlbpage.c
22192@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22193 struct hstate *h = hstate_file(file);
22194 struct mm_struct *mm = current->mm;
22195 struct vm_area_struct *vma;
22196- unsigned long start_addr;
22197+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22198+
22199+#ifdef CONFIG_PAX_SEGMEXEC
22200+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22201+ pax_task_size = SEGMEXEC_TASK_SIZE;
22202+#endif
22203+
22204+ pax_task_size -= PAGE_SIZE;
22205
22206 if (len > mm->cached_hole_size) {
22207- start_addr = mm->free_area_cache;
22208+ start_addr = mm->free_area_cache;
22209 } else {
22210- start_addr = TASK_UNMAPPED_BASE;
22211- mm->cached_hole_size = 0;
22212+ start_addr = mm->mmap_base;
22213+ mm->cached_hole_size = 0;
22214 }
22215
22216 full_search:
22217@@ -280,26 +287,27 @@ full_search:
22218
22219 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22220 /* At this point: (!vma || addr < vma->vm_end). */
22221- if (TASK_SIZE - len < addr) {
22222+ if (pax_task_size - len < addr) {
22223 /*
22224 * Start a new search - just in case we missed
22225 * some holes.
22226 */
22227- if (start_addr != TASK_UNMAPPED_BASE) {
22228- start_addr = TASK_UNMAPPED_BASE;
22229+ if (start_addr != mm->mmap_base) {
22230+ start_addr = mm->mmap_base;
22231 mm->cached_hole_size = 0;
22232 goto full_search;
22233 }
22234 return -ENOMEM;
22235 }
22236- if (!vma || addr + len <= vma->vm_start) {
22237- mm->free_area_cache = addr + len;
22238- return addr;
22239- }
22240+ if (check_heap_stack_gap(vma, addr, len))
22241+ break;
22242 if (addr + mm->cached_hole_size < vma->vm_start)
22243 mm->cached_hole_size = vma->vm_start - addr;
22244 addr = ALIGN(vma->vm_end, huge_page_size(h));
22245 }
22246+
22247+ mm->free_area_cache = addr + len;
22248+ return addr;
22249 }
22250
22251 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22252@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22253 {
22254 struct hstate *h = hstate_file(file);
22255 struct mm_struct *mm = current->mm;
22256- struct vm_area_struct *vma, *prev_vma;
22257- unsigned long base = mm->mmap_base, addr = addr0;
22258+ struct vm_area_struct *vma;
22259+ unsigned long base = mm->mmap_base, addr;
22260 unsigned long largest_hole = mm->cached_hole_size;
22261- int first_time = 1;
22262
22263 /* don't allow allocations above current base */
22264 if (mm->free_area_cache > base)
22265@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22266 largest_hole = 0;
22267 mm->free_area_cache = base;
22268 }
22269-try_again:
22270+
22271 /* make sure it can fit in the remaining address space */
22272 if (mm->free_area_cache < len)
22273 goto fail;
22274
22275 /* either no address requested or can't fit in requested address hole */
22276- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22277+ addr = (mm->free_area_cache - len);
22278 do {
22279+ addr &= huge_page_mask(h);
22280+ vma = find_vma(mm, addr);
22281 /*
22282 * Lookup failure means no vma is above this address,
22283 * i.e. return with success:
22284- */
22285- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22286- return addr;
22287-
22288- /*
22289 * new region fits between prev_vma->vm_end and
22290 * vma->vm_start, use it:
22291 */
22292- if (addr + len <= vma->vm_start &&
22293- (!prev_vma || (addr >= prev_vma->vm_end))) {
22294+ if (check_heap_stack_gap(vma, addr, len)) {
22295 /* remember the address as a hint for next time */
22296- mm->cached_hole_size = largest_hole;
22297- return (mm->free_area_cache = addr);
22298- } else {
22299- /* pull free_area_cache down to the first hole */
22300- if (mm->free_area_cache == vma->vm_end) {
22301- mm->free_area_cache = vma->vm_start;
22302- mm->cached_hole_size = largest_hole;
22303- }
22304+ mm->cached_hole_size = largest_hole;
22305+ return (mm->free_area_cache = addr);
22306+ }
22307+ /* pull free_area_cache down to the first hole */
22308+ if (mm->free_area_cache == vma->vm_end) {
22309+ mm->free_area_cache = vma->vm_start;
22310+ mm->cached_hole_size = largest_hole;
22311 }
22312
22313 /* remember the largest hole we saw so far */
22314 if (addr + largest_hole < vma->vm_start)
22315- largest_hole = vma->vm_start - addr;
22316+ largest_hole = vma->vm_start - addr;
22317
22318 /* try just below the current vma->vm_start */
22319- addr = (vma->vm_start - len) & huge_page_mask(h);
22320- } while (len <= vma->vm_start);
22321+ addr = skip_heap_stack_gap(vma, len);
22322+ } while (!IS_ERR_VALUE(addr));
22323
22324 fail:
22325 /*
22326- * if hint left us with no space for the requested
22327- * mapping then try again:
22328- */
22329- if (first_time) {
22330- mm->free_area_cache = base;
22331- largest_hole = 0;
22332- first_time = 0;
22333- goto try_again;
22334- }
22335- /*
22336 * A failed mmap() very likely causes application failure,
22337 * so fall back to the bottom-up function here. This scenario
22338 * can happen with large stack limits and large mmap()
22339 * allocations.
22340 */
22341- mm->free_area_cache = TASK_UNMAPPED_BASE;
22342+
22343+#ifdef CONFIG_PAX_SEGMEXEC
22344+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22345+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22346+ else
22347+#endif
22348+
22349+ mm->mmap_base = TASK_UNMAPPED_BASE;
22350+
22351+#ifdef CONFIG_PAX_RANDMMAP
22352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22353+ mm->mmap_base += mm->delta_mmap;
22354+#endif
22355+
22356+ mm->free_area_cache = mm->mmap_base;
22357 mm->cached_hole_size = ~0UL;
22358 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22359 len, pgoff, flags);
22360@@ -386,6 +392,7 @@ fail:
22361 /*
22362 * Restore the topdown base:
22363 */
22364+ mm->mmap_base = base;
22365 mm->free_area_cache = base;
22366 mm->cached_hole_size = ~0UL;
22367
22368@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22369 struct hstate *h = hstate_file(file);
22370 struct mm_struct *mm = current->mm;
22371 struct vm_area_struct *vma;
22372+ unsigned long pax_task_size = TASK_SIZE;
22373
22374 if (len & ~huge_page_mask(h))
22375 return -EINVAL;
22376- if (len > TASK_SIZE)
22377+
22378+#ifdef CONFIG_PAX_SEGMEXEC
22379+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22380+ pax_task_size = SEGMEXEC_TASK_SIZE;
22381+#endif
22382+
22383+ pax_task_size -= PAGE_SIZE;
22384+
22385+ if (len > pax_task_size)
22386 return -ENOMEM;
22387
22388 if (flags & MAP_FIXED) {
22389@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22390 if (addr) {
22391 addr = ALIGN(addr, huge_page_size(h));
22392 vma = find_vma(mm, addr);
22393- if (TASK_SIZE - len >= addr &&
22394- (!vma || addr + len <= vma->vm_start))
22395+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22396 return addr;
22397 }
22398 if (mm->get_unmapped_area == arch_get_unmapped_area)
22399diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22400index 87488b9..a55509f 100644
22401--- a/arch/x86/mm/init.c
22402+++ b/arch/x86/mm/init.c
22403@@ -15,6 +15,7 @@
22404 #include <asm/tlbflush.h>
22405 #include <asm/tlb.h>
22406 #include <asm/proto.h>
22407+#include <asm/desc.h>
22408
22409 unsigned long __initdata pgt_buf_start;
22410 unsigned long __meminitdata pgt_buf_end;
22411@@ -31,7 +32,7 @@ int direct_gbpages
22412 static void __init find_early_table_space(unsigned long end, int use_pse,
22413 int use_gbpages)
22414 {
22415- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22416+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22417 phys_addr_t base;
22418
22419 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22420@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22421 */
22422 int devmem_is_allowed(unsigned long pagenr)
22423 {
22424+#ifdef CONFIG_GRKERNSEC_KMEM
22425+ /* allow BDA */
22426+ if (!pagenr)
22427+ return 1;
22428+ /* allow EBDA */
22429+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22430+ return 1;
22431+#else
22432+ if (!pagenr)
22433+ return 1;
22434+#ifdef CONFIG_VM86
22435+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22436+ return 1;
22437+#endif
22438+#endif
22439+
22440+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22441+ return 1;
22442+#ifdef CONFIG_GRKERNSEC_KMEM
22443+ /* throw out everything else below 1MB */
22444 if (pagenr <= 256)
22445- return 1;
22446+ return 0;
22447+#endif
22448 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22449 return 0;
22450 if (!page_is_ram(pagenr))
22451@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22452
22453 void free_initmem(void)
22454 {
22455+
22456+#ifdef CONFIG_PAX_KERNEXEC
22457+#ifdef CONFIG_X86_32
22458+ /* PaX: limit KERNEL_CS to actual size */
22459+ unsigned long addr, limit;
22460+ struct desc_struct d;
22461+ int cpu;
22462+
22463+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22464+ limit = (limit - 1UL) >> PAGE_SHIFT;
22465+
22466+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22467+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22468+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22469+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22470+ }
22471+
22472+ /* PaX: make KERNEL_CS read-only */
22473+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22474+ if (!paravirt_enabled())
22475+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22476+/*
22477+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22478+ pgd = pgd_offset_k(addr);
22479+ pud = pud_offset(pgd, addr);
22480+ pmd = pmd_offset(pud, addr);
22481+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22482+ }
22483+*/
22484+#ifdef CONFIG_X86_PAE
22485+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22486+/*
22487+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22488+ pgd = pgd_offset_k(addr);
22489+ pud = pud_offset(pgd, addr);
22490+ pmd = pmd_offset(pud, addr);
22491+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22492+ }
22493+*/
22494+#endif
22495+
22496+#ifdef CONFIG_MODULES
22497+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22498+#endif
22499+
22500+#else
22501+ pgd_t *pgd;
22502+ pud_t *pud;
22503+ pmd_t *pmd;
22504+ unsigned long addr, end;
22505+
22506+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22507+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22508+ pgd = pgd_offset_k(addr);
22509+ pud = pud_offset(pgd, addr);
22510+ pmd = pmd_offset(pud, addr);
22511+ if (!pmd_present(*pmd))
22512+ continue;
22513+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22514+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22515+ else
22516+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22517+ }
22518+
22519+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22520+ end = addr + KERNEL_IMAGE_SIZE;
22521+ for (; addr < end; addr += PMD_SIZE) {
22522+ pgd = pgd_offset_k(addr);
22523+ pud = pud_offset(pgd, addr);
22524+ pmd = pmd_offset(pud, addr);
22525+ if (!pmd_present(*pmd))
22526+ continue;
22527+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22528+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22529+ }
22530+#endif
22531+
22532+ flush_tlb_all();
22533+#endif
22534+
22535 free_init_pages("unused kernel memory",
22536 (unsigned long)(&__init_begin),
22537 (unsigned long)(&__init_end));
22538diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22539index 29f7c6d..b46b35b 100644
22540--- a/arch/x86/mm/init_32.c
22541+++ b/arch/x86/mm/init_32.c
22542@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22543 }
22544
22545 /*
22546- * Creates a middle page table and puts a pointer to it in the
22547- * given global directory entry. This only returns the gd entry
22548- * in non-PAE compilation mode, since the middle layer is folded.
22549- */
22550-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22551-{
22552- pud_t *pud;
22553- pmd_t *pmd_table;
22554-
22555-#ifdef CONFIG_X86_PAE
22556- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22557- if (after_bootmem)
22558- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22559- else
22560- pmd_table = (pmd_t *)alloc_low_page();
22561- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22562- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22563- pud = pud_offset(pgd, 0);
22564- BUG_ON(pmd_table != pmd_offset(pud, 0));
22565-
22566- return pmd_table;
22567- }
22568-#endif
22569- pud = pud_offset(pgd, 0);
22570- pmd_table = pmd_offset(pud, 0);
22571-
22572- return pmd_table;
22573-}
22574-
22575-/*
22576 * Create a page table and place a pointer to it in a middle page
22577 * directory entry:
22578 */
22579@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22580 page_table = (pte_t *)alloc_low_page();
22581
22582 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22583+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22584+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22585+#else
22586 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22587+#endif
22588 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22589 }
22590
22591 return pte_offset_kernel(pmd, 0);
22592 }
22593
22594+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22595+{
22596+ pud_t *pud;
22597+ pmd_t *pmd_table;
22598+
22599+ pud = pud_offset(pgd, 0);
22600+ pmd_table = pmd_offset(pud, 0);
22601+
22602+ return pmd_table;
22603+}
22604+
22605 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22606 {
22607 int pgd_idx = pgd_index(vaddr);
22608@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22609 int pgd_idx, pmd_idx;
22610 unsigned long vaddr;
22611 pgd_t *pgd;
22612+ pud_t *pud;
22613 pmd_t *pmd;
22614 pte_t *pte = NULL;
22615
22616@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22617 pgd = pgd_base + pgd_idx;
22618
22619 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22620- pmd = one_md_table_init(pgd);
22621- pmd = pmd + pmd_index(vaddr);
22622+ pud = pud_offset(pgd, vaddr);
22623+ pmd = pmd_offset(pud, vaddr);
22624+
22625+#ifdef CONFIG_X86_PAE
22626+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22627+#endif
22628+
22629 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22630 pmd++, pmd_idx++) {
22631 pte = page_table_kmap_check(one_page_table_init(pmd),
22632@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22633 }
22634 }
22635
22636-static inline int is_kernel_text(unsigned long addr)
22637+static inline int is_kernel_text(unsigned long start, unsigned long end)
22638 {
22639- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22640- return 1;
22641- return 0;
22642+ if ((start > ktla_ktva((unsigned long)_etext) ||
22643+ end <= ktla_ktva((unsigned long)_stext)) &&
22644+ (start > ktla_ktva((unsigned long)_einittext) ||
22645+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22646+
22647+#ifdef CONFIG_ACPI_SLEEP
22648+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22649+#endif
22650+
22651+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22652+ return 0;
22653+ return 1;
22654 }
22655
22656 /*
22657@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22658 unsigned long last_map_addr = end;
22659 unsigned long start_pfn, end_pfn;
22660 pgd_t *pgd_base = swapper_pg_dir;
22661- int pgd_idx, pmd_idx, pte_ofs;
22662+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22663 unsigned long pfn;
22664 pgd_t *pgd;
22665+ pud_t *pud;
22666 pmd_t *pmd;
22667 pte_t *pte;
22668 unsigned pages_2m, pages_4k;
22669@@ -281,8 +282,13 @@ repeat:
22670 pfn = start_pfn;
22671 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22672 pgd = pgd_base + pgd_idx;
22673- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22674- pmd = one_md_table_init(pgd);
22675+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22676+ pud = pud_offset(pgd, 0);
22677+ pmd = pmd_offset(pud, 0);
22678+
22679+#ifdef CONFIG_X86_PAE
22680+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22681+#endif
22682
22683 if (pfn >= end_pfn)
22684 continue;
22685@@ -294,14 +300,13 @@ repeat:
22686 #endif
22687 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22688 pmd++, pmd_idx++) {
22689- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22690+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22691
22692 /*
22693 * Map with big pages if possible, otherwise
22694 * create normal page tables:
22695 */
22696 if (use_pse) {
22697- unsigned int addr2;
22698 pgprot_t prot = PAGE_KERNEL_LARGE;
22699 /*
22700 * first pass will use the same initial
22701@@ -311,11 +316,7 @@ repeat:
22702 __pgprot(PTE_IDENT_ATTR |
22703 _PAGE_PSE);
22704
22705- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22706- PAGE_OFFSET + PAGE_SIZE-1;
22707-
22708- if (is_kernel_text(addr) ||
22709- is_kernel_text(addr2))
22710+ if (is_kernel_text(address, address + PMD_SIZE))
22711 prot = PAGE_KERNEL_LARGE_EXEC;
22712
22713 pages_2m++;
22714@@ -332,7 +333,7 @@ repeat:
22715 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22716 pte += pte_ofs;
22717 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22718- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22719+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22720 pgprot_t prot = PAGE_KERNEL;
22721 /*
22722 * first pass will use the same initial
22723@@ -340,7 +341,7 @@ repeat:
22724 */
22725 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22726
22727- if (is_kernel_text(addr))
22728+ if (is_kernel_text(address, address + PAGE_SIZE))
22729 prot = PAGE_KERNEL_EXEC;
22730
22731 pages_4k++;
22732@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22733
22734 pud = pud_offset(pgd, va);
22735 pmd = pmd_offset(pud, va);
22736- if (!pmd_present(*pmd))
22737+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22738 break;
22739
22740 pte = pte_offset_kernel(pmd, va);
22741@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22742
22743 static void __init pagetable_init(void)
22744 {
22745- pgd_t *pgd_base = swapper_pg_dir;
22746-
22747- permanent_kmaps_init(pgd_base);
22748+ permanent_kmaps_init(swapper_pg_dir);
22749 }
22750
22751-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22752+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22753 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22754
22755 /* user-defined highmem size */
22756@@ -757,6 +756,12 @@ void __init mem_init(void)
22757
22758 pci_iommu_alloc();
22759
22760+#ifdef CONFIG_PAX_PER_CPU_PGD
22761+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22762+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22763+ KERNEL_PGD_PTRS);
22764+#endif
22765+
22766 #ifdef CONFIG_FLATMEM
22767 BUG_ON(!mem_map);
22768 #endif
22769@@ -774,7 +779,7 @@ void __init mem_init(void)
22770 set_highmem_pages_init();
22771
22772 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22773- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22774+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22775 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22776
22777 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22778@@ -815,10 +820,10 @@ void __init mem_init(void)
22779 ((unsigned long)&__init_end -
22780 (unsigned long)&__init_begin) >> 10,
22781
22782- (unsigned long)&_etext, (unsigned long)&_edata,
22783- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22784+ (unsigned long)&_sdata, (unsigned long)&_edata,
22785+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22786
22787- (unsigned long)&_text, (unsigned long)&_etext,
22788+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22789 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22790
22791 /*
22792@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22793 if (!kernel_set_to_readonly)
22794 return;
22795
22796+ start = ktla_ktva(start);
22797 pr_debug("Set kernel text: %lx - %lx for read write\n",
22798 start, start+size);
22799
22800@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22801 if (!kernel_set_to_readonly)
22802 return;
22803
22804+ start = ktla_ktva(start);
22805 pr_debug("Set kernel text: %lx - %lx for read only\n",
22806 start, start+size);
22807
22808@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22809 unsigned long start = PFN_ALIGN(_text);
22810 unsigned long size = PFN_ALIGN(_etext) - start;
22811
22812+ start = ktla_ktva(start);
22813 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22814 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22815 size >> 10);
22816diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22817index bbaaa00..16dffad 100644
22818--- a/arch/x86/mm/init_64.c
22819+++ b/arch/x86/mm/init_64.c
22820@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22821 * around without checking the pgd every time.
22822 */
22823
22824-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22825+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22826 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22827
22828 int force_personality32;
22829@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22830
22831 for (address = start; address <= end; address += PGDIR_SIZE) {
22832 const pgd_t *pgd_ref = pgd_offset_k(address);
22833+
22834+#ifdef CONFIG_PAX_PER_CPU_PGD
22835+ unsigned long cpu;
22836+#else
22837 struct page *page;
22838+#endif
22839
22840 if (pgd_none(*pgd_ref))
22841 continue;
22842
22843 spin_lock(&pgd_lock);
22844+
22845+#ifdef CONFIG_PAX_PER_CPU_PGD
22846+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22847+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22848+#else
22849 list_for_each_entry(page, &pgd_list, lru) {
22850 pgd_t *pgd;
22851 spinlock_t *pgt_lock;
22852@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22853 /* the pgt_lock only for Xen */
22854 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22855 spin_lock(pgt_lock);
22856+#endif
22857
22858 if (pgd_none(*pgd))
22859 set_pgd(pgd, *pgd_ref);
22860@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22861 BUG_ON(pgd_page_vaddr(*pgd)
22862 != pgd_page_vaddr(*pgd_ref));
22863
22864+#ifndef CONFIG_PAX_PER_CPU_PGD
22865 spin_unlock(pgt_lock);
22866+#endif
22867+
22868 }
22869 spin_unlock(&pgd_lock);
22870 }
22871@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22872 pmd = fill_pmd(pud, vaddr);
22873 pte = fill_pte(pmd, vaddr);
22874
22875+ pax_open_kernel();
22876 set_pte(pte, new_pte);
22877+ pax_close_kernel();
22878
22879 /*
22880 * It's enough to flush this one mapping.
22881@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22882 pgd = pgd_offset_k((unsigned long)__va(phys));
22883 if (pgd_none(*pgd)) {
22884 pud = (pud_t *) spp_getpage();
22885- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22886- _PAGE_USER));
22887+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22888 }
22889 pud = pud_offset(pgd, (unsigned long)__va(phys));
22890 if (pud_none(*pud)) {
22891 pmd = (pmd_t *) spp_getpage();
22892- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22893- _PAGE_USER));
22894+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22895 }
22896 pmd = pmd_offset(pud, phys);
22897 BUG_ON(!pmd_none(*pmd));
22898@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22899 if (pfn >= pgt_buf_top)
22900 panic("alloc_low_page: ran out of memory");
22901
22902- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22903+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22904 clear_page(adr);
22905 *phys = pfn * PAGE_SIZE;
22906 return adr;
22907@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22908
22909 phys = __pa(virt);
22910 left = phys & (PAGE_SIZE - 1);
22911- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22912+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22913 adr = (void *)(((unsigned long)adr) | left);
22914
22915 return adr;
22916@@ -693,6 +707,12 @@ void __init mem_init(void)
22917
22918 pci_iommu_alloc();
22919
22920+#ifdef CONFIG_PAX_PER_CPU_PGD
22921+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22922+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22923+ KERNEL_PGD_PTRS);
22924+#endif
22925+
22926 /* clear_bss() already clear the empty_zero_page */
22927
22928 reservedpages = 0;
22929@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22930 static struct vm_area_struct gate_vma = {
22931 .vm_start = VSYSCALL_START,
22932 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22933- .vm_page_prot = PAGE_READONLY_EXEC,
22934- .vm_flags = VM_READ | VM_EXEC
22935+ .vm_page_prot = PAGE_READONLY,
22936+ .vm_flags = VM_READ
22937 };
22938
22939 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22940@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22941
22942 const char *arch_vma_name(struct vm_area_struct *vma)
22943 {
22944- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22945+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22946 return "[vdso]";
22947 if (vma == &gate_vma)
22948 return "[vsyscall]";
22949diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22950index 7b179b4..6bd1777 100644
22951--- a/arch/x86/mm/iomap_32.c
22952+++ b/arch/x86/mm/iomap_32.c
22953@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22954 type = kmap_atomic_idx_push();
22955 idx = type + KM_TYPE_NR * smp_processor_id();
22956 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22957+
22958+ pax_open_kernel();
22959 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22960+ pax_close_kernel();
22961+
22962 arch_flush_lazy_mmu_mode();
22963
22964 return (void *)vaddr;
22965diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22966index be1ef57..9680edc 100644
22967--- a/arch/x86/mm/ioremap.c
22968+++ b/arch/x86/mm/ioremap.c
22969@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22970 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22971 int is_ram = page_is_ram(pfn);
22972
22973- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22974+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22975 return NULL;
22976 WARN_ON_ONCE(is_ram);
22977 }
22978@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22979 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22980
22981 static __initdata int after_paging_init;
22982-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22983+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22984
22985 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22986 {
22987@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22988 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22989
22990 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22991- memset(bm_pte, 0, sizeof(bm_pte));
22992- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22993+ pmd_populate_user(&init_mm, pmd, bm_pte);
22994
22995 /*
22996 * The boot-ioremap range spans multiple pmds, for which
22997diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22998index d87dd6d..bf3fa66 100644
22999--- a/arch/x86/mm/kmemcheck/kmemcheck.c
23000+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23001@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23002 * memory (e.g. tracked pages)? For now, we need this to avoid
23003 * invoking kmemcheck for PnP BIOS calls.
23004 */
23005- if (regs->flags & X86_VM_MASK)
23006+ if (v8086_mode(regs))
23007 return false;
23008- if (regs->cs != __KERNEL_CS)
23009+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23010 return false;
23011
23012 pte = kmemcheck_pte_lookup(address);
23013diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23014index 845df68..1d8d29f 100644
23015--- a/arch/x86/mm/mmap.c
23016+++ b/arch/x86/mm/mmap.c
23017@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23018 * Leave an at least ~128 MB hole with possible stack randomization.
23019 */
23020 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23021-#define MAX_GAP (TASK_SIZE/6*5)
23022+#define MAX_GAP (pax_task_size/6*5)
23023
23024 static int mmap_is_legacy(void)
23025 {
23026@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23027 return rnd << PAGE_SHIFT;
23028 }
23029
23030-static unsigned long mmap_base(void)
23031+static unsigned long mmap_base(struct mm_struct *mm)
23032 {
23033 unsigned long gap = rlimit(RLIMIT_STACK);
23034+ unsigned long pax_task_size = TASK_SIZE;
23035+
23036+#ifdef CONFIG_PAX_SEGMEXEC
23037+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23038+ pax_task_size = SEGMEXEC_TASK_SIZE;
23039+#endif
23040
23041 if (gap < MIN_GAP)
23042 gap = MIN_GAP;
23043 else if (gap > MAX_GAP)
23044 gap = MAX_GAP;
23045
23046- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23047+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23048 }
23049
23050 /*
23051 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23052 * does, but not when emulating X86_32
23053 */
23054-static unsigned long mmap_legacy_base(void)
23055+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23056 {
23057- if (mmap_is_ia32())
23058+ if (mmap_is_ia32()) {
23059+
23060+#ifdef CONFIG_PAX_SEGMEXEC
23061+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23062+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23063+ else
23064+#endif
23065+
23066 return TASK_UNMAPPED_BASE;
23067- else
23068+ } else
23069 return TASK_UNMAPPED_BASE + mmap_rnd();
23070 }
23071
23072@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23073 void arch_pick_mmap_layout(struct mm_struct *mm)
23074 {
23075 if (mmap_is_legacy()) {
23076- mm->mmap_base = mmap_legacy_base();
23077+ mm->mmap_base = mmap_legacy_base(mm);
23078+
23079+#ifdef CONFIG_PAX_RANDMMAP
23080+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23081+ mm->mmap_base += mm->delta_mmap;
23082+#endif
23083+
23084 mm->get_unmapped_area = arch_get_unmapped_area;
23085 mm->unmap_area = arch_unmap_area;
23086 } else {
23087- mm->mmap_base = mmap_base();
23088+ mm->mmap_base = mmap_base(mm);
23089+
23090+#ifdef CONFIG_PAX_RANDMMAP
23091+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23092+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23093+#endif
23094+
23095 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23096 mm->unmap_area = arch_unmap_area_topdown;
23097 }
23098diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23099index de54b9b..799051e 100644
23100--- a/arch/x86/mm/mmio-mod.c
23101+++ b/arch/x86/mm/mmio-mod.c
23102@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23103 break;
23104 default:
23105 {
23106- unsigned char *ip = (unsigned char *)instptr;
23107+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23108 my_trace->opcode = MMIO_UNKNOWN_OP;
23109 my_trace->width = 0;
23110 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23111@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23112 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23113 void __iomem *addr)
23114 {
23115- static atomic_t next_id;
23116+ static atomic_unchecked_t next_id;
23117 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23118 /* These are page-unaligned. */
23119 struct mmiotrace_map map = {
23120@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23121 .private = trace
23122 },
23123 .phys = offset,
23124- .id = atomic_inc_return(&next_id)
23125+ .id = atomic_inc_return_unchecked(&next_id)
23126 };
23127 map.map_id = trace->id;
23128
23129diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23130index b008656..773eac2 100644
23131--- a/arch/x86/mm/pageattr-test.c
23132+++ b/arch/x86/mm/pageattr-test.c
23133@@ -36,7 +36,7 @@ enum {
23134
23135 static int pte_testbit(pte_t pte)
23136 {
23137- return pte_flags(pte) & _PAGE_UNUSED1;
23138+ return pte_flags(pte) & _PAGE_CPA_TEST;
23139 }
23140
23141 struct split_state {
23142diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23143index f9e5267..6f6e27f 100644
23144--- a/arch/x86/mm/pageattr.c
23145+++ b/arch/x86/mm/pageattr.c
23146@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23147 */
23148 #ifdef CONFIG_PCI_BIOS
23149 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23150- pgprot_val(forbidden) |= _PAGE_NX;
23151+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23152 #endif
23153
23154 /*
23155@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23156 * Does not cover __inittext since that is gone later on. On
23157 * 64bit we do not enforce !NX on the low mapping
23158 */
23159- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23160- pgprot_val(forbidden) |= _PAGE_NX;
23161+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23162+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23163
23164+#ifdef CONFIG_DEBUG_RODATA
23165 /*
23166 * The .rodata section needs to be read-only. Using the pfn
23167 * catches all aliases.
23168@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23169 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23170 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23171 pgprot_val(forbidden) |= _PAGE_RW;
23172+#endif
23173
23174 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23175 /*
23176@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23177 }
23178 #endif
23179
23180+#ifdef CONFIG_PAX_KERNEXEC
23181+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23182+ pgprot_val(forbidden) |= _PAGE_RW;
23183+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23184+ }
23185+#endif
23186+
23187 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23188
23189 return prot;
23190@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23191 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23192 {
23193 /* change init_mm */
23194+ pax_open_kernel();
23195 set_pte_atomic(kpte, pte);
23196+
23197 #ifdef CONFIG_X86_32
23198 if (!SHARED_KERNEL_PMD) {
23199+
23200+#ifdef CONFIG_PAX_PER_CPU_PGD
23201+ unsigned long cpu;
23202+#else
23203 struct page *page;
23204+#endif
23205
23206+#ifdef CONFIG_PAX_PER_CPU_PGD
23207+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23208+ pgd_t *pgd = get_cpu_pgd(cpu);
23209+#else
23210 list_for_each_entry(page, &pgd_list, lru) {
23211- pgd_t *pgd;
23212+ pgd_t *pgd = (pgd_t *)page_address(page);
23213+#endif
23214+
23215 pud_t *pud;
23216 pmd_t *pmd;
23217
23218- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23219+ pgd += pgd_index(address);
23220 pud = pud_offset(pgd, address);
23221 pmd = pmd_offset(pud, address);
23222 set_pte_atomic((pte_t *)pmd, pte);
23223 }
23224 }
23225 #endif
23226+ pax_close_kernel();
23227 }
23228
23229 static int
23230diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23231index f6ff57b..481690f 100644
23232--- a/arch/x86/mm/pat.c
23233+++ b/arch/x86/mm/pat.c
23234@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23235
23236 if (!entry) {
23237 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23238- current->comm, current->pid, start, end);
23239+ current->comm, task_pid_nr(current), start, end);
23240 return -EINVAL;
23241 }
23242
23243@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23244 while (cursor < to) {
23245 if (!devmem_is_allowed(pfn)) {
23246 printk(KERN_INFO
23247- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23248- current->comm, from, to);
23249+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23250+ current->comm, from, to, cursor);
23251 return 0;
23252 }
23253 cursor += PAGE_SIZE;
23254@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23255 printk(KERN_INFO
23256 "%s:%d ioremap_change_attr failed %s "
23257 "for %Lx-%Lx\n",
23258- current->comm, current->pid,
23259+ current->comm, task_pid_nr(current),
23260 cattr_name(flags),
23261 base, (unsigned long long)(base + size));
23262 return -EINVAL;
23263@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23264 if (want_flags != flags) {
23265 printk(KERN_WARNING
23266 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23267- current->comm, current->pid,
23268+ current->comm, task_pid_nr(current),
23269 cattr_name(want_flags),
23270 (unsigned long long)paddr,
23271 (unsigned long long)(paddr + size),
23272@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23273 free_memtype(paddr, paddr + size);
23274 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23275 " for %Lx-%Lx, got %s\n",
23276- current->comm, current->pid,
23277+ current->comm, task_pid_nr(current),
23278 cattr_name(want_flags),
23279 (unsigned long long)paddr,
23280 (unsigned long long)(paddr + size),
23281diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23282index 9f0614d..92ae64a 100644
23283--- a/arch/x86/mm/pf_in.c
23284+++ b/arch/x86/mm/pf_in.c
23285@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23286 int i;
23287 enum reason_type rv = OTHERS;
23288
23289- p = (unsigned char *)ins_addr;
23290+ p = (unsigned char *)ktla_ktva(ins_addr);
23291 p += skip_prefix(p, &prf);
23292 p += get_opcode(p, &opcode);
23293
23294@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23295 struct prefix_bits prf;
23296 int i;
23297
23298- p = (unsigned char *)ins_addr;
23299+ p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302
23303@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23304 struct prefix_bits prf;
23305 int i;
23306
23307- p = (unsigned char *)ins_addr;
23308+ p = (unsigned char *)ktla_ktva(ins_addr);
23309 p += skip_prefix(p, &prf);
23310 p += get_opcode(p, &opcode);
23311
23312@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23313 struct prefix_bits prf;
23314 int i;
23315
23316- p = (unsigned char *)ins_addr;
23317+ p = (unsigned char *)ktla_ktva(ins_addr);
23318 p += skip_prefix(p, &prf);
23319 p += get_opcode(p, &opcode);
23320 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23321@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23322 struct prefix_bits prf;
23323 int i;
23324
23325- p = (unsigned char *)ins_addr;
23326+ p = (unsigned char *)ktla_ktva(ins_addr);
23327 p += skip_prefix(p, &prf);
23328 p += get_opcode(p, &opcode);
23329 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23330diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23331index 8573b83..6372501 100644
23332--- a/arch/x86/mm/pgtable.c
23333+++ b/arch/x86/mm/pgtable.c
23334@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23335 list_del(&page->lru);
23336 }
23337
23338-#define UNSHARED_PTRS_PER_PGD \
23339- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23340+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23341+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23342
23343+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23344+{
23345+ while (count--)
23346+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23347+}
23348+#endif
23349
23350+#ifdef CONFIG_PAX_PER_CPU_PGD
23351+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23352+{
23353+ while (count--)
23354+
23355+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23356+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23357+#else
23358+ *dst++ = *src++;
23359+#endif
23360+
23361+}
23362+#endif
23363+
23364+#ifdef CONFIG_X86_64
23365+#define pxd_t pud_t
23366+#define pyd_t pgd_t
23367+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23368+#define pxd_free(mm, pud) pud_free((mm), (pud))
23369+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23370+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23371+#define PYD_SIZE PGDIR_SIZE
23372+#else
23373+#define pxd_t pmd_t
23374+#define pyd_t pud_t
23375+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23376+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23377+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23378+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23379+#define PYD_SIZE PUD_SIZE
23380+#endif
23381+
23382+#ifdef CONFIG_PAX_PER_CPU_PGD
23383+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23384+static inline void pgd_dtor(pgd_t *pgd) {}
23385+#else
23386 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23387 {
23388 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23389@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23390 pgd_list_del(pgd);
23391 spin_unlock(&pgd_lock);
23392 }
23393+#endif
23394
23395 /*
23396 * List of all pgd's needed for non-PAE so it can invalidate entries
23397@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23398 * -- wli
23399 */
23400
23401-#ifdef CONFIG_X86_PAE
23402+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23403 /*
23404 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23405 * updating the top-level pagetable entries to guarantee the
23406@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23407 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23408 * and initialize the kernel pmds here.
23409 */
23410-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23411+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23412
23413 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23414 {
23415@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23416 */
23417 flush_tlb_mm(mm);
23418 }
23419+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23420+#define PREALLOCATED_PXDS USER_PGD_PTRS
23421 #else /* !CONFIG_X86_PAE */
23422
23423 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23424-#define PREALLOCATED_PMDS 0
23425+#define PREALLOCATED_PXDS 0
23426
23427 #endif /* CONFIG_X86_PAE */
23428
23429-static void free_pmds(pmd_t *pmds[])
23430+static void free_pxds(pxd_t *pxds[])
23431 {
23432 int i;
23433
23434- for(i = 0; i < PREALLOCATED_PMDS; i++)
23435- if (pmds[i])
23436- free_page((unsigned long)pmds[i]);
23437+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23438+ if (pxds[i])
23439+ free_page((unsigned long)pxds[i]);
23440 }
23441
23442-static int preallocate_pmds(pmd_t *pmds[])
23443+static int preallocate_pxds(pxd_t *pxds[])
23444 {
23445 int i;
23446 bool failed = false;
23447
23448- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23449- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23450- if (pmd == NULL)
23451+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23452+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23453+ if (pxd == NULL)
23454 failed = true;
23455- pmds[i] = pmd;
23456+ pxds[i] = pxd;
23457 }
23458
23459 if (failed) {
23460- free_pmds(pmds);
23461+ free_pxds(pxds);
23462 return -ENOMEM;
23463 }
23464
23465@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23466 * preallocate which never got a corresponding vma will need to be
23467 * freed manually.
23468 */
23469-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23470+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23471 {
23472 int i;
23473
23474- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23475+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23476 pgd_t pgd = pgdp[i];
23477
23478 if (pgd_val(pgd) != 0) {
23479- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23480+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23481
23482- pgdp[i] = native_make_pgd(0);
23483+ set_pgd(pgdp + i, native_make_pgd(0));
23484
23485- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23486- pmd_free(mm, pmd);
23487+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23488+ pxd_free(mm, pxd);
23489 }
23490 }
23491 }
23492
23493-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23494+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23495 {
23496- pud_t *pud;
23497+ pyd_t *pyd;
23498 unsigned long addr;
23499 int i;
23500
23501- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23502+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23503 return;
23504
23505- pud = pud_offset(pgd, 0);
23506+#ifdef CONFIG_X86_64
23507+ pyd = pyd_offset(mm, 0L);
23508+#else
23509+ pyd = pyd_offset(pgd, 0L);
23510+#endif
23511
23512- for (addr = i = 0; i < PREALLOCATED_PMDS;
23513- i++, pud++, addr += PUD_SIZE) {
23514- pmd_t *pmd = pmds[i];
23515+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23516+ i++, pyd++, addr += PYD_SIZE) {
23517+ pxd_t *pxd = pxds[i];
23518
23519 if (i >= KERNEL_PGD_BOUNDARY)
23520- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23521- sizeof(pmd_t) * PTRS_PER_PMD);
23522+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23523+ sizeof(pxd_t) * PTRS_PER_PMD);
23524
23525- pud_populate(mm, pud, pmd);
23526+ pyd_populate(mm, pyd, pxd);
23527 }
23528 }
23529
23530 pgd_t *pgd_alloc(struct mm_struct *mm)
23531 {
23532 pgd_t *pgd;
23533- pmd_t *pmds[PREALLOCATED_PMDS];
23534+ pxd_t *pxds[PREALLOCATED_PXDS];
23535
23536 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23537
23538@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23539
23540 mm->pgd = pgd;
23541
23542- if (preallocate_pmds(pmds) != 0)
23543+ if (preallocate_pxds(pxds) != 0)
23544 goto out_free_pgd;
23545
23546 if (paravirt_pgd_alloc(mm) != 0)
23547- goto out_free_pmds;
23548+ goto out_free_pxds;
23549
23550 /*
23551 * Make sure that pre-populating the pmds is atomic with
23552@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23553 spin_lock(&pgd_lock);
23554
23555 pgd_ctor(mm, pgd);
23556- pgd_prepopulate_pmd(mm, pgd, pmds);
23557+ pgd_prepopulate_pxd(mm, pgd, pxds);
23558
23559 spin_unlock(&pgd_lock);
23560
23561 return pgd;
23562
23563-out_free_pmds:
23564- free_pmds(pmds);
23565+out_free_pxds:
23566+ free_pxds(pxds);
23567 out_free_pgd:
23568 free_page((unsigned long)pgd);
23569 out:
23570@@ -295,7 +344,7 @@ out:
23571
23572 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23573 {
23574- pgd_mop_up_pmds(mm, pgd);
23575+ pgd_mop_up_pxds(mm, pgd);
23576 pgd_dtor(pgd);
23577 paravirt_pgd_free(mm, pgd);
23578 free_page((unsigned long)pgd);
23579diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23580index cac7184..09a39fa 100644
23581--- a/arch/x86/mm/pgtable_32.c
23582+++ b/arch/x86/mm/pgtable_32.c
23583@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23584 return;
23585 }
23586 pte = pte_offset_kernel(pmd, vaddr);
23587+
23588+ pax_open_kernel();
23589 if (pte_val(pteval))
23590 set_pte_at(&init_mm, vaddr, pte, pteval);
23591 else
23592 pte_clear(&init_mm, vaddr, pte);
23593+ pax_close_kernel();
23594
23595 /*
23596 * It's enough to flush this one mapping.
23597diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23598index 410531d..0f16030 100644
23599--- a/arch/x86/mm/setup_nx.c
23600+++ b/arch/x86/mm/setup_nx.c
23601@@ -5,8 +5,10 @@
23602 #include <asm/pgtable.h>
23603 #include <asm/proto.h>
23604
23605+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23606 static int disable_nx __cpuinitdata;
23607
23608+#ifndef CONFIG_PAX_PAGEEXEC
23609 /*
23610 * noexec = on|off
23611 *
23612@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23613 return 0;
23614 }
23615 early_param("noexec", noexec_setup);
23616+#endif
23617+
23618+#endif
23619
23620 void __cpuinit x86_configure_nx(void)
23621 {
23622+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23623 if (cpu_has_nx && !disable_nx)
23624 __supported_pte_mask |= _PAGE_NX;
23625 else
23626+#endif
23627 __supported_pte_mask &= ~_PAGE_NX;
23628 }
23629
23630diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23631index d6c0418..06a0ad5 100644
23632--- a/arch/x86/mm/tlb.c
23633+++ b/arch/x86/mm/tlb.c
23634@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23635 BUG();
23636 cpumask_clear_cpu(cpu,
23637 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23638+
23639+#ifndef CONFIG_PAX_PER_CPU_PGD
23640 load_cr3(swapper_pg_dir);
23641+#endif
23642+
23643 }
23644 EXPORT_SYMBOL_GPL(leave_mm);
23645
23646diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23647index 6687022..ceabcfa 100644
23648--- a/arch/x86/net/bpf_jit.S
23649+++ b/arch/x86/net/bpf_jit.S
23650@@ -9,6 +9,7 @@
23651 */
23652 #include <linux/linkage.h>
23653 #include <asm/dwarf2.h>
23654+#include <asm/alternative-asm.h>
23655
23656 /*
23657 * Calling convention :
23658@@ -35,6 +36,7 @@ sk_load_word:
23659 jle bpf_slow_path_word
23660 mov (SKBDATA,%rsi),%eax
23661 bswap %eax /* ntohl() */
23662+ pax_force_retaddr
23663 ret
23664
23665
23666@@ -53,6 +55,7 @@ sk_load_half:
23667 jle bpf_slow_path_half
23668 movzwl (SKBDATA,%rsi),%eax
23669 rol $8,%ax # ntohs()
23670+ pax_force_retaddr
23671 ret
23672
23673 sk_load_byte_ind:
23674@@ -66,6 +69,7 @@ sk_load_byte:
23675 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23676 jle bpf_slow_path_byte
23677 movzbl (SKBDATA,%rsi),%eax
23678+ pax_force_retaddr
23679 ret
23680
23681 /**
23682@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23683 movzbl (SKBDATA,%rsi),%ebx
23684 and $15,%bl
23685 shl $2,%bl
23686+ pax_force_retaddr
23687 ret
23688 CFI_ENDPROC
23689 ENDPROC(sk_load_byte_msh)
23690@@ -91,6 +96,7 @@ bpf_error:
23691 xor %eax,%eax
23692 mov -8(%rbp),%rbx
23693 leaveq
23694+ pax_force_retaddr
23695 ret
23696
23697 /* rsi contains offset and can be scratched */
23698@@ -113,6 +119,7 @@ bpf_slow_path_word:
23699 js bpf_error
23700 mov -12(%rbp),%eax
23701 bswap %eax
23702+ pax_force_retaddr
23703 ret
23704
23705 bpf_slow_path_half:
23706@@ -121,12 +128,14 @@ bpf_slow_path_half:
23707 mov -12(%rbp),%ax
23708 rol $8,%ax
23709 movzwl %ax,%eax
23710+ pax_force_retaddr
23711 ret
23712
23713 bpf_slow_path_byte:
23714 bpf_slow_path_common(1)
23715 js bpf_error
23716 movzbl -12(%rbp),%eax
23717+ pax_force_retaddr
23718 ret
23719
23720 bpf_slow_path_byte_msh:
23721@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23722 and $15,%al
23723 shl $2,%al
23724 xchg %eax,%ebx
23725+ pax_force_retaddr
23726 ret
23727diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23728index 7b65f75..63097f6 100644
23729--- a/arch/x86/net/bpf_jit_comp.c
23730+++ b/arch/x86/net/bpf_jit_comp.c
23731@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23732 set_fs(old_fs);
23733 }
23734
23735+struct bpf_jit_work {
23736+ struct work_struct work;
23737+ void *image;
23738+};
23739
23740 void bpf_jit_compile(struct sk_filter *fp)
23741 {
23742@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23743 if (addrs == NULL)
23744 return;
23745
23746+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23747+ if (!fp->work)
23748+ goto out;
23749+
23750 /* Before first pass, make a rough estimation of addrs[]
23751 * each bpf instruction is translated to less than 64 bytes
23752 */
23753@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23754 if (image) {
23755 if (unlikely(proglen + ilen > oldproglen)) {
23756 pr_err("bpb_jit_compile fatal error\n");
23757- kfree(addrs);
23758- module_free(NULL, image);
23759- return;
23760+ module_free_exec(NULL, image);
23761+ goto out;
23762 }
23763+ pax_open_kernel();
23764 memcpy(image + proglen, temp, ilen);
23765+ pax_close_kernel();
23766 }
23767 proglen += ilen;
23768 addrs[i] = proglen;
23769@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23770 break;
23771 }
23772 if (proglen == oldproglen) {
23773- image = module_alloc(max_t(unsigned int,
23774+ image = module_alloc_exec(max_t(unsigned int,
23775 proglen,
23776 sizeof(struct work_struct)));
23777 if (!image)
23778@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23779 fp->bpf_func = (void *)image;
23780 }
23781 out:
23782+ kfree(fp->work);
23783 kfree(addrs);
23784 return;
23785 }
23786
23787 static void jit_free_defer(struct work_struct *arg)
23788 {
23789- module_free(NULL, arg);
23790+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23791+ kfree(arg);
23792 }
23793
23794 /* run from softirq, we must use a work_struct to call
23795- * module_free() from process context
23796+ * module_free_exec() from process context
23797 */
23798 void bpf_jit_free(struct sk_filter *fp)
23799 {
23800 if (fp->bpf_func != sk_run_filter) {
23801- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23802+ struct work_struct *work = &fp->work->work;
23803
23804 INIT_WORK(work, jit_free_defer);
23805+ fp->work->image = fp->bpf_func;
23806 schedule_work(work);
23807 }
23808 }
23809diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23810index bff89df..377758a 100644
23811--- a/arch/x86/oprofile/backtrace.c
23812+++ b/arch/x86/oprofile/backtrace.c
23813@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23814 struct stack_frame_ia32 *fp;
23815 unsigned long bytes;
23816
23817- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23818+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23819 if (bytes != sizeof(bufhead))
23820 return NULL;
23821
23822- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23823+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23824
23825 oprofile_add_trace(bufhead[0].return_address);
23826
23827@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23828 struct stack_frame bufhead[2];
23829 unsigned long bytes;
23830
23831- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23832+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23833 if (bytes != sizeof(bufhead))
23834 return NULL;
23835
23836@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23837 {
23838 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23839
23840- if (!user_mode_vm(regs)) {
23841+ if (!user_mode(regs)) {
23842 unsigned long stack = kernel_stack_pointer(regs);
23843 if (depth)
23844 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23845diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23846index cb29191..036766d 100644
23847--- a/arch/x86/pci/mrst.c
23848+++ b/arch/x86/pci/mrst.c
23849@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23850 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23851 pci_mmcfg_late_init();
23852 pcibios_enable_irq = mrst_pci_irq_enable;
23853- pci_root_ops = pci_mrst_ops;
23854+ pax_open_kernel();
23855+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23856+ pax_close_kernel();
23857 /* Continue with standard init */
23858 return 1;
23859 }
23860diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23861index db0e9a5..8844dea 100644
23862--- a/arch/x86/pci/pcbios.c
23863+++ b/arch/x86/pci/pcbios.c
23864@@ -79,50 +79,93 @@ union bios32 {
23865 static struct {
23866 unsigned long address;
23867 unsigned short segment;
23868-} bios32_indirect = { 0, __KERNEL_CS };
23869+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23870
23871 /*
23872 * Returns the entry point for the given service, NULL on error
23873 */
23874
23875-static unsigned long bios32_service(unsigned long service)
23876+static unsigned long __devinit bios32_service(unsigned long service)
23877 {
23878 unsigned char return_code; /* %al */
23879 unsigned long address; /* %ebx */
23880 unsigned long length; /* %ecx */
23881 unsigned long entry; /* %edx */
23882 unsigned long flags;
23883+ struct desc_struct d, *gdt;
23884
23885 local_irq_save(flags);
23886- __asm__("lcall *(%%edi); cld"
23887+
23888+ gdt = get_cpu_gdt_table(smp_processor_id());
23889+
23890+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23891+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23892+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23893+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23894+
23895+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23896 : "=a" (return_code),
23897 "=b" (address),
23898 "=c" (length),
23899 "=d" (entry)
23900 : "0" (service),
23901 "1" (0),
23902- "D" (&bios32_indirect));
23903+ "D" (&bios32_indirect),
23904+ "r"(__PCIBIOS_DS)
23905+ : "memory");
23906+
23907+ pax_open_kernel();
23908+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23909+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23910+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23911+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23912+ pax_close_kernel();
23913+
23914 local_irq_restore(flags);
23915
23916 switch (return_code) {
23917- case 0:
23918- return address + entry;
23919- case 0x80: /* Not present */
23920- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23921- return 0;
23922- default: /* Shouldn't happen */
23923- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23924- service, return_code);
23925+ case 0: {
23926+ int cpu;
23927+ unsigned char flags;
23928+
23929+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23930+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23931+ printk(KERN_WARNING "bios32_service: not valid\n");
23932 return 0;
23933+ }
23934+ address = address + PAGE_OFFSET;
23935+ length += 16UL; /* some BIOSs underreport this... */
23936+ flags = 4;
23937+ if (length >= 64*1024*1024) {
23938+ length >>= PAGE_SHIFT;
23939+ flags |= 8;
23940+ }
23941+
23942+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23943+ gdt = get_cpu_gdt_table(cpu);
23944+ pack_descriptor(&d, address, length, 0x9b, flags);
23945+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23946+ pack_descriptor(&d, address, length, 0x93, flags);
23947+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23948+ }
23949+ return entry;
23950+ }
23951+ case 0x80: /* Not present */
23952+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23953+ return 0;
23954+ default: /* Shouldn't happen */
23955+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23956+ service, return_code);
23957+ return 0;
23958 }
23959 }
23960
23961 static struct {
23962 unsigned long address;
23963 unsigned short segment;
23964-} pci_indirect = { 0, __KERNEL_CS };
23965+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23966
23967-static int pci_bios_present;
23968+static int pci_bios_present __read_only;
23969
23970 static int __devinit check_pcibios(void)
23971 {
23972@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23973 unsigned long flags, pcibios_entry;
23974
23975 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23976- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23977+ pci_indirect.address = pcibios_entry;
23978
23979 local_irq_save(flags);
23980- __asm__(
23981- "lcall *(%%edi); cld\n\t"
23982+ __asm__("movw %w6, %%ds\n\t"
23983+ "lcall *%%ss:(%%edi); cld\n\t"
23984+ "push %%ss\n\t"
23985+ "pop %%ds\n\t"
23986 "jc 1f\n\t"
23987 "xor %%ah, %%ah\n"
23988 "1:"
23989@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23990 "=b" (ebx),
23991 "=c" (ecx)
23992 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23993- "D" (&pci_indirect)
23994+ "D" (&pci_indirect),
23995+ "r" (__PCIBIOS_DS)
23996 : "memory");
23997 local_irq_restore(flags);
23998
23999@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24000
24001 switch (len) {
24002 case 1:
24003- __asm__("lcall *(%%esi); cld\n\t"
24004+ __asm__("movw %w6, %%ds\n\t"
24005+ "lcall *%%ss:(%%esi); cld\n\t"
24006+ "push %%ss\n\t"
24007+ "pop %%ds\n\t"
24008 "jc 1f\n\t"
24009 "xor %%ah, %%ah\n"
24010 "1:"
24011@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24012 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24013 "b" (bx),
24014 "D" ((long)reg),
24015- "S" (&pci_indirect));
24016+ "S" (&pci_indirect),
24017+ "r" (__PCIBIOS_DS));
24018 /*
24019 * Zero-extend the result beyond 8 bits, do not trust the
24020 * BIOS having done it:
24021@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24022 *value &= 0xff;
24023 break;
24024 case 2:
24025- __asm__("lcall *(%%esi); cld\n\t"
24026+ __asm__("movw %w6, %%ds\n\t"
24027+ "lcall *%%ss:(%%esi); cld\n\t"
24028+ "push %%ss\n\t"
24029+ "pop %%ds\n\t"
24030 "jc 1f\n\t"
24031 "xor %%ah, %%ah\n"
24032 "1:"
24033@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24034 : "1" (PCIBIOS_READ_CONFIG_WORD),
24035 "b" (bx),
24036 "D" ((long)reg),
24037- "S" (&pci_indirect));
24038+ "S" (&pci_indirect),
24039+ "r" (__PCIBIOS_DS));
24040 /*
24041 * Zero-extend the result beyond 16 bits, do not trust the
24042 * BIOS having done it:
24043@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24044 *value &= 0xffff;
24045 break;
24046 case 4:
24047- __asm__("lcall *(%%esi); cld\n\t"
24048+ __asm__("movw %w6, %%ds\n\t"
24049+ "lcall *%%ss:(%%esi); cld\n\t"
24050+ "push %%ss\n\t"
24051+ "pop %%ds\n\t"
24052 "jc 1f\n\t"
24053 "xor %%ah, %%ah\n"
24054 "1:"
24055@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24056 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24057 "b" (bx),
24058 "D" ((long)reg),
24059- "S" (&pci_indirect));
24060+ "S" (&pci_indirect),
24061+ "r" (__PCIBIOS_DS));
24062 break;
24063 }
24064
24065@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24066
24067 switch (len) {
24068 case 1:
24069- __asm__("lcall *(%%esi); cld\n\t"
24070+ __asm__("movw %w6, %%ds\n\t"
24071+ "lcall *%%ss:(%%esi); cld\n\t"
24072+ "push %%ss\n\t"
24073+ "pop %%ds\n\t"
24074 "jc 1f\n\t"
24075 "xor %%ah, %%ah\n"
24076 "1:"
24077@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24078 "c" (value),
24079 "b" (bx),
24080 "D" ((long)reg),
24081- "S" (&pci_indirect));
24082+ "S" (&pci_indirect),
24083+ "r" (__PCIBIOS_DS));
24084 break;
24085 case 2:
24086- __asm__("lcall *(%%esi); cld\n\t"
24087+ __asm__("movw %w6, %%ds\n\t"
24088+ "lcall *%%ss:(%%esi); cld\n\t"
24089+ "push %%ss\n\t"
24090+ "pop %%ds\n\t"
24091 "jc 1f\n\t"
24092 "xor %%ah, %%ah\n"
24093 "1:"
24094@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24095 "c" (value),
24096 "b" (bx),
24097 "D" ((long)reg),
24098- "S" (&pci_indirect));
24099+ "S" (&pci_indirect),
24100+ "r" (__PCIBIOS_DS));
24101 break;
24102 case 4:
24103- __asm__("lcall *(%%esi); cld\n\t"
24104+ __asm__("movw %w6, %%ds\n\t"
24105+ "lcall *%%ss:(%%esi); cld\n\t"
24106+ "push %%ss\n\t"
24107+ "pop %%ds\n\t"
24108 "jc 1f\n\t"
24109 "xor %%ah, %%ah\n"
24110 "1:"
24111@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24112 "c" (value),
24113 "b" (bx),
24114 "D" ((long)reg),
24115- "S" (&pci_indirect));
24116+ "S" (&pci_indirect),
24117+ "r" (__PCIBIOS_DS));
24118 break;
24119 }
24120
24121@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24122
24123 DBG("PCI: Fetching IRQ routing table... ");
24124 __asm__("push %%es\n\t"
24125+ "movw %w8, %%ds\n\t"
24126 "push %%ds\n\t"
24127 "pop %%es\n\t"
24128- "lcall *(%%esi); cld\n\t"
24129+ "lcall *%%ss:(%%esi); cld\n\t"
24130 "pop %%es\n\t"
24131+ "push %%ss\n\t"
24132+ "pop %%ds\n"
24133 "jc 1f\n\t"
24134 "xor %%ah, %%ah\n"
24135 "1:"
24136@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24137 "1" (0),
24138 "D" ((long) &opt),
24139 "S" (&pci_indirect),
24140- "m" (opt)
24141+ "m" (opt),
24142+ "r" (__PCIBIOS_DS)
24143 : "memory");
24144 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24145 if (ret & 0xff00)
24146@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24147 {
24148 int ret;
24149
24150- __asm__("lcall *(%%esi); cld\n\t"
24151+ __asm__("movw %w5, %%ds\n\t"
24152+ "lcall *%%ss:(%%esi); cld\n\t"
24153+ "push %%ss\n\t"
24154+ "pop %%ds\n"
24155 "jc 1f\n\t"
24156 "xor %%ah, %%ah\n"
24157 "1:"
24158@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24159 : "0" (PCIBIOS_SET_PCI_HW_INT),
24160 "b" ((dev->bus->number << 8) | dev->devfn),
24161 "c" ((irq << 8) | (pin + 10)),
24162- "S" (&pci_indirect));
24163+ "S" (&pci_indirect),
24164+ "r" (__PCIBIOS_DS));
24165 return !(ret & 0xff00);
24166 }
24167 EXPORT_SYMBOL(pcibios_set_irq_routing);
24168diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24169index 40e4469..1ab536e 100644
24170--- a/arch/x86/platform/efi/efi_32.c
24171+++ b/arch/x86/platform/efi/efi_32.c
24172@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24173 {
24174 struct desc_ptr gdt_descr;
24175
24176+#ifdef CONFIG_PAX_KERNEXEC
24177+ struct desc_struct d;
24178+#endif
24179+
24180 local_irq_save(efi_rt_eflags);
24181
24182 load_cr3(initial_page_table);
24183 __flush_tlb_all();
24184
24185+#ifdef CONFIG_PAX_KERNEXEC
24186+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24187+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24188+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24189+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24190+#endif
24191+
24192 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24193 gdt_descr.size = GDT_SIZE - 1;
24194 load_gdt(&gdt_descr);
24195@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24196 {
24197 struct desc_ptr gdt_descr;
24198
24199+#ifdef CONFIG_PAX_KERNEXEC
24200+ struct desc_struct d;
24201+
24202+ memset(&d, 0, sizeof d);
24203+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24204+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24205+#endif
24206+
24207 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24208 gdt_descr.size = GDT_SIZE - 1;
24209 load_gdt(&gdt_descr);
24210diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24211index fbe66e6..c5c0dd2 100644
24212--- a/arch/x86/platform/efi/efi_stub_32.S
24213+++ b/arch/x86/platform/efi/efi_stub_32.S
24214@@ -6,7 +6,9 @@
24215 */
24216
24217 #include <linux/linkage.h>
24218+#include <linux/init.h>
24219 #include <asm/page_types.h>
24220+#include <asm/segment.h>
24221
24222 /*
24223 * efi_call_phys(void *, ...) is a function with variable parameters.
24224@@ -20,7 +22,7 @@
24225 * service functions will comply with gcc calling convention, too.
24226 */
24227
24228-.text
24229+__INIT
24230 ENTRY(efi_call_phys)
24231 /*
24232 * 0. The function can only be called in Linux kernel. So CS has been
24233@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24234 * The mapping of lower virtual memory has been created in prelog and
24235 * epilog.
24236 */
24237- movl $1f, %edx
24238- subl $__PAGE_OFFSET, %edx
24239- jmp *%edx
24240+ movl $(__KERNEXEC_EFI_DS), %edx
24241+ mov %edx, %ds
24242+ mov %edx, %es
24243+ mov %edx, %ss
24244+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24245 1:
24246
24247 /*
24248@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24249 * parameter 2, ..., param n. To make things easy, we save the return
24250 * address of efi_call_phys in a global variable.
24251 */
24252- popl %edx
24253- movl %edx, saved_return_addr
24254- /* get the function pointer into ECX*/
24255- popl %ecx
24256- movl %ecx, efi_rt_function_ptr
24257- movl $2f, %edx
24258- subl $__PAGE_OFFSET, %edx
24259- pushl %edx
24260+ popl (saved_return_addr)
24261+ popl (efi_rt_function_ptr)
24262
24263 /*
24264 * 3. Clear PG bit in %CR0.
24265@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24266 /*
24267 * 5. Call the physical function.
24268 */
24269- jmp *%ecx
24270+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24271
24272-2:
24273 /*
24274 * 6. After EFI runtime service returns, control will return to
24275 * following instruction. We'd better readjust stack pointer first.
24276@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24277 movl %cr0, %edx
24278 orl $0x80000000, %edx
24279 movl %edx, %cr0
24280- jmp 1f
24281-1:
24282+
24283 /*
24284 * 8. Now restore the virtual mode from flat mode by
24285 * adding EIP with PAGE_OFFSET.
24286 */
24287- movl $1f, %edx
24288- jmp *%edx
24289+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24290 1:
24291+ movl $(__KERNEL_DS), %edx
24292+ mov %edx, %ds
24293+ mov %edx, %es
24294+ mov %edx, %ss
24295
24296 /*
24297 * 9. Balance the stack. And because EAX contain the return value,
24298 * we'd better not clobber it.
24299 */
24300- leal efi_rt_function_ptr, %edx
24301- movl (%edx), %ecx
24302- pushl %ecx
24303+ pushl (efi_rt_function_ptr)
24304
24305 /*
24306- * 10. Push the saved return address onto the stack and return.
24307+ * 10. Return to the saved return address.
24308 */
24309- leal saved_return_addr, %edx
24310- movl (%edx), %ecx
24311- pushl %ecx
24312- ret
24313+ jmpl *(saved_return_addr)
24314 ENDPROC(efi_call_phys)
24315 .previous
24316
24317-.data
24318+__INITDATA
24319 saved_return_addr:
24320 .long 0
24321 efi_rt_function_ptr:
24322diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24323index 4c07cca..2c8427d 100644
24324--- a/arch/x86/platform/efi/efi_stub_64.S
24325+++ b/arch/x86/platform/efi/efi_stub_64.S
24326@@ -7,6 +7,7 @@
24327 */
24328
24329 #include <linux/linkage.h>
24330+#include <asm/alternative-asm.h>
24331
24332 #define SAVE_XMM \
24333 mov %rsp, %rax; \
24334@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24335 call *%rdi
24336 addq $32, %rsp
24337 RESTORE_XMM
24338+ pax_force_retaddr 0, 1
24339 ret
24340 ENDPROC(efi_call0)
24341
24342@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24343 call *%rdi
24344 addq $32, %rsp
24345 RESTORE_XMM
24346+ pax_force_retaddr 0, 1
24347 ret
24348 ENDPROC(efi_call1)
24349
24350@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24351 call *%rdi
24352 addq $32, %rsp
24353 RESTORE_XMM
24354+ pax_force_retaddr 0, 1
24355 ret
24356 ENDPROC(efi_call2)
24357
24358@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24359 call *%rdi
24360 addq $32, %rsp
24361 RESTORE_XMM
24362+ pax_force_retaddr 0, 1
24363 ret
24364 ENDPROC(efi_call3)
24365
24366@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24367 call *%rdi
24368 addq $32, %rsp
24369 RESTORE_XMM
24370+ pax_force_retaddr 0, 1
24371 ret
24372 ENDPROC(efi_call4)
24373
24374@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24375 call *%rdi
24376 addq $48, %rsp
24377 RESTORE_XMM
24378+ pax_force_retaddr 0, 1
24379 ret
24380 ENDPROC(efi_call5)
24381
24382@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24383 call *%rdi
24384 addq $48, %rsp
24385 RESTORE_XMM
24386+ pax_force_retaddr 0, 1
24387 ret
24388 ENDPROC(efi_call6)
24389diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24390index ad4ec1c..686479e 100644
24391--- a/arch/x86/platform/mrst/mrst.c
24392+++ b/arch/x86/platform/mrst/mrst.c
24393@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24394 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24395 int sfi_mrtc_num;
24396
24397-static void mrst_power_off(void)
24398+static __noreturn void mrst_power_off(void)
24399 {
24400 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24401 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24402+ BUG();
24403 }
24404
24405-static void mrst_reboot(void)
24406+static __noreturn void mrst_reboot(void)
24407 {
24408 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24409 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24410 else
24411 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24412+ BUG();
24413 }
24414
24415 /* parse all the mtimer info to a static mtimer array */
24416diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24417index f10c0af..3ec1f95 100644
24418--- a/arch/x86/power/cpu.c
24419+++ b/arch/x86/power/cpu.c
24420@@ -131,7 +131,7 @@ static void do_fpu_end(void)
24421 static void fix_processor_context(void)
24422 {
24423 int cpu = smp_processor_id();
24424- struct tss_struct *t = &per_cpu(init_tss, cpu);
24425+ struct tss_struct *t = init_tss + cpu;
24426
24427 set_tss_desc(cpu, t); /*
24428 * This just modifies memory; should not be
24429@@ -141,7 +141,9 @@ static void fix_processor_context(void)
24430 */
24431
24432 #ifdef CONFIG_X86_64
24433+ pax_open_kernel();
24434 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24435+ pax_close_kernel();
24436
24437 syscall_init(); /* This sets MSR_*STAR and related */
24438 #endif
24439diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24440index 5d17950..2253fc9 100644
24441--- a/arch/x86/vdso/Makefile
24442+++ b/arch/x86/vdso/Makefile
24443@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24444 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24445 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24446
24447-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24448+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24449 GCOV_PROFILE := n
24450
24451 #
24452diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24453index 468d591..8e80a0a 100644
24454--- a/arch/x86/vdso/vdso32-setup.c
24455+++ b/arch/x86/vdso/vdso32-setup.c
24456@@ -25,6 +25,7 @@
24457 #include <asm/tlbflush.h>
24458 #include <asm/vdso.h>
24459 #include <asm/proto.h>
24460+#include <asm/mman.h>
24461
24462 enum {
24463 VDSO_DISABLED = 0,
24464@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24465 void enable_sep_cpu(void)
24466 {
24467 int cpu = get_cpu();
24468- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24469+ struct tss_struct *tss = init_tss + cpu;
24470
24471 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24472 put_cpu();
24473@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24474 gate_vma.vm_start = FIXADDR_USER_START;
24475 gate_vma.vm_end = FIXADDR_USER_END;
24476 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24477- gate_vma.vm_page_prot = __P101;
24478+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24479 /*
24480 * Make sure the vDSO gets into every core dump.
24481 * Dumping its contents makes post-mortem fully interpretable later
24482@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24483 if (compat)
24484 addr = VDSO_HIGH_BASE;
24485 else {
24486- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24487+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24488 if (IS_ERR_VALUE(addr)) {
24489 ret = addr;
24490 goto up_fail;
24491 }
24492 }
24493
24494- current->mm->context.vdso = (void *)addr;
24495+ current->mm->context.vdso = addr;
24496
24497 if (compat_uses_vma || !compat) {
24498 /*
24499@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24500 }
24501
24502 current_thread_info()->sysenter_return =
24503- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24504+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24505
24506 up_fail:
24507 if (ret)
24508- current->mm->context.vdso = NULL;
24509+ current->mm->context.vdso = 0;
24510
24511 up_write(&mm->mmap_sem);
24512
24513@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24514
24515 const char *arch_vma_name(struct vm_area_struct *vma)
24516 {
24517- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24518+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24519 return "[vdso]";
24520+
24521+#ifdef CONFIG_PAX_SEGMEXEC
24522+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24523+ return "[vdso]";
24524+#endif
24525+
24526 return NULL;
24527 }
24528
24529@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24530 * Check to see if the corresponding task was created in compat vdso
24531 * mode.
24532 */
24533- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24534+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24535 return &gate_vma;
24536 return NULL;
24537 }
24538diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24539index 153407c..611cba9 100644
24540--- a/arch/x86/vdso/vma.c
24541+++ b/arch/x86/vdso/vma.c
24542@@ -16,8 +16,6 @@
24543 #include <asm/vdso.h>
24544 #include <asm/page.h>
24545
24546-unsigned int __read_mostly vdso_enabled = 1;
24547-
24548 extern char vdso_start[], vdso_end[];
24549 extern unsigned short vdso_sync_cpuid;
24550
24551@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24552 * unaligned here as a result of stack start randomization.
24553 */
24554 addr = PAGE_ALIGN(addr);
24555- addr = align_addr(addr, NULL, ALIGN_VDSO);
24556
24557 return addr;
24558 }
24559@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24560 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24561 {
24562 struct mm_struct *mm = current->mm;
24563- unsigned long addr;
24564+ unsigned long addr = 0;
24565 int ret;
24566
24567- if (!vdso_enabled)
24568- return 0;
24569-
24570 down_write(&mm->mmap_sem);
24571+
24572+#ifdef CONFIG_PAX_RANDMMAP
24573+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24574+#endif
24575+
24576 addr = vdso_addr(mm->start_stack, vdso_size);
24577+ addr = align_addr(addr, NULL, ALIGN_VDSO);
24578 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24579 if (IS_ERR_VALUE(addr)) {
24580 ret = addr;
24581 goto up_fail;
24582 }
24583
24584- current->mm->context.vdso = (void *)addr;
24585+ mm->context.vdso = addr;
24586
24587 ret = install_special_mapping(mm, addr, vdso_size,
24588 VM_READ|VM_EXEC|
24589 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24590 VM_ALWAYSDUMP,
24591 vdso_pages);
24592- if (ret) {
24593- current->mm->context.vdso = NULL;
24594- goto up_fail;
24595- }
24596+
24597+ if (ret)
24598+ mm->context.vdso = 0;
24599
24600 up_fail:
24601 up_write(&mm->mmap_sem);
24602 return ret;
24603 }
24604-
24605-static __init int vdso_setup(char *s)
24606-{
24607- vdso_enabled = simple_strtoul(s, NULL, 0);
24608- return 0;
24609-}
24610-__setup("vdso=", vdso_setup);
24611diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24612index 1f92865..c843b20 100644
24613--- a/arch/x86/xen/enlighten.c
24614+++ b/arch/x86/xen/enlighten.c
24615@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24616
24617 struct shared_info xen_dummy_shared_info;
24618
24619-void *xen_initial_gdt;
24620-
24621 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24622 __read_mostly int xen_have_vector_callback;
24623 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24624@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24625 #endif
24626 };
24627
24628-static void xen_reboot(int reason)
24629+static __noreturn void xen_reboot(int reason)
24630 {
24631 struct sched_shutdown r = { .reason = reason };
24632
24633@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24634 BUG();
24635 }
24636
24637-static void xen_restart(char *msg)
24638+static __noreturn void xen_restart(char *msg)
24639 {
24640 xen_reboot(SHUTDOWN_reboot);
24641 }
24642
24643-static void xen_emergency_restart(void)
24644+static __noreturn void xen_emergency_restart(void)
24645 {
24646 xen_reboot(SHUTDOWN_reboot);
24647 }
24648
24649-static void xen_machine_halt(void)
24650+static __noreturn void xen_machine_halt(void)
24651 {
24652 xen_reboot(SHUTDOWN_poweroff);
24653 }
24654@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24655 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24656
24657 /* Work out if we support NX */
24658- x86_configure_nx();
24659+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24660+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24661+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24662+ unsigned l, h;
24663+
24664+ __supported_pte_mask |= _PAGE_NX;
24665+ rdmsr(MSR_EFER, l, h);
24666+ l |= EFER_NX;
24667+ wrmsr(MSR_EFER, l, h);
24668+ }
24669+#endif
24670
24671 xen_setup_features();
24672
24673@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24674
24675 machine_ops = xen_machine_ops;
24676
24677- /*
24678- * The only reliable way to retain the initial address of the
24679- * percpu gdt_page is to remember it here, so we can go and
24680- * mark it RW later, when the initial percpu area is freed.
24681- */
24682- xen_initial_gdt = &per_cpu(gdt_page, 0);
24683-
24684 xen_smp_init();
24685
24686 #ifdef CONFIG_ACPI_NUMA
24687diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24688index 87f6673..e2555a6 100644
24689--- a/arch/x86/xen/mmu.c
24690+++ b/arch/x86/xen/mmu.c
24691@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24692 convert_pfn_mfn(init_level4_pgt);
24693 convert_pfn_mfn(level3_ident_pgt);
24694 convert_pfn_mfn(level3_kernel_pgt);
24695+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24696+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24697+ convert_pfn_mfn(level3_vmemmap_pgt);
24698
24699 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24700 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24701@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24702 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24703 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24704 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24705+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24706+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24707+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24708 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24709+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24710 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24711 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24712
24713@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24714 pv_mmu_ops.set_pud = xen_set_pud;
24715 #if PAGETABLE_LEVELS == 4
24716 pv_mmu_ops.set_pgd = xen_set_pgd;
24717+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24718 #endif
24719
24720 /* This will work as long as patching hasn't happened yet
24721@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24722 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24723 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24724 .set_pgd = xen_set_pgd_hyper,
24725+ .set_pgd_batched = xen_set_pgd_hyper,
24726
24727 .alloc_pud = xen_alloc_pmd_init,
24728 .release_pud = xen_release_pmd_init,
24729diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24730index 041d4fe..7666b7e 100644
24731--- a/arch/x86/xen/smp.c
24732+++ b/arch/x86/xen/smp.c
24733@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24734 {
24735 BUG_ON(smp_processor_id() != 0);
24736 native_smp_prepare_boot_cpu();
24737-
24738- /* We've switched to the "real" per-cpu gdt, so make sure the
24739- old memory can be recycled */
24740- make_lowmem_page_readwrite(xen_initial_gdt);
24741-
24742 xen_filter_cpu_maps();
24743 xen_setup_vcpu_info_placement();
24744 }
24745@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24746 gdt = get_cpu_gdt_table(cpu);
24747
24748 ctxt->flags = VGCF_IN_KERNEL;
24749- ctxt->user_regs.ds = __USER_DS;
24750- ctxt->user_regs.es = __USER_DS;
24751+ ctxt->user_regs.ds = __KERNEL_DS;
24752+ ctxt->user_regs.es = __KERNEL_DS;
24753 ctxt->user_regs.ss = __KERNEL_DS;
24754 #ifdef CONFIG_X86_32
24755 ctxt->user_regs.fs = __KERNEL_PERCPU;
24756- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24757+ savesegment(gs, ctxt->user_regs.gs);
24758 #else
24759 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24760 #endif
24761@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24762 int rc;
24763
24764 per_cpu(current_task, cpu) = idle;
24765+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24766 #ifdef CONFIG_X86_32
24767 irq_ctx_init(cpu);
24768 #else
24769 clear_tsk_thread_flag(idle, TIF_FORK);
24770- per_cpu(kernel_stack, cpu) =
24771- (unsigned long)task_stack_page(idle) -
24772- KERNEL_STACK_OFFSET + THREAD_SIZE;
24773+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24774 #endif
24775 xen_setup_runstate_info(cpu);
24776 xen_setup_timer(cpu);
24777diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24778index b040b0e..8cc4fe0 100644
24779--- a/arch/x86/xen/xen-asm_32.S
24780+++ b/arch/x86/xen/xen-asm_32.S
24781@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24782 ESP_OFFSET=4 # bytes pushed onto stack
24783
24784 /*
24785- * Store vcpu_info pointer for easy access. Do it this way to
24786- * avoid having to reload %fs
24787+ * Store vcpu_info pointer for easy access.
24788 */
24789 #ifdef CONFIG_SMP
24790- GET_THREAD_INFO(%eax)
24791- movl TI_cpu(%eax), %eax
24792- movl __per_cpu_offset(,%eax,4), %eax
24793- mov xen_vcpu(%eax), %eax
24794+ push %fs
24795+ mov $(__KERNEL_PERCPU), %eax
24796+ mov %eax, %fs
24797+ mov PER_CPU_VAR(xen_vcpu), %eax
24798+ pop %fs
24799 #else
24800 movl xen_vcpu, %eax
24801 #endif
24802diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24803index aaa7291..3f77960 100644
24804--- a/arch/x86/xen/xen-head.S
24805+++ b/arch/x86/xen/xen-head.S
24806@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24807 #ifdef CONFIG_X86_32
24808 mov %esi,xen_start_info
24809 mov $init_thread_union+THREAD_SIZE,%esp
24810+#ifdef CONFIG_SMP
24811+ movl $cpu_gdt_table,%edi
24812+ movl $__per_cpu_load,%eax
24813+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24814+ rorl $16,%eax
24815+ movb %al,__KERNEL_PERCPU + 4(%edi)
24816+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24817+ movl $__per_cpu_end - 1,%eax
24818+ subl $__per_cpu_start,%eax
24819+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24820+#endif
24821 #else
24822 mov %rsi,xen_start_info
24823 mov $init_thread_union+THREAD_SIZE,%rsp
24824diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24825index b095739..8c17bcd 100644
24826--- a/arch/x86/xen/xen-ops.h
24827+++ b/arch/x86/xen/xen-ops.h
24828@@ -10,8 +10,6 @@
24829 extern const char xen_hypervisor_callback[];
24830 extern const char xen_failsafe_callback[];
24831
24832-extern void *xen_initial_gdt;
24833-
24834 struct trap_info;
24835 void xen_copy_trap_info(struct trap_info *traps);
24836
24837diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24838index 58916af..9cb880b 100644
24839--- a/block/blk-iopoll.c
24840+++ b/block/blk-iopoll.c
24841@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24842 }
24843 EXPORT_SYMBOL(blk_iopoll_complete);
24844
24845-static void blk_iopoll_softirq(struct softirq_action *h)
24846+static void blk_iopoll_softirq(void)
24847 {
24848 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24849 int rearm = 0, budget = blk_iopoll_budget;
24850diff --git a/block/blk-map.c b/block/blk-map.c
24851index 623e1cd..ca1e109 100644
24852--- a/block/blk-map.c
24853+++ b/block/blk-map.c
24854@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24855 if (!len || !kbuf)
24856 return -EINVAL;
24857
24858- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24859+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24860 if (do_copy)
24861 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24862 else
24863diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24864index 1366a89..e17f54b 100644
24865--- a/block/blk-softirq.c
24866+++ b/block/blk-softirq.c
24867@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24868 * Softirq action handler - move entries to local list and loop over them
24869 * while passing them to the queue registered handler.
24870 */
24871-static void blk_done_softirq(struct softirq_action *h)
24872+static void blk_done_softirq(void)
24873 {
24874 struct list_head *cpu_list, local_list;
24875
24876diff --git a/block/bsg.c b/block/bsg.c
24877index 702f131..37808bf 100644
24878--- a/block/bsg.c
24879+++ b/block/bsg.c
24880@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24881 struct sg_io_v4 *hdr, struct bsg_device *bd,
24882 fmode_t has_write_perm)
24883 {
24884+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24885+ unsigned char *cmdptr;
24886+
24887 if (hdr->request_len > BLK_MAX_CDB) {
24888 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24889 if (!rq->cmd)
24890 return -ENOMEM;
24891- }
24892+ cmdptr = rq->cmd;
24893+ } else
24894+ cmdptr = tmpcmd;
24895
24896- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24897+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24898 hdr->request_len))
24899 return -EFAULT;
24900
24901+ if (cmdptr != rq->cmd)
24902+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24903+
24904 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24905 if (blk_verify_command(rq->cmd, has_write_perm))
24906 return -EPERM;
24907diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24908index 7b72502..646105c 100644
24909--- a/block/compat_ioctl.c
24910+++ b/block/compat_ioctl.c
24911@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24912 err |= __get_user(f->spec1, &uf->spec1);
24913 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24914 err |= __get_user(name, &uf->name);
24915- f->name = compat_ptr(name);
24916+ f->name = (void __force_kernel *)compat_ptr(name);
24917 if (err) {
24918 err = -EFAULT;
24919 goto out;
24920diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24921index 688be8a..8a37d98 100644
24922--- a/block/scsi_ioctl.c
24923+++ b/block/scsi_ioctl.c
24924@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24925 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24926 struct sg_io_hdr *hdr, fmode_t mode)
24927 {
24928- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24929+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24930+ unsigned char *cmdptr;
24931+
24932+ if (rq->cmd != rq->__cmd)
24933+ cmdptr = rq->cmd;
24934+ else
24935+ cmdptr = tmpcmd;
24936+
24937+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24938 return -EFAULT;
24939+
24940+ if (cmdptr != rq->cmd)
24941+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24942+
24943 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24944 return -EPERM;
24945
24946@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24947 int err;
24948 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24949 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24950+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24951+ unsigned char *cmdptr;
24952
24953 if (!sic)
24954 return -EINVAL;
24955@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24956 */
24957 err = -EFAULT;
24958 rq->cmd_len = cmdlen;
24959- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24960+
24961+ if (rq->cmd != rq->__cmd)
24962+ cmdptr = rq->cmd;
24963+ else
24964+ cmdptr = tmpcmd;
24965+
24966+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24967 goto error;
24968
24969+ if (rq->cmd != cmdptr)
24970+ memcpy(rq->cmd, cmdptr, cmdlen);
24971+
24972 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24973 goto error;
24974
24975diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24976index 671d4d6..5f24030 100644
24977--- a/crypto/cryptd.c
24978+++ b/crypto/cryptd.c
24979@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24980
24981 struct cryptd_blkcipher_request_ctx {
24982 crypto_completion_t complete;
24983-};
24984+} __no_const;
24985
24986 struct cryptd_hash_ctx {
24987 struct crypto_shash *child;
24988@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24989
24990 struct cryptd_aead_request_ctx {
24991 crypto_completion_t complete;
24992-};
24993+} __no_const;
24994
24995 static void cryptd_queue_worker(struct work_struct *work);
24996
24997diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
24998index 9ed9f60..88f160b 100644
24999--- a/crypto/sha512_generic.c
25000+++ b/crypto/sha512_generic.c
25001@@ -21,8 +21,6 @@
25002 #include <linux/percpu.h>
25003 #include <asm/byteorder.h>
25004
25005-static DEFINE_PER_CPU(u64[80], msg_schedule);
25006-
25007 static inline u64 Ch(u64 x, u64 y, u64 z)
25008 {
25009 return z ^ (x & (y ^ z));
25010@@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
25011
25012 static inline void BLEND_OP(int I, u64 *W)
25013 {
25014- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
25015+ W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
25016 }
25017
25018 static void
25019@@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
25020 u64 a, b, c, d, e, f, g, h, t1, t2;
25021
25022 int i;
25023- u64 *W = get_cpu_var(msg_schedule);
25024+ u64 W[16];
25025
25026 /* load the input */
25027 for (i = 0; i < 16; i++)
25028 LOAD_OP(i, W, input);
25029
25030- for (i = 16; i < 80; i++) {
25031- BLEND_OP(i, W);
25032- }
25033-
25034 /* load the state into our registers */
25035 a=state[0]; b=state[1]; c=state[2]; d=state[3];
25036 e=state[4]; f=state[5]; g=state[6]; h=state[7];
25037
25038- /* now iterate */
25039- for (i=0; i<80; i+=8) {
25040- t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
25041- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
25042- t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
25043- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
25044- t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
25045- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
25046- t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
25047- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
25048- t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
25049- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
25050- t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
25051- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
25052- t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
25053- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
25054- t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
25055- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
25056+#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
25057+ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
25058+ t2 = e0(a) + Maj(a, b, c); \
25059+ d += t1; \
25060+ h = t1 + t2
25061+
25062+#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
25063+ BLEND_OP(i, W); \
25064+ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
25065+ t2 = e0(a) + Maj(a, b, c); \
25066+ d += t1; \
25067+ h = t1 + t2
25068+
25069+ for (i = 0; i < 16; i += 8) {
25070+ SHA512_0_15(i, a, b, c, d, e, f, g, h);
25071+ SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
25072+ SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
25073+ SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
25074+ SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
25075+ SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
25076+ SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
25077+ SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
25078+ }
25079+ for (i = 16; i < 80; i += 8) {
25080+ SHA512_16_79(i, a, b, c, d, e, f, g, h);
25081+ SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
25082+ SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
25083+ SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
25084+ SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
25085+ SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
25086+ SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
25087+ SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
25088 }
25089
25090 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
25091@@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
25092
25093 /* erase our data */
25094 a = b = c = d = e = f = g = h = t1 = t2 = 0;
25095- memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
25096- put_cpu_var(msg_schedule);
25097 }
25098
25099 static int
25100diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25101index 5d41894..22021e4 100644
25102--- a/drivers/acpi/apei/cper.c
25103+++ b/drivers/acpi/apei/cper.c
25104@@ -38,12 +38,12 @@
25105 */
25106 u64 cper_next_record_id(void)
25107 {
25108- static atomic64_t seq;
25109+ static atomic64_unchecked_t seq;
25110
25111- if (!atomic64_read(&seq))
25112- atomic64_set(&seq, ((u64)get_seconds()) << 32);
25113+ if (!atomic64_read_unchecked(&seq))
25114+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25115
25116- return atomic64_inc_return(&seq);
25117+ return atomic64_inc_return_unchecked(&seq);
25118 }
25119 EXPORT_SYMBOL_GPL(cper_next_record_id);
25120
25121diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25122index 6c47ae9..8ab9132 100644
25123--- a/drivers/acpi/ec_sys.c
25124+++ b/drivers/acpi/ec_sys.c
25125@@ -12,6 +12,7 @@
25126 #include <linux/acpi.h>
25127 #include <linux/debugfs.h>
25128 #include <linux/module.h>
25129+#include <asm/uaccess.h>
25130 #include "internal.h"
25131
25132 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25133@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25134 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25135 */
25136 unsigned int size = EC_SPACE_SIZE;
25137- u8 *data = (u8 *) buf;
25138+ u8 data;
25139 loff_t init_off = *off;
25140 int err = 0;
25141
25142@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25143 size = count;
25144
25145 while (size) {
25146- err = ec_read(*off, &data[*off - init_off]);
25147+ err = ec_read(*off, &data);
25148 if (err)
25149 return err;
25150+ if (put_user(data, &buf[*off - init_off]))
25151+ return -EFAULT;
25152 *off += 1;
25153 size--;
25154 }
25155@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25156
25157 unsigned int size = count;
25158 loff_t init_off = *off;
25159- u8 *data = (u8 *) buf;
25160 int err = 0;
25161
25162 if (*off >= EC_SPACE_SIZE)
25163@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25164 }
25165
25166 while (size) {
25167- u8 byte_write = data[*off - init_off];
25168+ u8 byte_write;
25169+ if (get_user(byte_write, &buf[*off - init_off]))
25170+ return -EFAULT;
25171 err = ec_write(*off, byte_write);
25172 if (err)
25173 return err;
25174diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25175index 251c7b62..000462d 100644
25176--- a/drivers/acpi/proc.c
25177+++ b/drivers/acpi/proc.c
25178@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25179 size_t count, loff_t * ppos)
25180 {
25181 struct list_head *node, *next;
25182- char strbuf[5];
25183- char str[5] = "";
25184- unsigned int len = count;
25185+ char strbuf[5] = {0};
25186
25187- if (len > 4)
25188- len = 4;
25189- if (len < 0)
25190+ if (count > 4)
25191+ count = 4;
25192+ if (copy_from_user(strbuf, buffer, count))
25193 return -EFAULT;
25194-
25195- if (copy_from_user(strbuf, buffer, len))
25196- return -EFAULT;
25197- strbuf[len] = '\0';
25198- sscanf(strbuf, "%s", str);
25199+ strbuf[count] = '\0';
25200
25201 mutex_lock(&acpi_device_lock);
25202 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25203@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25204 if (!dev->wakeup.flags.valid)
25205 continue;
25206
25207- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25208+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25209 if (device_can_wakeup(&dev->dev)) {
25210 bool enable = !device_may_wakeup(&dev->dev);
25211 device_set_wakeup_enable(&dev->dev, enable);
25212diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25213index 9d7bc9f..a6fc091 100644
25214--- a/drivers/acpi/processor_driver.c
25215+++ b/drivers/acpi/processor_driver.c
25216@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25217 return 0;
25218 #endif
25219
25220- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25221+ BUG_ON(pr->id >= nr_cpu_ids);
25222
25223 /*
25224 * Buggy BIOS check
25225diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25226index c04ad68..0b99473 100644
25227--- a/drivers/ata/libata-core.c
25228+++ b/drivers/ata/libata-core.c
25229@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25230 struct ata_port *ap;
25231 unsigned int tag;
25232
25233- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25234+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25235 ap = qc->ap;
25236
25237 qc->flags = 0;
25238@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25239 struct ata_port *ap;
25240 struct ata_link *link;
25241
25242- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25243+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25244 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25245 ap = qc->ap;
25246 link = qc->dev->link;
25247@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25248 return;
25249
25250 spin_lock(&lock);
25251+ pax_open_kernel();
25252
25253 for (cur = ops->inherits; cur; cur = cur->inherits) {
25254 void **inherit = (void **)cur;
25255@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25256 if (IS_ERR(*pp))
25257 *pp = NULL;
25258
25259- ops->inherits = NULL;
25260+ *(struct ata_port_operations **)&ops->inherits = NULL;
25261
25262+ pax_close_kernel();
25263 spin_unlock(&lock);
25264 }
25265
25266diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25267index e8574bb..f9f6a72 100644
25268--- a/drivers/ata/pata_arasan_cf.c
25269+++ b/drivers/ata/pata_arasan_cf.c
25270@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25271 /* Handle platform specific quirks */
25272 if (pdata->quirk) {
25273 if (pdata->quirk & CF_BROKEN_PIO) {
25274- ap->ops->set_piomode = NULL;
25275+ pax_open_kernel();
25276+ *(void **)&ap->ops->set_piomode = NULL;
25277+ pax_close_kernel();
25278 ap->pio_mask = 0;
25279 }
25280 if (pdata->quirk & CF_BROKEN_MWDMA)
25281diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25282index f9b983a..887b9d8 100644
25283--- a/drivers/atm/adummy.c
25284+++ b/drivers/atm/adummy.c
25285@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25286 vcc->pop(vcc, skb);
25287 else
25288 dev_kfree_skb_any(skb);
25289- atomic_inc(&vcc->stats->tx);
25290+ atomic_inc_unchecked(&vcc->stats->tx);
25291
25292 return 0;
25293 }
25294diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25295index f8f41e0..1f987dd 100644
25296--- a/drivers/atm/ambassador.c
25297+++ b/drivers/atm/ambassador.c
25298@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25299 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25300
25301 // VC layer stats
25302- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25303+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25304
25305 // free the descriptor
25306 kfree (tx_descr);
25307@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25308 dump_skb ("<<<", vc, skb);
25309
25310 // VC layer stats
25311- atomic_inc(&atm_vcc->stats->rx);
25312+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25313 __net_timestamp(skb);
25314 // end of our responsibility
25315 atm_vcc->push (atm_vcc, skb);
25316@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25317 } else {
25318 PRINTK (KERN_INFO, "dropped over-size frame");
25319 // should we count this?
25320- atomic_inc(&atm_vcc->stats->rx_drop);
25321+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25322 }
25323
25324 } else {
25325@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25326 }
25327
25328 if (check_area (skb->data, skb->len)) {
25329- atomic_inc(&atm_vcc->stats->tx_err);
25330+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25331 return -ENOMEM; // ?
25332 }
25333
25334diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25335index b22d71c..d6e1049 100644
25336--- a/drivers/atm/atmtcp.c
25337+++ b/drivers/atm/atmtcp.c
25338@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25339 if (vcc->pop) vcc->pop(vcc,skb);
25340 else dev_kfree_skb(skb);
25341 if (dev_data) return 0;
25342- atomic_inc(&vcc->stats->tx_err);
25343+ atomic_inc_unchecked(&vcc->stats->tx_err);
25344 return -ENOLINK;
25345 }
25346 size = skb->len+sizeof(struct atmtcp_hdr);
25347@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25348 if (!new_skb) {
25349 if (vcc->pop) vcc->pop(vcc,skb);
25350 else dev_kfree_skb(skb);
25351- atomic_inc(&vcc->stats->tx_err);
25352+ atomic_inc_unchecked(&vcc->stats->tx_err);
25353 return -ENOBUFS;
25354 }
25355 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25356@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25357 if (vcc->pop) vcc->pop(vcc,skb);
25358 else dev_kfree_skb(skb);
25359 out_vcc->push(out_vcc,new_skb);
25360- atomic_inc(&vcc->stats->tx);
25361- atomic_inc(&out_vcc->stats->rx);
25362+ atomic_inc_unchecked(&vcc->stats->tx);
25363+ atomic_inc_unchecked(&out_vcc->stats->rx);
25364 return 0;
25365 }
25366
25367@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25368 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25369 read_unlock(&vcc_sklist_lock);
25370 if (!out_vcc) {
25371- atomic_inc(&vcc->stats->tx_err);
25372+ atomic_inc_unchecked(&vcc->stats->tx_err);
25373 goto done;
25374 }
25375 skb_pull(skb,sizeof(struct atmtcp_hdr));
25376@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25377 __net_timestamp(new_skb);
25378 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25379 out_vcc->push(out_vcc,new_skb);
25380- atomic_inc(&vcc->stats->tx);
25381- atomic_inc(&out_vcc->stats->rx);
25382+ atomic_inc_unchecked(&vcc->stats->tx);
25383+ atomic_inc_unchecked(&out_vcc->stats->rx);
25384 done:
25385 if (vcc->pop) vcc->pop(vcc,skb);
25386 else dev_kfree_skb(skb);
25387diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25388index 956e9ac..133516d 100644
25389--- a/drivers/atm/eni.c
25390+++ b/drivers/atm/eni.c
25391@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25392 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25393 vcc->dev->number);
25394 length = 0;
25395- atomic_inc(&vcc->stats->rx_err);
25396+ atomic_inc_unchecked(&vcc->stats->rx_err);
25397 }
25398 else {
25399 length = ATM_CELL_SIZE-1; /* no HEC */
25400@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25401 size);
25402 }
25403 eff = length = 0;
25404- atomic_inc(&vcc->stats->rx_err);
25405+ atomic_inc_unchecked(&vcc->stats->rx_err);
25406 }
25407 else {
25408 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25409@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25410 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25411 vcc->dev->number,vcc->vci,length,size << 2,descr);
25412 length = eff = 0;
25413- atomic_inc(&vcc->stats->rx_err);
25414+ atomic_inc_unchecked(&vcc->stats->rx_err);
25415 }
25416 }
25417 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25418@@ -771,7 +771,7 @@ rx_dequeued++;
25419 vcc->push(vcc,skb);
25420 pushed++;
25421 }
25422- atomic_inc(&vcc->stats->rx);
25423+ atomic_inc_unchecked(&vcc->stats->rx);
25424 }
25425 wake_up(&eni_dev->rx_wait);
25426 }
25427@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25428 PCI_DMA_TODEVICE);
25429 if (vcc->pop) vcc->pop(vcc,skb);
25430 else dev_kfree_skb_irq(skb);
25431- atomic_inc(&vcc->stats->tx);
25432+ atomic_inc_unchecked(&vcc->stats->tx);
25433 wake_up(&eni_dev->tx_wait);
25434 dma_complete++;
25435 }
25436@@ -1569,7 +1569,7 @@ tx_complete++;
25437 /*--------------------------------- entries ---------------------------------*/
25438
25439
25440-static const char *media_name[] __devinitdata = {
25441+static const char *media_name[] __devinitconst = {
25442 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25443 "UTP", "05?", "06?", "07?", /* 4- 7 */
25444 "TAXI","09?", "10?", "11?", /* 8-11 */
25445diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25446index 5072f8a..fa52520d 100644
25447--- a/drivers/atm/firestream.c
25448+++ b/drivers/atm/firestream.c
25449@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25450 }
25451 }
25452
25453- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25454+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25455
25456 fs_dprintk (FS_DEBUG_TXMEM, "i");
25457 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25458@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25459 #endif
25460 skb_put (skb, qe->p1 & 0xffff);
25461 ATM_SKB(skb)->vcc = atm_vcc;
25462- atomic_inc(&atm_vcc->stats->rx);
25463+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25464 __net_timestamp(skb);
25465 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25466 atm_vcc->push (atm_vcc, skb);
25467@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25468 kfree (pe);
25469 }
25470 if (atm_vcc)
25471- atomic_inc(&atm_vcc->stats->rx_drop);
25472+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25473 break;
25474 case 0x1f: /* Reassembly abort: no buffers. */
25475 /* Silently increment error counter. */
25476 if (atm_vcc)
25477- atomic_inc(&atm_vcc->stats->rx_drop);
25478+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25479 break;
25480 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25481 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25482diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25483index 361f5ae..7fc552d 100644
25484--- a/drivers/atm/fore200e.c
25485+++ b/drivers/atm/fore200e.c
25486@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25487 #endif
25488 /* check error condition */
25489 if (*entry->status & STATUS_ERROR)
25490- atomic_inc(&vcc->stats->tx_err);
25491+ atomic_inc_unchecked(&vcc->stats->tx_err);
25492 else
25493- atomic_inc(&vcc->stats->tx);
25494+ atomic_inc_unchecked(&vcc->stats->tx);
25495 }
25496 }
25497
25498@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25499 if (skb == NULL) {
25500 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25501
25502- atomic_inc(&vcc->stats->rx_drop);
25503+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25504 return -ENOMEM;
25505 }
25506
25507@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25508
25509 dev_kfree_skb_any(skb);
25510
25511- atomic_inc(&vcc->stats->rx_drop);
25512+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25513 return -ENOMEM;
25514 }
25515
25516 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25517
25518 vcc->push(vcc, skb);
25519- atomic_inc(&vcc->stats->rx);
25520+ atomic_inc_unchecked(&vcc->stats->rx);
25521
25522 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25523
25524@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25525 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25526 fore200e->atm_dev->number,
25527 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25528- atomic_inc(&vcc->stats->rx_err);
25529+ atomic_inc_unchecked(&vcc->stats->rx_err);
25530 }
25531 }
25532
25533@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25534 goto retry_here;
25535 }
25536
25537- atomic_inc(&vcc->stats->tx_err);
25538+ atomic_inc_unchecked(&vcc->stats->tx_err);
25539
25540 fore200e->tx_sat++;
25541 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25542diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25543index 9a51df4..f3bb5f8 100644
25544--- a/drivers/atm/he.c
25545+++ b/drivers/atm/he.c
25546@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25547
25548 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25549 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25550- atomic_inc(&vcc->stats->rx_drop);
25551+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25552 goto return_host_buffers;
25553 }
25554
25555@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25556 RBRQ_LEN_ERR(he_dev->rbrq_head)
25557 ? "LEN_ERR" : "",
25558 vcc->vpi, vcc->vci);
25559- atomic_inc(&vcc->stats->rx_err);
25560+ atomic_inc_unchecked(&vcc->stats->rx_err);
25561 goto return_host_buffers;
25562 }
25563
25564@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25565 vcc->push(vcc, skb);
25566 spin_lock(&he_dev->global_lock);
25567
25568- atomic_inc(&vcc->stats->rx);
25569+ atomic_inc_unchecked(&vcc->stats->rx);
25570
25571 return_host_buffers:
25572 ++pdus_assembled;
25573@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25574 tpd->vcc->pop(tpd->vcc, tpd->skb);
25575 else
25576 dev_kfree_skb_any(tpd->skb);
25577- atomic_inc(&tpd->vcc->stats->tx_err);
25578+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25579 }
25580 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25581 return;
25582@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25583 vcc->pop(vcc, skb);
25584 else
25585 dev_kfree_skb_any(skb);
25586- atomic_inc(&vcc->stats->tx_err);
25587+ atomic_inc_unchecked(&vcc->stats->tx_err);
25588 return -EINVAL;
25589 }
25590
25591@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25592 vcc->pop(vcc, skb);
25593 else
25594 dev_kfree_skb_any(skb);
25595- atomic_inc(&vcc->stats->tx_err);
25596+ atomic_inc_unchecked(&vcc->stats->tx_err);
25597 return -EINVAL;
25598 }
25599 #endif
25600@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25601 vcc->pop(vcc, skb);
25602 else
25603 dev_kfree_skb_any(skb);
25604- atomic_inc(&vcc->stats->tx_err);
25605+ atomic_inc_unchecked(&vcc->stats->tx_err);
25606 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25607 return -ENOMEM;
25608 }
25609@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25610 vcc->pop(vcc, skb);
25611 else
25612 dev_kfree_skb_any(skb);
25613- atomic_inc(&vcc->stats->tx_err);
25614+ atomic_inc_unchecked(&vcc->stats->tx_err);
25615 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25616 return -ENOMEM;
25617 }
25618@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25619 __enqueue_tpd(he_dev, tpd, cid);
25620 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25621
25622- atomic_inc(&vcc->stats->tx);
25623+ atomic_inc_unchecked(&vcc->stats->tx);
25624
25625 return 0;
25626 }
25627diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25628index b812103..e391a49 100644
25629--- a/drivers/atm/horizon.c
25630+++ b/drivers/atm/horizon.c
25631@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25632 {
25633 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25634 // VC layer stats
25635- atomic_inc(&vcc->stats->rx);
25636+ atomic_inc_unchecked(&vcc->stats->rx);
25637 __net_timestamp(skb);
25638 // end of our responsibility
25639 vcc->push (vcc, skb);
25640@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25641 dev->tx_iovec = NULL;
25642
25643 // VC layer stats
25644- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25645+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25646
25647 // free the skb
25648 hrz_kfree_skb (skb);
25649diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25650index 1c05212..c28e200 100644
25651--- a/drivers/atm/idt77252.c
25652+++ b/drivers/atm/idt77252.c
25653@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25654 else
25655 dev_kfree_skb(skb);
25656
25657- atomic_inc(&vcc->stats->tx);
25658+ atomic_inc_unchecked(&vcc->stats->tx);
25659 }
25660
25661 atomic_dec(&scq->used);
25662@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25663 if ((sb = dev_alloc_skb(64)) == NULL) {
25664 printk("%s: Can't allocate buffers for aal0.\n",
25665 card->name);
25666- atomic_add(i, &vcc->stats->rx_drop);
25667+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25668 break;
25669 }
25670 if (!atm_charge(vcc, sb->truesize)) {
25671 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25672 card->name);
25673- atomic_add(i - 1, &vcc->stats->rx_drop);
25674+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25675 dev_kfree_skb(sb);
25676 break;
25677 }
25678@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25679 ATM_SKB(sb)->vcc = vcc;
25680 __net_timestamp(sb);
25681 vcc->push(vcc, sb);
25682- atomic_inc(&vcc->stats->rx);
25683+ atomic_inc_unchecked(&vcc->stats->rx);
25684
25685 cell += ATM_CELL_PAYLOAD;
25686 }
25687@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25688 "(CDC: %08x)\n",
25689 card->name, len, rpp->len, readl(SAR_REG_CDC));
25690 recycle_rx_pool_skb(card, rpp);
25691- atomic_inc(&vcc->stats->rx_err);
25692+ atomic_inc_unchecked(&vcc->stats->rx_err);
25693 return;
25694 }
25695 if (stat & SAR_RSQE_CRC) {
25696 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25697 recycle_rx_pool_skb(card, rpp);
25698- atomic_inc(&vcc->stats->rx_err);
25699+ atomic_inc_unchecked(&vcc->stats->rx_err);
25700 return;
25701 }
25702 if (skb_queue_len(&rpp->queue) > 1) {
25703@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25704 RXPRINTK("%s: Can't alloc RX skb.\n",
25705 card->name);
25706 recycle_rx_pool_skb(card, rpp);
25707- atomic_inc(&vcc->stats->rx_err);
25708+ atomic_inc_unchecked(&vcc->stats->rx_err);
25709 return;
25710 }
25711 if (!atm_charge(vcc, skb->truesize)) {
25712@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25713 __net_timestamp(skb);
25714
25715 vcc->push(vcc, skb);
25716- atomic_inc(&vcc->stats->rx);
25717+ atomic_inc_unchecked(&vcc->stats->rx);
25718
25719 return;
25720 }
25721@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25722 __net_timestamp(skb);
25723
25724 vcc->push(vcc, skb);
25725- atomic_inc(&vcc->stats->rx);
25726+ atomic_inc_unchecked(&vcc->stats->rx);
25727
25728 if (skb->truesize > SAR_FB_SIZE_3)
25729 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25730@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25731 if (vcc->qos.aal != ATM_AAL0) {
25732 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25733 card->name, vpi, vci);
25734- atomic_inc(&vcc->stats->rx_drop);
25735+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25736 goto drop;
25737 }
25738
25739 if ((sb = dev_alloc_skb(64)) == NULL) {
25740 printk("%s: Can't allocate buffers for AAL0.\n",
25741 card->name);
25742- atomic_inc(&vcc->stats->rx_err);
25743+ atomic_inc_unchecked(&vcc->stats->rx_err);
25744 goto drop;
25745 }
25746
25747@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25748 ATM_SKB(sb)->vcc = vcc;
25749 __net_timestamp(sb);
25750 vcc->push(vcc, sb);
25751- atomic_inc(&vcc->stats->rx);
25752+ atomic_inc_unchecked(&vcc->stats->rx);
25753
25754 drop:
25755 skb_pull(queue, 64);
25756@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25757
25758 if (vc == NULL) {
25759 printk("%s: NULL connection in send().\n", card->name);
25760- atomic_inc(&vcc->stats->tx_err);
25761+ atomic_inc_unchecked(&vcc->stats->tx_err);
25762 dev_kfree_skb(skb);
25763 return -EINVAL;
25764 }
25765 if (!test_bit(VCF_TX, &vc->flags)) {
25766 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25767- atomic_inc(&vcc->stats->tx_err);
25768+ atomic_inc_unchecked(&vcc->stats->tx_err);
25769 dev_kfree_skb(skb);
25770 return -EINVAL;
25771 }
25772@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25773 break;
25774 default:
25775 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25776- atomic_inc(&vcc->stats->tx_err);
25777+ atomic_inc_unchecked(&vcc->stats->tx_err);
25778 dev_kfree_skb(skb);
25779 return -EINVAL;
25780 }
25781
25782 if (skb_shinfo(skb)->nr_frags != 0) {
25783 printk("%s: No scatter-gather yet.\n", card->name);
25784- atomic_inc(&vcc->stats->tx_err);
25785+ atomic_inc_unchecked(&vcc->stats->tx_err);
25786 dev_kfree_skb(skb);
25787 return -EINVAL;
25788 }
25789@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25790
25791 err = queue_skb(card, vc, skb, oam);
25792 if (err) {
25793- atomic_inc(&vcc->stats->tx_err);
25794+ atomic_inc_unchecked(&vcc->stats->tx_err);
25795 dev_kfree_skb(skb);
25796 return err;
25797 }
25798@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25799 skb = dev_alloc_skb(64);
25800 if (!skb) {
25801 printk("%s: Out of memory in send_oam().\n", card->name);
25802- atomic_inc(&vcc->stats->tx_err);
25803+ atomic_inc_unchecked(&vcc->stats->tx_err);
25804 return -ENOMEM;
25805 }
25806 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25807diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25808index 3d0c2b0..45441fa 100644
25809--- a/drivers/atm/iphase.c
25810+++ b/drivers/atm/iphase.c
25811@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25812 status = (u_short) (buf_desc_ptr->desc_mode);
25813 if (status & (RX_CER | RX_PTE | RX_OFL))
25814 {
25815- atomic_inc(&vcc->stats->rx_err);
25816+ atomic_inc_unchecked(&vcc->stats->rx_err);
25817 IF_ERR(printk("IA: bad packet, dropping it");)
25818 if (status & RX_CER) {
25819 IF_ERR(printk(" cause: packet CRC error\n");)
25820@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25821 len = dma_addr - buf_addr;
25822 if (len > iadev->rx_buf_sz) {
25823 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25824- atomic_inc(&vcc->stats->rx_err);
25825+ atomic_inc_unchecked(&vcc->stats->rx_err);
25826 goto out_free_desc;
25827 }
25828
25829@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25830 ia_vcc = INPH_IA_VCC(vcc);
25831 if (ia_vcc == NULL)
25832 {
25833- atomic_inc(&vcc->stats->rx_err);
25834+ atomic_inc_unchecked(&vcc->stats->rx_err);
25835 dev_kfree_skb_any(skb);
25836 atm_return(vcc, atm_guess_pdu2truesize(len));
25837 goto INCR_DLE;
25838@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25839 if ((length > iadev->rx_buf_sz) || (length >
25840 (skb->len - sizeof(struct cpcs_trailer))))
25841 {
25842- atomic_inc(&vcc->stats->rx_err);
25843+ atomic_inc_unchecked(&vcc->stats->rx_err);
25844 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25845 length, skb->len);)
25846 dev_kfree_skb_any(skb);
25847@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25848
25849 IF_RX(printk("rx_dle_intr: skb push");)
25850 vcc->push(vcc,skb);
25851- atomic_inc(&vcc->stats->rx);
25852+ atomic_inc_unchecked(&vcc->stats->rx);
25853 iadev->rx_pkt_cnt++;
25854 }
25855 INCR_DLE:
25856@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25857 {
25858 struct k_sonet_stats *stats;
25859 stats = &PRIV(_ia_dev[board])->sonet_stats;
25860- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25861- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25862- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25863- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25864- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25865- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25866- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25867- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25868- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25869+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25870+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25871+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25872+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25873+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25874+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25875+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25876+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25877+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25878 }
25879 ia_cmds.status = 0;
25880 break;
25881@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25882 if ((desc == 0) || (desc > iadev->num_tx_desc))
25883 {
25884 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25885- atomic_inc(&vcc->stats->tx);
25886+ atomic_inc_unchecked(&vcc->stats->tx);
25887 if (vcc->pop)
25888 vcc->pop(vcc, skb);
25889 else
25890@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25891 ATM_DESC(skb) = vcc->vci;
25892 skb_queue_tail(&iadev->tx_dma_q, skb);
25893
25894- atomic_inc(&vcc->stats->tx);
25895+ atomic_inc_unchecked(&vcc->stats->tx);
25896 iadev->tx_pkt_cnt++;
25897 /* Increment transaction counter */
25898 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25899
25900 #if 0
25901 /* add flow control logic */
25902- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25903+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25904 if (iavcc->vc_desc_cnt > 10) {
25905 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25906 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25907diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25908index f556969..0da15eb 100644
25909--- a/drivers/atm/lanai.c
25910+++ b/drivers/atm/lanai.c
25911@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25912 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25913 lanai_endtx(lanai, lvcc);
25914 lanai_free_skb(lvcc->tx.atmvcc, skb);
25915- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25916+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25917 }
25918
25919 /* Try to fill the buffer - don't call unless there is backlog */
25920@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25921 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25922 __net_timestamp(skb);
25923 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25924- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25925+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25926 out:
25927 lvcc->rx.buf.ptr = end;
25928 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25929@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25930 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25931 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25932 lanai->stats.service_rxnotaal5++;
25933- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25934+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25935 return 0;
25936 }
25937 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25938@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25939 int bytes;
25940 read_unlock(&vcc_sklist_lock);
25941 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25942- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25943+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25944 lvcc->stats.x.aal5.service_trash++;
25945 bytes = (SERVICE_GET_END(s) * 16) -
25946 (((unsigned long) lvcc->rx.buf.ptr) -
25947@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25948 }
25949 if (s & SERVICE_STREAM) {
25950 read_unlock(&vcc_sklist_lock);
25951- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25952+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25953 lvcc->stats.x.aal5.service_stream++;
25954 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25955 "PDU on VCI %d!\n", lanai->number, vci);
25956@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25957 return 0;
25958 }
25959 DPRINTK("got rx crc error on vci %d\n", vci);
25960- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25961+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25962 lvcc->stats.x.aal5.service_rxcrc++;
25963 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25964 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25965diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25966index 1c70c45..300718d 100644
25967--- a/drivers/atm/nicstar.c
25968+++ b/drivers/atm/nicstar.c
25969@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25970 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25971 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25972 card->index);
25973- atomic_inc(&vcc->stats->tx_err);
25974+ atomic_inc_unchecked(&vcc->stats->tx_err);
25975 dev_kfree_skb_any(skb);
25976 return -EINVAL;
25977 }
25978@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25979 if (!vc->tx) {
25980 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25981 card->index);
25982- atomic_inc(&vcc->stats->tx_err);
25983+ atomic_inc_unchecked(&vcc->stats->tx_err);
25984 dev_kfree_skb_any(skb);
25985 return -EINVAL;
25986 }
25987@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25988 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25989 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25990 card->index);
25991- atomic_inc(&vcc->stats->tx_err);
25992+ atomic_inc_unchecked(&vcc->stats->tx_err);
25993 dev_kfree_skb_any(skb);
25994 return -EINVAL;
25995 }
25996
25997 if (skb_shinfo(skb)->nr_frags != 0) {
25998 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25999- atomic_inc(&vcc->stats->tx_err);
26000+ atomic_inc_unchecked(&vcc->stats->tx_err);
26001 dev_kfree_skb_any(skb);
26002 return -EINVAL;
26003 }
26004@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26005 }
26006
26007 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26008- atomic_inc(&vcc->stats->tx_err);
26009+ atomic_inc_unchecked(&vcc->stats->tx_err);
26010 dev_kfree_skb_any(skb);
26011 return -EIO;
26012 }
26013- atomic_inc(&vcc->stats->tx);
26014+ atomic_inc_unchecked(&vcc->stats->tx);
26015
26016 return 0;
26017 }
26018@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26019 printk
26020 ("nicstar%d: Can't allocate buffers for aal0.\n",
26021 card->index);
26022- atomic_add(i, &vcc->stats->rx_drop);
26023+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26024 break;
26025 }
26026 if (!atm_charge(vcc, sb->truesize)) {
26027 RXPRINTK
26028 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26029 card->index);
26030- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26031+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26032 dev_kfree_skb_any(sb);
26033 break;
26034 }
26035@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26036 ATM_SKB(sb)->vcc = vcc;
26037 __net_timestamp(sb);
26038 vcc->push(vcc, sb);
26039- atomic_inc(&vcc->stats->rx);
26040+ atomic_inc_unchecked(&vcc->stats->rx);
26041 cell += ATM_CELL_PAYLOAD;
26042 }
26043
26044@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26045 if (iovb == NULL) {
26046 printk("nicstar%d: Out of iovec buffers.\n",
26047 card->index);
26048- atomic_inc(&vcc->stats->rx_drop);
26049+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26050 recycle_rx_buf(card, skb);
26051 return;
26052 }
26053@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26054 small or large buffer itself. */
26055 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26056 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26057- atomic_inc(&vcc->stats->rx_err);
26058+ atomic_inc_unchecked(&vcc->stats->rx_err);
26059 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26060 NS_MAX_IOVECS);
26061 NS_PRV_IOVCNT(iovb) = 0;
26062@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26063 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26064 card->index);
26065 which_list(card, skb);
26066- atomic_inc(&vcc->stats->rx_err);
26067+ atomic_inc_unchecked(&vcc->stats->rx_err);
26068 recycle_rx_buf(card, skb);
26069 vc->rx_iov = NULL;
26070 recycle_iov_buf(card, iovb);
26071@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26072 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26073 card->index);
26074 which_list(card, skb);
26075- atomic_inc(&vcc->stats->rx_err);
26076+ atomic_inc_unchecked(&vcc->stats->rx_err);
26077 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26078 NS_PRV_IOVCNT(iovb));
26079 vc->rx_iov = NULL;
26080@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26081 printk(" - PDU size mismatch.\n");
26082 else
26083 printk(".\n");
26084- atomic_inc(&vcc->stats->rx_err);
26085+ atomic_inc_unchecked(&vcc->stats->rx_err);
26086 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26087 NS_PRV_IOVCNT(iovb));
26088 vc->rx_iov = NULL;
26089@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26090 /* skb points to a small buffer */
26091 if (!atm_charge(vcc, skb->truesize)) {
26092 push_rxbufs(card, skb);
26093- atomic_inc(&vcc->stats->rx_drop);
26094+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26095 } else {
26096 skb_put(skb, len);
26097 dequeue_sm_buf(card, skb);
26098@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26099 ATM_SKB(skb)->vcc = vcc;
26100 __net_timestamp(skb);
26101 vcc->push(vcc, skb);
26102- atomic_inc(&vcc->stats->rx);
26103+ atomic_inc_unchecked(&vcc->stats->rx);
26104 }
26105 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26106 struct sk_buff *sb;
26107@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26108 if (len <= NS_SMBUFSIZE) {
26109 if (!atm_charge(vcc, sb->truesize)) {
26110 push_rxbufs(card, sb);
26111- atomic_inc(&vcc->stats->rx_drop);
26112+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26113 } else {
26114 skb_put(sb, len);
26115 dequeue_sm_buf(card, sb);
26116@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26117 ATM_SKB(sb)->vcc = vcc;
26118 __net_timestamp(sb);
26119 vcc->push(vcc, sb);
26120- atomic_inc(&vcc->stats->rx);
26121+ atomic_inc_unchecked(&vcc->stats->rx);
26122 }
26123
26124 push_rxbufs(card, skb);
26125@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26126
26127 if (!atm_charge(vcc, skb->truesize)) {
26128 push_rxbufs(card, skb);
26129- atomic_inc(&vcc->stats->rx_drop);
26130+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26131 } else {
26132 dequeue_lg_buf(card, skb);
26133 #ifdef NS_USE_DESTRUCTORS
26134@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26135 ATM_SKB(skb)->vcc = vcc;
26136 __net_timestamp(skb);
26137 vcc->push(vcc, skb);
26138- atomic_inc(&vcc->stats->rx);
26139+ atomic_inc_unchecked(&vcc->stats->rx);
26140 }
26141
26142 push_rxbufs(card, sb);
26143@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26144 printk
26145 ("nicstar%d: Out of huge buffers.\n",
26146 card->index);
26147- atomic_inc(&vcc->stats->rx_drop);
26148+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26149 recycle_iovec_rx_bufs(card,
26150 (struct iovec *)
26151 iovb->data,
26152@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26153 card->hbpool.count++;
26154 } else
26155 dev_kfree_skb_any(hb);
26156- atomic_inc(&vcc->stats->rx_drop);
26157+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26158 } else {
26159 /* Copy the small buffer to the huge buffer */
26160 sb = (struct sk_buff *)iov->iov_base;
26161@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26162 #endif /* NS_USE_DESTRUCTORS */
26163 __net_timestamp(hb);
26164 vcc->push(vcc, hb);
26165- atomic_inc(&vcc->stats->rx);
26166+ atomic_inc_unchecked(&vcc->stats->rx);
26167 }
26168 }
26169
26170diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26171index 5d1d076..12fbca4 100644
26172--- a/drivers/atm/solos-pci.c
26173+++ b/drivers/atm/solos-pci.c
26174@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26175 }
26176 atm_charge(vcc, skb->truesize);
26177 vcc->push(vcc, skb);
26178- atomic_inc(&vcc->stats->rx);
26179+ atomic_inc_unchecked(&vcc->stats->rx);
26180 break;
26181
26182 case PKT_STATUS:
26183@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26184 vcc = SKB_CB(oldskb)->vcc;
26185
26186 if (vcc) {
26187- atomic_inc(&vcc->stats->tx);
26188+ atomic_inc_unchecked(&vcc->stats->tx);
26189 solos_pop(vcc, oldskb);
26190 } else
26191 dev_kfree_skb_irq(oldskb);
26192diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26193index 90f1ccc..04c4a1e 100644
26194--- a/drivers/atm/suni.c
26195+++ b/drivers/atm/suni.c
26196@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26197
26198
26199 #define ADD_LIMITED(s,v) \
26200- atomic_add((v),&stats->s); \
26201- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26202+ atomic_add_unchecked((v),&stats->s); \
26203+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26204
26205
26206 static void suni_hz(unsigned long from_timer)
26207diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26208index 5120a96..e2572bd 100644
26209--- a/drivers/atm/uPD98402.c
26210+++ b/drivers/atm/uPD98402.c
26211@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26212 struct sonet_stats tmp;
26213 int error = 0;
26214
26215- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26216+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26217 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26218 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26219 if (zero && !error) {
26220@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26221
26222
26223 #define ADD_LIMITED(s,v) \
26224- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26225- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26226- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26227+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26228+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26229+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26230
26231
26232 static void stat_event(struct atm_dev *dev)
26233@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26234 if (reason & uPD98402_INT_PFM) stat_event(dev);
26235 if (reason & uPD98402_INT_PCO) {
26236 (void) GET(PCOCR); /* clear interrupt cause */
26237- atomic_add(GET(HECCT),
26238+ atomic_add_unchecked(GET(HECCT),
26239 &PRIV(dev)->sonet_stats.uncorr_hcs);
26240 }
26241 if ((reason & uPD98402_INT_RFO) &&
26242@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26243 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26244 uPD98402_INT_LOS),PIMR); /* enable them */
26245 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26246- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26247- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26248- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26249+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26250+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26251+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26252 return 0;
26253 }
26254
26255diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26256index d889f56..17eb71e 100644
26257--- a/drivers/atm/zatm.c
26258+++ b/drivers/atm/zatm.c
26259@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26260 }
26261 if (!size) {
26262 dev_kfree_skb_irq(skb);
26263- if (vcc) atomic_inc(&vcc->stats->rx_err);
26264+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26265 continue;
26266 }
26267 if (!atm_charge(vcc,skb->truesize)) {
26268@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26269 skb->len = size;
26270 ATM_SKB(skb)->vcc = vcc;
26271 vcc->push(vcc,skb);
26272- atomic_inc(&vcc->stats->rx);
26273+ atomic_inc_unchecked(&vcc->stats->rx);
26274 }
26275 zout(pos & 0xffff,MTA(mbx));
26276 #if 0 /* probably a stupid idea */
26277@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26278 skb_queue_head(&zatm_vcc->backlog,skb);
26279 break;
26280 }
26281- atomic_inc(&vcc->stats->tx);
26282+ atomic_inc_unchecked(&vcc->stats->tx);
26283 wake_up(&zatm_vcc->tx_wait);
26284 }
26285
26286diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26287index a4760e0..51283cf 100644
26288--- a/drivers/base/devtmpfs.c
26289+++ b/drivers/base/devtmpfs.c
26290@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26291 if (!thread)
26292 return 0;
26293
26294- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26295+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26296 if (err)
26297 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26298 else
26299diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26300index caf995f..6f76697 100644
26301--- a/drivers/base/power/wakeup.c
26302+++ b/drivers/base/power/wakeup.c
26303@@ -30,14 +30,14 @@ bool events_check_enabled;
26304 * They need to be modified together atomically, so it's better to use one
26305 * atomic variable to hold them both.
26306 */
26307-static atomic_t combined_event_count = ATOMIC_INIT(0);
26308+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26309
26310 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26311 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26312
26313 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26314 {
26315- unsigned int comb = atomic_read(&combined_event_count);
26316+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26317
26318 *cnt = (comb >> IN_PROGRESS_BITS);
26319 *inpr = comb & MAX_IN_PROGRESS;
26320@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26321 ws->last_time = ktime_get();
26322
26323 /* Increment the counter of events in progress. */
26324- atomic_inc(&combined_event_count);
26325+ atomic_inc_unchecked(&combined_event_count);
26326 }
26327
26328 /**
26329@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26330 * Increment the counter of registered wakeup events and decrement the
26331 * couter of wakeup events in progress simultaneously.
26332 */
26333- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26334+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26335 }
26336
26337 /**
26338diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26339index b0f553b..77b928b 100644
26340--- a/drivers/block/cciss.c
26341+++ b/drivers/block/cciss.c
26342@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26343 int err;
26344 u32 cp;
26345
26346+ memset(&arg64, 0, sizeof(arg64));
26347+
26348 err = 0;
26349 err |=
26350 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26351@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26352 while (!list_empty(&h->reqQ)) {
26353 c = list_entry(h->reqQ.next, CommandList_struct, list);
26354 /* can't do anything if fifo is full */
26355- if ((h->access.fifo_full(h))) {
26356+ if ((h->access->fifo_full(h))) {
26357 dev_warn(&h->pdev->dev, "fifo full\n");
26358 break;
26359 }
26360@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26361 h->Qdepth--;
26362
26363 /* Tell the controller execute command */
26364- h->access.submit_command(h, c);
26365+ h->access->submit_command(h, c);
26366
26367 /* Put job onto the completed Q */
26368 addQ(&h->cmpQ, c);
26369@@ -3443,17 +3445,17 @@ startio:
26370
26371 static inline unsigned long get_next_completion(ctlr_info_t *h)
26372 {
26373- return h->access.command_completed(h);
26374+ return h->access->command_completed(h);
26375 }
26376
26377 static inline int interrupt_pending(ctlr_info_t *h)
26378 {
26379- return h->access.intr_pending(h);
26380+ return h->access->intr_pending(h);
26381 }
26382
26383 static inline long interrupt_not_for_us(ctlr_info_t *h)
26384 {
26385- return ((h->access.intr_pending(h) == 0) ||
26386+ return ((h->access->intr_pending(h) == 0) ||
26387 (h->interrupts_enabled == 0));
26388 }
26389
26390@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26391 u32 a;
26392
26393 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26394- return h->access.command_completed(h);
26395+ return h->access->command_completed(h);
26396
26397 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26398 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26399@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26400 trans_support & CFGTBL_Trans_use_short_tags);
26401
26402 /* Change the access methods to the performant access methods */
26403- h->access = SA5_performant_access;
26404+ h->access = &SA5_performant_access;
26405 h->transMethod = CFGTBL_Trans_Performant;
26406
26407 return;
26408@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26409 if (prod_index < 0)
26410 return -ENODEV;
26411 h->product_name = products[prod_index].product_name;
26412- h->access = *(products[prod_index].access);
26413+ h->access = products[prod_index].access;
26414
26415 if (cciss_board_disabled(h)) {
26416 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26417@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26418 }
26419
26420 /* make sure the board interrupts are off */
26421- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26422+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26423 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26424 if (rc)
26425 goto clean2;
26426@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26427 * fake ones to scoop up any residual completions.
26428 */
26429 spin_lock_irqsave(&h->lock, flags);
26430- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26431+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26432 spin_unlock_irqrestore(&h->lock, flags);
26433 free_irq(h->intr[h->intr_mode], h);
26434 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26435@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26436 dev_info(&h->pdev->dev, "Board READY.\n");
26437 dev_info(&h->pdev->dev,
26438 "Waiting for stale completions to drain.\n");
26439- h->access.set_intr_mask(h, CCISS_INTR_ON);
26440+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26441 msleep(10000);
26442- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26443+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26444
26445 rc = controller_reset_failed(h->cfgtable);
26446 if (rc)
26447@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26448 cciss_scsi_setup(h);
26449
26450 /* Turn the interrupts on so we can service requests */
26451- h->access.set_intr_mask(h, CCISS_INTR_ON);
26452+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26453
26454 /* Get the firmware version */
26455 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26456@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26457 kfree(flush_buf);
26458 if (return_code != IO_OK)
26459 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26460- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26461+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26462 free_irq(h->intr[h->intr_mode], h);
26463 }
26464
26465diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26466index 7fda30e..eb5dfe0 100644
26467--- a/drivers/block/cciss.h
26468+++ b/drivers/block/cciss.h
26469@@ -101,7 +101,7 @@ struct ctlr_info
26470 /* information about each logical volume */
26471 drive_info_struct *drv[CISS_MAX_LUN];
26472
26473- struct access_method access;
26474+ struct access_method *access;
26475
26476 /* queue and queue Info */
26477 struct list_head reqQ;
26478diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26479index 9125bbe..eede5c8 100644
26480--- a/drivers/block/cpqarray.c
26481+++ b/drivers/block/cpqarray.c
26482@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26483 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26484 goto Enomem4;
26485 }
26486- hba[i]->access.set_intr_mask(hba[i], 0);
26487+ hba[i]->access->set_intr_mask(hba[i], 0);
26488 if (request_irq(hba[i]->intr, do_ida_intr,
26489 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26490 {
26491@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26492 add_timer(&hba[i]->timer);
26493
26494 /* Enable IRQ now that spinlock and rate limit timer are set up */
26495- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26496+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26497
26498 for(j=0; j<NWD; j++) {
26499 struct gendisk *disk = ida_gendisk[i][j];
26500@@ -694,7 +694,7 @@ DBGINFO(
26501 for(i=0; i<NR_PRODUCTS; i++) {
26502 if (board_id == products[i].board_id) {
26503 c->product_name = products[i].product_name;
26504- c->access = *(products[i].access);
26505+ c->access = products[i].access;
26506 break;
26507 }
26508 }
26509@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26510 hba[ctlr]->intr = intr;
26511 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26512 hba[ctlr]->product_name = products[j].product_name;
26513- hba[ctlr]->access = *(products[j].access);
26514+ hba[ctlr]->access = products[j].access;
26515 hba[ctlr]->ctlr = ctlr;
26516 hba[ctlr]->board_id = board_id;
26517 hba[ctlr]->pci_dev = NULL; /* not PCI */
26518@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26519
26520 while((c = h->reqQ) != NULL) {
26521 /* Can't do anything if we're busy */
26522- if (h->access.fifo_full(h) == 0)
26523+ if (h->access->fifo_full(h) == 0)
26524 return;
26525
26526 /* Get the first entry from the request Q */
26527@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26528 h->Qdepth--;
26529
26530 /* Tell the controller to do our bidding */
26531- h->access.submit_command(h, c);
26532+ h->access->submit_command(h, c);
26533
26534 /* Get onto the completion Q */
26535 addQ(&h->cmpQ, c);
26536@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26537 unsigned long flags;
26538 __u32 a,a1;
26539
26540- istat = h->access.intr_pending(h);
26541+ istat = h->access->intr_pending(h);
26542 /* Is this interrupt for us? */
26543 if (istat == 0)
26544 return IRQ_NONE;
26545@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26546 */
26547 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26548 if (istat & FIFO_NOT_EMPTY) {
26549- while((a = h->access.command_completed(h))) {
26550+ while((a = h->access->command_completed(h))) {
26551 a1 = a; a &= ~3;
26552 if ((c = h->cmpQ) == NULL)
26553 {
26554@@ -1449,11 +1449,11 @@ static int sendcmd(
26555 /*
26556 * Disable interrupt
26557 */
26558- info_p->access.set_intr_mask(info_p, 0);
26559+ info_p->access->set_intr_mask(info_p, 0);
26560 /* Make sure there is room in the command FIFO */
26561 /* Actually it should be completely empty at this time. */
26562 for (i = 200000; i > 0; i--) {
26563- temp = info_p->access.fifo_full(info_p);
26564+ temp = info_p->access->fifo_full(info_p);
26565 if (temp != 0) {
26566 break;
26567 }
26568@@ -1466,7 +1466,7 @@ DBG(
26569 /*
26570 * Send the cmd
26571 */
26572- info_p->access.submit_command(info_p, c);
26573+ info_p->access->submit_command(info_p, c);
26574 complete = pollcomplete(ctlr);
26575
26576 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26577@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26578 * we check the new geometry. Then turn interrupts back on when
26579 * we're done.
26580 */
26581- host->access.set_intr_mask(host, 0);
26582+ host->access->set_intr_mask(host, 0);
26583 getgeometry(ctlr);
26584- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26585+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26586
26587 for(i=0; i<NWD; i++) {
26588 struct gendisk *disk = ida_gendisk[ctlr][i];
26589@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26590 /* Wait (up to 2 seconds) for a command to complete */
26591
26592 for (i = 200000; i > 0; i--) {
26593- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26594+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26595 if (done == 0) {
26596 udelay(10); /* a short fixed delay */
26597 } else
26598diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26599index be73e9d..7fbf140 100644
26600--- a/drivers/block/cpqarray.h
26601+++ b/drivers/block/cpqarray.h
26602@@ -99,7 +99,7 @@ struct ctlr_info {
26603 drv_info_t drv[NWD];
26604 struct proc_dir_entry *proc;
26605
26606- struct access_method access;
26607+ struct access_method *access;
26608
26609 cmdlist_t *reqQ;
26610 cmdlist_t *cmpQ;
26611diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26612index 9cf2035..bffca95 100644
26613--- a/drivers/block/drbd/drbd_int.h
26614+++ b/drivers/block/drbd/drbd_int.h
26615@@ -736,7 +736,7 @@ struct drbd_request;
26616 struct drbd_epoch {
26617 struct list_head list;
26618 unsigned int barrier_nr;
26619- atomic_t epoch_size; /* increased on every request added. */
26620+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26621 atomic_t active; /* increased on every req. added, and dec on every finished. */
26622 unsigned long flags;
26623 };
26624@@ -1108,7 +1108,7 @@ struct drbd_conf {
26625 void *int_dig_in;
26626 void *int_dig_vv;
26627 wait_queue_head_t seq_wait;
26628- atomic_t packet_seq;
26629+ atomic_unchecked_t packet_seq;
26630 unsigned int peer_seq;
26631 spinlock_t peer_seq_lock;
26632 unsigned int minor;
26633@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26634
26635 static inline void drbd_tcp_cork(struct socket *sock)
26636 {
26637- int __user val = 1;
26638+ int val = 1;
26639 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26640- (char __user *)&val, sizeof(val));
26641+ (char __force_user *)&val, sizeof(val));
26642 }
26643
26644 static inline void drbd_tcp_uncork(struct socket *sock)
26645 {
26646- int __user val = 0;
26647+ int val = 0;
26648 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26649- (char __user *)&val, sizeof(val));
26650+ (char __force_user *)&val, sizeof(val));
26651 }
26652
26653 static inline void drbd_tcp_nodelay(struct socket *sock)
26654 {
26655- int __user val = 1;
26656+ int val = 1;
26657 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26658- (char __user *)&val, sizeof(val));
26659+ (char __force_user *)&val, sizeof(val));
26660 }
26661
26662 static inline void drbd_tcp_quickack(struct socket *sock)
26663 {
26664- int __user val = 2;
26665+ int val = 2;
26666 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26667- (char __user *)&val, sizeof(val));
26668+ (char __force_user *)&val, sizeof(val));
26669 }
26670
26671 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26672diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26673index 0358e55..bc33689 100644
26674--- a/drivers/block/drbd/drbd_main.c
26675+++ b/drivers/block/drbd/drbd_main.c
26676@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26677 p.sector = sector;
26678 p.block_id = block_id;
26679 p.blksize = blksize;
26680- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26681+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26682
26683 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26684 return false;
26685@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26686 p.sector = cpu_to_be64(req->sector);
26687 p.block_id = (unsigned long)req;
26688 p.seq_num = cpu_to_be32(req->seq_num =
26689- atomic_add_return(1, &mdev->packet_seq));
26690+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26691
26692 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26693
26694@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26695 atomic_set(&mdev->unacked_cnt, 0);
26696 atomic_set(&mdev->local_cnt, 0);
26697 atomic_set(&mdev->net_cnt, 0);
26698- atomic_set(&mdev->packet_seq, 0);
26699+ atomic_set_unchecked(&mdev->packet_seq, 0);
26700 atomic_set(&mdev->pp_in_use, 0);
26701 atomic_set(&mdev->pp_in_use_by_net, 0);
26702 atomic_set(&mdev->rs_sect_in, 0);
26703@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26704 mdev->receiver.t_state);
26705
26706 /* no need to lock it, I'm the only thread alive */
26707- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26708- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26709+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26710+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26711 mdev->al_writ_cnt =
26712 mdev->bm_writ_cnt =
26713 mdev->read_cnt =
26714diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26715index af2a250..219c74b 100644
26716--- a/drivers/block/drbd/drbd_nl.c
26717+++ b/drivers/block/drbd/drbd_nl.c
26718@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26719 module_put(THIS_MODULE);
26720 }
26721
26722-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26723+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26724
26725 static unsigned short *
26726 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26727@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26728 cn_reply->id.idx = CN_IDX_DRBD;
26729 cn_reply->id.val = CN_VAL_DRBD;
26730
26731- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26732+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26733 cn_reply->ack = 0; /* not used here. */
26734 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26735 (int)((char *)tl - (char *)reply->tag_list);
26736@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26737 cn_reply->id.idx = CN_IDX_DRBD;
26738 cn_reply->id.val = CN_VAL_DRBD;
26739
26740- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26741+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26742 cn_reply->ack = 0; /* not used here. */
26743 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26744 (int)((char *)tl - (char *)reply->tag_list);
26745@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26746 cn_reply->id.idx = CN_IDX_DRBD;
26747 cn_reply->id.val = CN_VAL_DRBD;
26748
26749- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26750+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26751 cn_reply->ack = 0; // not used here.
26752 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26753 (int)((char*)tl - (char*)reply->tag_list);
26754@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26755 cn_reply->id.idx = CN_IDX_DRBD;
26756 cn_reply->id.val = CN_VAL_DRBD;
26757
26758- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26759+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26760 cn_reply->ack = 0; /* not used here. */
26761 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26762 (int)((char *)tl - (char *)reply->tag_list);
26763diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26764index 43beaca..4a5b1dd 100644
26765--- a/drivers/block/drbd/drbd_receiver.c
26766+++ b/drivers/block/drbd/drbd_receiver.c
26767@@ -894,7 +894,7 @@ retry:
26768 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26769 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26770
26771- atomic_set(&mdev->packet_seq, 0);
26772+ atomic_set_unchecked(&mdev->packet_seq, 0);
26773 mdev->peer_seq = 0;
26774
26775 drbd_thread_start(&mdev->asender);
26776@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26777 do {
26778 next_epoch = NULL;
26779
26780- epoch_size = atomic_read(&epoch->epoch_size);
26781+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26782
26783 switch (ev & ~EV_CLEANUP) {
26784 case EV_PUT:
26785@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26786 rv = FE_DESTROYED;
26787 } else {
26788 epoch->flags = 0;
26789- atomic_set(&epoch->epoch_size, 0);
26790+ atomic_set_unchecked(&epoch->epoch_size, 0);
26791 /* atomic_set(&epoch->active, 0); is already zero */
26792 if (rv == FE_STILL_LIVE)
26793 rv = FE_RECYCLED;
26794@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26795 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26796 drbd_flush(mdev);
26797
26798- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26799+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26800 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26801 if (epoch)
26802 break;
26803 }
26804
26805 epoch = mdev->current_epoch;
26806- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26807+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26808
26809 D_ASSERT(atomic_read(&epoch->active) == 0);
26810 D_ASSERT(epoch->flags == 0);
26811@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26812 }
26813
26814 epoch->flags = 0;
26815- atomic_set(&epoch->epoch_size, 0);
26816+ atomic_set_unchecked(&epoch->epoch_size, 0);
26817 atomic_set(&epoch->active, 0);
26818
26819 spin_lock(&mdev->epoch_lock);
26820- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26821+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26822 list_add(&epoch->list, &mdev->current_epoch->list);
26823 mdev->current_epoch = epoch;
26824 mdev->epochs++;
26825@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26826 spin_unlock(&mdev->peer_seq_lock);
26827
26828 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26829- atomic_inc(&mdev->current_epoch->epoch_size);
26830+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26831 return drbd_drain_block(mdev, data_size);
26832 }
26833
26834@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26835
26836 spin_lock(&mdev->epoch_lock);
26837 e->epoch = mdev->current_epoch;
26838- atomic_inc(&e->epoch->epoch_size);
26839+ atomic_inc_unchecked(&e->epoch->epoch_size);
26840 atomic_inc(&e->epoch->active);
26841 spin_unlock(&mdev->epoch_lock);
26842
26843@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26844 D_ASSERT(list_empty(&mdev->done_ee));
26845
26846 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26847- atomic_set(&mdev->current_epoch->epoch_size, 0);
26848+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26849 D_ASSERT(list_empty(&mdev->current_epoch->list));
26850 }
26851
26852diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26853index 1e888c9..05cf1b0 100644
26854--- a/drivers/block/loop.c
26855+++ b/drivers/block/loop.c
26856@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26857 mm_segment_t old_fs = get_fs();
26858
26859 set_fs(get_ds());
26860- bw = file->f_op->write(file, buf, len, &pos);
26861+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26862 set_fs(old_fs);
26863 if (likely(bw == len))
26864 return 0;
26865diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26866index 4364303..9adf4ee 100644
26867--- a/drivers/char/Kconfig
26868+++ b/drivers/char/Kconfig
26869@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26870
26871 config DEVKMEM
26872 bool "/dev/kmem virtual device support"
26873- default y
26874+ default n
26875+ depends on !GRKERNSEC_KMEM
26876 help
26877 Say Y here if you want to support the /dev/kmem device. The
26878 /dev/kmem device is rarely used, but can be used for certain
26879@@ -596,6 +597,7 @@ config DEVPORT
26880 bool
26881 depends on !M68K
26882 depends on ISA || PCI
26883+ depends on !GRKERNSEC_KMEM
26884 default y
26885
26886 source "drivers/s390/char/Kconfig"
26887diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26888index 2e04433..22afc64 100644
26889--- a/drivers/char/agp/frontend.c
26890+++ b/drivers/char/agp/frontend.c
26891@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26892 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26893 return -EFAULT;
26894
26895- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26896+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26897 return -EFAULT;
26898
26899 client = agp_find_client_by_pid(reserve.pid);
26900diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26901index 095ab90..afad0a4 100644
26902--- a/drivers/char/briq_panel.c
26903+++ b/drivers/char/briq_panel.c
26904@@ -9,6 +9,7 @@
26905 #include <linux/types.h>
26906 #include <linux/errno.h>
26907 #include <linux/tty.h>
26908+#include <linux/mutex.h>
26909 #include <linux/timer.h>
26910 #include <linux/kernel.h>
26911 #include <linux/wait.h>
26912@@ -34,6 +35,7 @@ static int vfd_is_open;
26913 static unsigned char vfd[40];
26914 static int vfd_cursor;
26915 static unsigned char ledpb, led;
26916+static DEFINE_MUTEX(vfd_mutex);
26917
26918 static void update_vfd(void)
26919 {
26920@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26921 if (!vfd_is_open)
26922 return -EBUSY;
26923
26924+ mutex_lock(&vfd_mutex);
26925 for (;;) {
26926 char c;
26927 if (!indx)
26928 break;
26929- if (get_user(c, buf))
26930+ if (get_user(c, buf)) {
26931+ mutex_unlock(&vfd_mutex);
26932 return -EFAULT;
26933+ }
26934 if (esc) {
26935 set_led(c);
26936 esc = 0;
26937@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26938 buf++;
26939 }
26940 update_vfd();
26941+ mutex_unlock(&vfd_mutex);
26942
26943 return len;
26944 }
26945diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26946index f773a9d..65cd683 100644
26947--- a/drivers/char/genrtc.c
26948+++ b/drivers/char/genrtc.c
26949@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26950 switch (cmd) {
26951
26952 case RTC_PLL_GET:
26953+ memset(&pll, 0, sizeof(pll));
26954 if (get_rtc_pll(&pll))
26955 return -EINVAL;
26956 else
26957diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26958index 0833896..cccce52 100644
26959--- a/drivers/char/hpet.c
26960+++ b/drivers/char/hpet.c
26961@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26962 }
26963
26964 static int
26965-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26966+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26967 struct hpet_info *info)
26968 {
26969 struct hpet_timer __iomem *timer;
26970diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26971index 58c0e63..46c16bf 100644
26972--- a/drivers/char/ipmi/ipmi_msghandler.c
26973+++ b/drivers/char/ipmi/ipmi_msghandler.c
26974@@ -415,7 +415,7 @@ struct ipmi_smi {
26975 struct proc_dir_entry *proc_dir;
26976 char proc_dir_name[10];
26977
26978- atomic_t stats[IPMI_NUM_STATS];
26979+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26980
26981 /*
26982 * run_to_completion duplicate of smb_info, smi_info
26983@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26984
26985
26986 #define ipmi_inc_stat(intf, stat) \
26987- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26988+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26989 #define ipmi_get_stat(intf, stat) \
26990- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26991+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26992
26993 static int is_lan_addr(struct ipmi_addr *addr)
26994 {
26995@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26996 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26997 init_waitqueue_head(&intf->waitq);
26998 for (i = 0; i < IPMI_NUM_STATS; i++)
26999- atomic_set(&intf->stats[i], 0);
27000+ atomic_set_unchecked(&intf->stats[i], 0);
27001
27002 intf->proc_dir = NULL;
27003
27004diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27005index 9397ab4..d01bee1 100644
27006--- a/drivers/char/ipmi/ipmi_si_intf.c
27007+++ b/drivers/char/ipmi/ipmi_si_intf.c
27008@@ -277,7 +277,7 @@ struct smi_info {
27009 unsigned char slave_addr;
27010
27011 /* Counters and things for the proc filesystem. */
27012- atomic_t stats[SI_NUM_STATS];
27013+ atomic_unchecked_t stats[SI_NUM_STATS];
27014
27015 struct task_struct *thread;
27016
27017@@ -286,9 +286,9 @@ struct smi_info {
27018 };
27019
27020 #define smi_inc_stat(smi, stat) \
27021- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27022+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27023 #define smi_get_stat(smi, stat) \
27024- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27025+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27026
27027 #define SI_MAX_PARMS 4
27028
27029@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27030 atomic_set(&new_smi->req_events, 0);
27031 new_smi->run_to_completion = 0;
27032 for (i = 0; i < SI_NUM_STATS; i++)
27033- atomic_set(&new_smi->stats[i], 0);
27034+ atomic_set_unchecked(&new_smi->stats[i], 0);
27035
27036 new_smi->interrupt_disabled = 1;
27037 atomic_set(&new_smi->stop_operation, 0);
27038diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27039index 1aeaaba..e018570 100644
27040--- a/drivers/char/mbcs.c
27041+++ b/drivers/char/mbcs.c
27042@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27043 return 0;
27044 }
27045
27046-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27047+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27048 {
27049 .part_num = MBCS_PART_NUM,
27050 .mfg_num = MBCS_MFG_NUM,
27051diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27052index 1451790..f705c30 100644
27053--- a/drivers/char/mem.c
27054+++ b/drivers/char/mem.c
27055@@ -18,6 +18,7 @@
27056 #include <linux/raw.h>
27057 #include <linux/tty.h>
27058 #include <linux/capability.h>
27059+#include <linux/security.h>
27060 #include <linux/ptrace.h>
27061 #include <linux/device.h>
27062 #include <linux/highmem.h>
27063@@ -35,6 +36,10 @@
27064 # include <linux/efi.h>
27065 #endif
27066
27067+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27068+extern const struct file_operations grsec_fops;
27069+#endif
27070+
27071 static inline unsigned long size_inside_page(unsigned long start,
27072 unsigned long size)
27073 {
27074@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27075
27076 while (cursor < to) {
27077 if (!devmem_is_allowed(pfn)) {
27078+#ifdef CONFIG_GRKERNSEC_KMEM
27079+ gr_handle_mem_readwrite(from, to);
27080+#else
27081 printk(KERN_INFO
27082 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27083 current->comm, from, to);
27084+#endif
27085 return 0;
27086 }
27087 cursor += PAGE_SIZE;
27088@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27089 }
27090 return 1;
27091 }
27092+#elif defined(CONFIG_GRKERNSEC_KMEM)
27093+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27094+{
27095+ return 0;
27096+}
27097 #else
27098 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27099 {
27100@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27101
27102 while (count > 0) {
27103 unsigned long remaining;
27104+ char *temp;
27105
27106 sz = size_inside_page(p, count);
27107
27108@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27109 if (!ptr)
27110 return -EFAULT;
27111
27112- remaining = copy_to_user(buf, ptr, sz);
27113+#ifdef CONFIG_PAX_USERCOPY
27114+ temp = kmalloc(sz, GFP_KERNEL);
27115+ if (!temp) {
27116+ unxlate_dev_mem_ptr(p, ptr);
27117+ return -ENOMEM;
27118+ }
27119+ memcpy(temp, ptr, sz);
27120+#else
27121+ temp = ptr;
27122+#endif
27123+
27124+ remaining = copy_to_user(buf, temp, sz);
27125+
27126+#ifdef CONFIG_PAX_USERCOPY
27127+ kfree(temp);
27128+#endif
27129+
27130 unxlate_dev_mem_ptr(p, ptr);
27131 if (remaining)
27132 return -EFAULT;
27133@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27134 size_t count, loff_t *ppos)
27135 {
27136 unsigned long p = *ppos;
27137- ssize_t low_count, read, sz;
27138+ ssize_t low_count, read, sz, err = 0;
27139 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27140- int err = 0;
27141
27142 read = 0;
27143 if (p < (unsigned long) high_memory) {
27144@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27145 }
27146 #endif
27147 while (low_count > 0) {
27148+ char *temp;
27149+
27150 sz = size_inside_page(p, low_count);
27151
27152 /*
27153@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27154 */
27155 kbuf = xlate_dev_kmem_ptr((char *)p);
27156
27157- if (copy_to_user(buf, kbuf, sz))
27158+#ifdef CONFIG_PAX_USERCOPY
27159+ temp = kmalloc(sz, GFP_KERNEL);
27160+ if (!temp)
27161+ return -ENOMEM;
27162+ memcpy(temp, kbuf, sz);
27163+#else
27164+ temp = kbuf;
27165+#endif
27166+
27167+ err = copy_to_user(buf, temp, sz);
27168+
27169+#ifdef CONFIG_PAX_USERCOPY
27170+ kfree(temp);
27171+#endif
27172+
27173+ if (err)
27174 return -EFAULT;
27175 buf += sz;
27176 p += sz;
27177@@ -867,6 +914,9 @@ static const struct memdev {
27178 #ifdef CONFIG_CRASH_DUMP
27179 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27180 #endif
27181+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27182+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27183+#endif
27184 };
27185
27186 static int memory_open(struct inode *inode, struct file *filp)
27187diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27188index da3cfee..a5a6606 100644
27189--- a/drivers/char/nvram.c
27190+++ b/drivers/char/nvram.c
27191@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27192
27193 spin_unlock_irq(&rtc_lock);
27194
27195- if (copy_to_user(buf, contents, tmp - contents))
27196+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27197 return -EFAULT;
27198
27199 *ppos = i;
27200diff --git a/drivers/char/random.c b/drivers/char/random.c
27201index 6035ab8..bdfe4fd 100644
27202--- a/drivers/char/random.c
27203+++ b/drivers/char/random.c
27204@@ -261,8 +261,13 @@
27205 /*
27206 * Configuration information
27207 */
27208+#ifdef CONFIG_GRKERNSEC_RANDNET
27209+#define INPUT_POOL_WORDS 512
27210+#define OUTPUT_POOL_WORDS 128
27211+#else
27212 #define INPUT_POOL_WORDS 128
27213 #define OUTPUT_POOL_WORDS 32
27214+#endif
27215 #define SEC_XFER_SIZE 512
27216 #define EXTRACT_SIZE 10
27217
27218@@ -300,10 +305,17 @@ static struct poolinfo {
27219 int poolwords;
27220 int tap1, tap2, tap3, tap4, tap5;
27221 } poolinfo_table[] = {
27222+#ifdef CONFIG_GRKERNSEC_RANDNET
27223+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27224+ { 512, 411, 308, 208, 104, 1 },
27225+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27226+ { 128, 103, 76, 51, 25, 1 },
27227+#else
27228 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27229 { 128, 103, 76, 51, 25, 1 },
27230 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27231 { 32, 26, 20, 14, 7, 1 },
27232+#endif
27233 #if 0
27234 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27235 { 2048, 1638, 1231, 819, 411, 1 },
27236@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27237
27238 extract_buf(r, tmp);
27239 i = min_t(int, nbytes, EXTRACT_SIZE);
27240- if (copy_to_user(buf, tmp, i)) {
27241+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27242 ret = -EFAULT;
27243 break;
27244 }
27245@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27246 #include <linux/sysctl.h>
27247
27248 static int min_read_thresh = 8, min_write_thresh;
27249-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27250+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27251 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27252 static char sysctl_bootid[16];
27253
27254diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27255index 1ee8ce7..b778bef 100644
27256--- a/drivers/char/sonypi.c
27257+++ b/drivers/char/sonypi.c
27258@@ -55,6 +55,7 @@
27259 #include <asm/uaccess.h>
27260 #include <asm/io.h>
27261 #include <asm/system.h>
27262+#include <asm/local.h>
27263
27264 #include <linux/sonypi.h>
27265
27266@@ -491,7 +492,7 @@ static struct sonypi_device {
27267 spinlock_t fifo_lock;
27268 wait_queue_head_t fifo_proc_list;
27269 struct fasync_struct *fifo_async;
27270- int open_count;
27271+ local_t open_count;
27272 int model;
27273 struct input_dev *input_jog_dev;
27274 struct input_dev *input_key_dev;
27275@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27276 static int sonypi_misc_release(struct inode *inode, struct file *file)
27277 {
27278 mutex_lock(&sonypi_device.lock);
27279- sonypi_device.open_count--;
27280+ local_dec(&sonypi_device.open_count);
27281 mutex_unlock(&sonypi_device.lock);
27282 return 0;
27283 }
27284@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27285 {
27286 mutex_lock(&sonypi_device.lock);
27287 /* Flush input queue on first open */
27288- if (!sonypi_device.open_count)
27289+ if (!local_read(&sonypi_device.open_count))
27290 kfifo_reset(&sonypi_device.fifo);
27291- sonypi_device.open_count++;
27292+ local_inc(&sonypi_device.open_count);
27293 mutex_unlock(&sonypi_device.lock);
27294
27295 return 0;
27296diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27297index 361a1df..2471eee 100644
27298--- a/drivers/char/tpm/tpm.c
27299+++ b/drivers/char/tpm/tpm.c
27300@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27301 chip->vendor.req_complete_val)
27302 goto out_recv;
27303
27304- if ((status == chip->vendor.req_canceled)) {
27305+ if (status == chip->vendor.req_canceled) {
27306 dev_err(chip->dev, "Operation Canceled\n");
27307 rc = -ECANCELED;
27308 goto out;
27309diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27310index 0636520..169c1d0 100644
27311--- a/drivers/char/tpm/tpm_bios.c
27312+++ b/drivers/char/tpm/tpm_bios.c
27313@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27314 event = addr;
27315
27316 if ((event->event_type == 0 && event->event_size == 0) ||
27317- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27318+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27319 return NULL;
27320
27321 return addr;
27322@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27323 return NULL;
27324
27325 if ((event->event_type == 0 && event->event_size == 0) ||
27326- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27327+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27328 return NULL;
27329
27330 (*pos)++;
27331@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27332 int i;
27333
27334 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27335- seq_putc(m, data[i]);
27336+ if (!seq_putc(m, data[i]))
27337+ return -EFAULT;
27338
27339 return 0;
27340 }
27341@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27342 log->bios_event_log_end = log->bios_event_log + len;
27343
27344 virt = acpi_os_map_memory(start, len);
27345+ if (!virt) {
27346+ kfree(log->bios_event_log);
27347+ log->bios_event_log = NULL;
27348+ return -EFAULT;
27349+ }
27350
27351- memcpy(log->bios_event_log, virt, len);
27352+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27353
27354 acpi_os_unmap_memory(virt, len);
27355 return 0;
27356diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27357index 8e3c46d..c139b99 100644
27358--- a/drivers/char/virtio_console.c
27359+++ b/drivers/char/virtio_console.c
27360@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27361 if (to_user) {
27362 ssize_t ret;
27363
27364- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27365+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27366 if (ret)
27367 return -EFAULT;
27368 } else {
27369@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27370 if (!port_has_data(port) && !port->host_connected)
27371 return 0;
27372
27373- return fill_readbuf(port, ubuf, count, true);
27374+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27375 }
27376
27377 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27378diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27379index eb1d864..39ee5a7 100644
27380--- a/drivers/dma/dmatest.c
27381+++ b/drivers/dma/dmatest.c
27382@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27383 }
27384 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27385 cnt = dmatest_add_threads(dtc, DMA_PQ);
27386- thread_count += cnt > 0 ?: 0;
27387+ thread_count += cnt > 0 ? cnt : 0;
27388 }
27389
27390 pr_info("dmatest: Started %u threads using %s\n",
27391diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27392index c9eee6d..f9d5280 100644
27393--- a/drivers/edac/amd64_edac.c
27394+++ b/drivers/edac/amd64_edac.c
27395@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27396 * PCI core identifies what devices are on a system during boot, and then
27397 * inquiry this table to see if this driver is for a given device found.
27398 */
27399-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27400+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27401 {
27402 .vendor = PCI_VENDOR_ID_AMD,
27403 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27404diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27405index e47e73b..348e0bd 100644
27406--- a/drivers/edac/amd76x_edac.c
27407+++ b/drivers/edac/amd76x_edac.c
27408@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27409 edac_mc_free(mci);
27410 }
27411
27412-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27413+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27414 {
27415 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27416 AMD762},
27417diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27418index 1af531a..3a8ff27 100644
27419--- a/drivers/edac/e752x_edac.c
27420+++ b/drivers/edac/e752x_edac.c
27421@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27422 edac_mc_free(mci);
27423 }
27424
27425-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27426+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27427 {
27428 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27429 E7520},
27430diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27431index 6ffb6d2..383d8d7 100644
27432--- a/drivers/edac/e7xxx_edac.c
27433+++ b/drivers/edac/e7xxx_edac.c
27434@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27435 edac_mc_free(mci);
27436 }
27437
27438-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27439+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27440 {
27441 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27442 E7205},
27443diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27444index 495198a..ac08c85 100644
27445--- a/drivers/edac/edac_pci_sysfs.c
27446+++ b/drivers/edac/edac_pci_sysfs.c
27447@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27448 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27449 static int edac_pci_poll_msec = 1000; /* one second workq period */
27450
27451-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27452-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27453+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27454+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27455
27456 static struct kobject *edac_pci_top_main_kobj;
27457 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27458@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27459 edac_printk(KERN_CRIT, EDAC_PCI,
27460 "Signaled System Error on %s\n",
27461 pci_name(dev));
27462- atomic_inc(&pci_nonparity_count);
27463+ atomic_inc_unchecked(&pci_nonparity_count);
27464 }
27465
27466 if (status & (PCI_STATUS_PARITY)) {
27467@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27468 "Master Data Parity Error on %s\n",
27469 pci_name(dev));
27470
27471- atomic_inc(&pci_parity_count);
27472+ atomic_inc_unchecked(&pci_parity_count);
27473 }
27474
27475 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27476@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27477 "Detected Parity Error on %s\n",
27478 pci_name(dev));
27479
27480- atomic_inc(&pci_parity_count);
27481+ atomic_inc_unchecked(&pci_parity_count);
27482 }
27483 }
27484
27485@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27486 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27487 "Signaled System Error on %s\n",
27488 pci_name(dev));
27489- atomic_inc(&pci_nonparity_count);
27490+ atomic_inc_unchecked(&pci_nonparity_count);
27491 }
27492
27493 if (status & (PCI_STATUS_PARITY)) {
27494@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27495 "Master Data Parity Error on "
27496 "%s\n", pci_name(dev));
27497
27498- atomic_inc(&pci_parity_count);
27499+ atomic_inc_unchecked(&pci_parity_count);
27500 }
27501
27502 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27503@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27504 "Detected Parity Error on %s\n",
27505 pci_name(dev));
27506
27507- atomic_inc(&pci_parity_count);
27508+ atomic_inc_unchecked(&pci_parity_count);
27509 }
27510 }
27511 }
27512@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27513 if (!check_pci_errors)
27514 return;
27515
27516- before_count = atomic_read(&pci_parity_count);
27517+ before_count = atomic_read_unchecked(&pci_parity_count);
27518
27519 /* scan all PCI devices looking for a Parity Error on devices and
27520 * bridges.
27521@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27522 /* Only if operator has selected panic on PCI Error */
27523 if (edac_pci_get_panic_on_pe()) {
27524 /* If the count is different 'after' from 'before' */
27525- if (before_count != atomic_read(&pci_parity_count))
27526+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27527 panic("EDAC: PCI Parity Error");
27528 }
27529 }
27530diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27531index c0510b3..6e2a954 100644
27532--- a/drivers/edac/i3000_edac.c
27533+++ b/drivers/edac/i3000_edac.c
27534@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27535 edac_mc_free(mci);
27536 }
27537
27538-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27539+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27540 {
27541 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27542 I3000},
27543diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27544index aa08497..7e6822a 100644
27545--- a/drivers/edac/i3200_edac.c
27546+++ b/drivers/edac/i3200_edac.c
27547@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27548 edac_mc_free(mci);
27549 }
27550
27551-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27552+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27553 {
27554 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27555 I3200},
27556diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27557index 4dc3ac2..67d05a6 100644
27558--- a/drivers/edac/i5000_edac.c
27559+++ b/drivers/edac/i5000_edac.c
27560@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27561 *
27562 * The "E500P" device is the first device supported.
27563 */
27564-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27565+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27566 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27567 .driver_data = I5000P},
27568
27569diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27570index bcbdeec..9886d16 100644
27571--- a/drivers/edac/i5100_edac.c
27572+++ b/drivers/edac/i5100_edac.c
27573@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27574 edac_mc_free(mci);
27575 }
27576
27577-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27578+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27579 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27581 { 0, }
27582diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27583index 74d6ec34..baff517 100644
27584--- a/drivers/edac/i5400_edac.c
27585+++ b/drivers/edac/i5400_edac.c
27586@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27587 *
27588 * The "E500P" device is the first device supported.
27589 */
27590-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27591+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27592 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27593 {0,} /* 0 terminated list. */
27594 };
27595diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27596index 6104dba..e7ea8e1 100644
27597--- a/drivers/edac/i7300_edac.c
27598+++ b/drivers/edac/i7300_edac.c
27599@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27600 *
27601 * Has only 8086:360c PCI ID
27602 */
27603-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27604+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27605 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27606 {0,} /* 0 terminated list. */
27607 };
27608diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27609index 70ad892..178943c 100644
27610--- a/drivers/edac/i7core_edac.c
27611+++ b/drivers/edac/i7core_edac.c
27612@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27613 /*
27614 * pci_device_id table for which devices we are looking for
27615 */
27616-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27617+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27618 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27619 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27620 {0,} /* 0 terminated list. */
27621diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27622index 4329d39..f3022ef 100644
27623--- a/drivers/edac/i82443bxgx_edac.c
27624+++ b/drivers/edac/i82443bxgx_edac.c
27625@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27626
27627 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27628
27629-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27630+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27631 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27632 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27633 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27634diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27635index 931a057..fd28340 100644
27636--- a/drivers/edac/i82860_edac.c
27637+++ b/drivers/edac/i82860_edac.c
27638@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27639 edac_mc_free(mci);
27640 }
27641
27642-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27643+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27644 {
27645 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27646 I82860},
27647diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27648index 33864c6..01edc61 100644
27649--- a/drivers/edac/i82875p_edac.c
27650+++ b/drivers/edac/i82875p_edac.c
27651@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27652 edac_mc_free(mci);
27653 }
27654
27655-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27656+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27657 {
27658 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27659 I82875P},
27660diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27661index a5da732..983363b 100644
27662--- a/drivers/edac/i82975x_edac.c
27663+++ b/drivers/edac/i82975x_edac.c
27664@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27665 edac_mc_free(mci);
27666 }
27667
27668-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27669+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27670 {
27671 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27672 I82975X
27673diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27674index 0106747..0b40417 100644
27675--- a/drivers/edac/mce_amd.h
27676+++ b/drivers/edac/mce_amd.h
27677@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27678 bool (*dc_mce)(u16, u8);
27679 bool (*ic_mce)(u16, u8);
27680 bool (*nb_mce)(u16, u8);
27681-};
27682+} __no_const;
27683
27684 void amd_report_gart_errors(bool);
27685 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27686diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27687index b153674..ad2ba9b 100644
27688--- a/drivers/edac/r82600_edac.c
27689+++ b/drivers/edac/r82600_edac.c
27690@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27691 edac_mc_free(mci);
27692 }
27693
27694-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27695+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27696 {
27697 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27698 },
27699diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27700index 7a402bf..af0b211 100644
27701--- a/drivers/edac/sb_edac.c
27702+++ b/drivers/edac/sb_edac.c
27703@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27704 /*
27705 * pci_device_id table for which devices we are looking for
27706 */
27707-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27708+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27709 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27710 {0,} /* 0 terminated list. */
27711 };
27712diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27713index b6f47de..c5acf3a 100644
27714--- a/drivers/edac/x38_edac.c
27715+++ b/drivers/edac/x38_edac.c
27716@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27717 edac_mc_free(mci);
27718 }
27719
27720-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27721+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27722 {
27723 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27724 X38},
27725diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27726index 85661b0..c784559a 100644
27727--- a/drivers/firewire/core-card.c
27728+++ b/drivers/firewire/core-card.c
27729@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27730
27731 void fw_core_remove_card(struct fw_card *card)
27732 {
27733- struct fw_card_driver dummy_driver = dummy_driver_template;
27734+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27735
27736 card->driver->update_phy_reg(card, 4,
27737 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27738diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27739index 4799393..37bd3ab 100644
27740--- a/drivers/firewire/core-cdev.c
27741+++ b/drivers/firewire/core-cdev.c
27742@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27743 int ret;
27744
27745 if ((request->channels == 0 && request->bandwidth == 0) ||
27746- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27747- request->bandwidth < 0)
27748+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27749 return -EINVAL;
27750
27751 r = kmalloc(sizeof(*r), GFP_KERNEL);
27752diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27753index 855ab3f..11f4bbd 100644
27754--- a/drivers/firewire/core-transaction.c
27755+++ b/drivers/firewire/core-transaction.c
27756@@ -37,6 +37,7 @@
27757 #include <linux/timer.h>
27758 #include <linux/types.h>
27759 #include <linux/workqueue.h>
27760+#include <linux/sched.h>
27761
27762 #include <asm/byteorder.h>
27763
27764diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27765index b45be57..5fad18b 100644
27766--- a/drivers/firewire/core.h
27767+++ b/drivers/firewire/core.h
27768@@ -101,6 +101,7 @@ struct fw_card_driver {
27769
27770 int (*stop_iso)(struct fw_iso_context *ctx);
27771 };
27772+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27773
27774 void fw_card_initialize(struct fw_card *card,
27775 const struct fw_card_driver *driver, struct device *device);
27776diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27777index 153980b..4b4d046 100644
27778--- a/drivers/firmware/dmi_scan.c
27779+++ b/drivers/firmware/dmi_scan.c
27780@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27781 }
27782 }
27783 else {
27784- /*
27785- * no iounmap() for that ioremap(); it would be a no-op, but
27786- * it's so early in setup that sucker gets confused into doing
27787- * what it shouldn't if we actually call it.
27788- */
27789 p = dmi_ioremap(0xF0000, 0x10000);
27790 if (p == NULL)
27791 goto error;
27792@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27793 if (buf == NULL)
27794 return -1;
27795
27796- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27797+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27798
27799 iounmap(buf);
27800 return 0;
27801diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27802index 98723cb..10ca85b 100644
27803--- a/drivers/gpio/gpio-vr41xx.c
27804+++ b/drivers/gpio/gpio-vr41xx.c
27805@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27806 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27807 maskl, pendl, maskh, pendh);
27808
27809- atomic_inc(&irq_err_count);
27810+ atomic_inc_unchecked(&irq_err_count);
27811
27812 return -EINVAL;
27813 }
27814diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27815index 8323fc3..5c1d755 100644
27816--- a/drivers/gpu/drm/drm_crtc.c
27817+++ b/drivers/gpu/drm/drm_crtc.c
27818@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27819 */
27820 if ((out_resp->count_modes >= mode_count) && mode_count) {
27821 copied = 0;
27822- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27823+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27824 list_for_each_entry(mode, &connector->modes, head) {
27825 drm_crtc_convert_to_umode(&u_mode, mode);
27826 if (copy_to_user(mode_ptr + copied,
27827@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27828
27829 if ((out_resp->count_props >= props_count) && props_count) {
27830 copied = 0;
27831- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27832- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27833+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27834+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27835 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27836 if (connector->property_ids[i] != 0) {
27837 if (put_user(connector->property_ids[i],
27838@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27839
27840 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27841 copied = 0;
27842- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27843+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27844 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27845 if (connector->encoder_ids[i] != 0) {
27846 if (put_user(connector->encoder_ids[i],
27847@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27848 }
27849
27850 for (i = 0; i < crtc_req->count_connectors; i++) {
27851- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27852+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27853 if (get_user(out_id, &set_connectors_ptr[i])) {
27854 ret = -EFAULT;
27855 goto out;
27856@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27857 fb = obj_to_fb(obj);
27858
27859 num_clips = r->num_clips;
27860- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27861+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27862
27863 if (!num_clips != !clips_ptr) {
27864 ret = -EINVAL;
27865@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27866 out_resp->flags = property->flags;
27867
27868 if ((out_resp->count_values >= value_count) && value_count) {
27869- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27870+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27871 for (i = 0; i < value_count; i++) {
27872 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27873 ret = -EFAULT;
27874@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27875 if (property->flags & DRM_MODE_PROP_ENUM) {
27876 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27877 copied = 0;
27878- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27879+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27880 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27881
27882 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27883@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27884 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27885 copied = 0;
27886 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27887- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27888+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27889
27890 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27891 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27892@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27893 struct drm_mode_get_blob *out_resp = data;
27894 struct drm_property_blob *blob;
27895 int ret = 0;
27896- void *blob_ptr;
27897+ void __user *blob_ptr;
27898
27899 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27900 return -EINVAL;
27901@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27902 blob = obj_to_blob(obj);
27903
27904 if (out_resp->length == blob->length) {
27905- blob_ptr = (void *)(unsigned long)out_resp->data;
27906+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27907 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27908 ret = -EFAULT;
27909 goto done;
27910diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27911index d2619d7..bd6bd00 100644
27912--- a/drivers/gpu/drm/drm_crtc_helper.c
27913+++ b/drivers/gpu/drm/drm_crtc_helper.c
27914@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27915 struct drm_crtc *tmp;
27916 int crtc_mask = 1;
27917
27918- WARN(!crtc, "checking null crtc?\n");
27919+ BUG_ON(!crtc);
27920
27921 dev = crtc->dev;
27922
27923diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27924index 40c187c..5746164 100644
27925--- a/drivers/gpu/drm/drm_drv.c
27926+++ b/drivers/gpu/drm/drm_drv.c
27927@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27928 /**
27929 * Copy and IOCTL return string to user space
27930 */
27931-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27932+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27933 {
27934 int len;
27935
27936@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27937
27938 dev = file_priv->minor->dev;
27939 atomic_inc(&dev->ioctl_count);
27940- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27941+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27942 ++file_priv->ioctl_count;
27943
27944 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27945diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27946index 4911e1d..484c8a3 100644
27947--- a/drivers/gpu/drm/drm_fops.c
27948+++ b/drivers/gpu/drm/drm_fops.c
27949@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27950 }
27951
27952 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27953- atomic_set(&dev->counts[i], 0);
27954+ atomic_set_unchecked(&dev->counts[i], 0);
27955
27956 dev->sigdata.lock = NULL;
27957
27958@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27959
27960 retcode = drm_open_helper(inode, filp, dev);
27961 if (!retcode) {
27962- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27963- if (!dev->open_count++)
27964+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27965+ if (local_inc_return(&dev->open_count) == 1)
27966 retcode = drm_setup(dev);
27967 }
27968 if (!retcode) {
27969@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27970
27971 mutex_lock(&drm_global_mutex);
27972
27973- DRM_DEBUG("open_count = %d\n", dev->open_count);
27974+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27975
27976 if (dev->driver->preclose)
27977 dev->driver->preclose(dev, file_priv);
27978@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27979 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27980 task_pid_nr(current),
27981 (long)old_encode_dev(file_priv->minor->device),
27982- dev->open_count);
27983+ local_read(&dev->open_count));
27984
27985 /* if the master has gone away we can't do anything with the lock */
27986 if (file_priv->minor->master)
27987@@ -566,8 +566,8 @@ int drm_release(struct inode *inode, struct file *filp)
27988 * End inline drm_release
27989 */
27990
27991- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27992- if (!--dev->open_count) {
27993+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27994+ if (local_dec_and_test(&dev->open_count)) {
27995 if (atomic_read(&dev->ioctl_count)) {
27996 DRM_ERROR("Device busy: %d\n",
27997 atomic_read(&dev->ioctl_count));
27998diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27999index c87dc96..326055d 100644
28000--- a/drivers/gpu/drm/drm_global.c
28001+++ b/drivers/gpu/drm/drm_global.c
28002@@ -36,7 +36,7 @@
28003 struct drm_global_item {
28004 struct mutex mutex;
28005 void *object;
28006- int refcount;
28007+ atomic_t refcount;
28008 };
28009
28010 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28011@@ -49,7 +49,7 @@ void drm_global_init(void)
28012 struct drm_global_item *item = &glob[i];
28013 mutex_init(&item->mutex);
28014 item->object = NULL;
28015- item->refcount = 0;
28016+ atomic_set(&item->refcount, 0);
28017 }
28018 }
28019
28020@@ -59,7 +59,7 @@ void drm_global_release(void)
28021 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28022 struct drm_global_item *item = &glob[i];
28023 BUG_ON(item->object != NULL);
28024- BUG_ON(item->refcount != 0);
28025+ BUG_ON(atomic_read(&item->refcount) != 0);
28026 }
28027 }
28028
28029@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28030 void *object;
28031
28032 mutex_lock(&item->mutex);
28033- if (item->refcount == 0) {
28034+ if (atomic_read(&item->refcount) == 0) {
28035 item->object = kzalloc(ref->size, GFP_KERNEL);
28036 if (unlikely(item->object == NULL)) {
28037 ret = -ENOMEM;
28038@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28039 goto out_err;
28040
28041 }
28042- ++item->refcount;
28043+ atomic_inc(&item->refcount);
28044 ref->object = item->object;
28045 object = item->object;
28046 mutex_unlock(&item->mutex);
28047@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28048 struct drm_global_item *item = &glob[ref->global_type];
28049
28050 mutex_lock(&item->mutex);
28051- BUG_ON(item->refcount == 0);
28052+ BUG_ON(atomic_read(&item->refcount) == 0);
28053 BUG_ON(ref->object != item->object);
28054- if (--item->refcount == 0) {
28055+ if (atomic_dec_and_test(&item->refcount)) {
28056 ref->release(ref);
28057 item->object = NULL;
28058 }
28059diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28060index ab1162d..42587b2 100644
28061--- a/drivers/gpu/drm/drm_info.c
28062+++ b/drivers/gpu/drm/drm_info.c
28063@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28064 struct drm_local_map *map;
28065 struct drm_map_list *r_list;
28066
28067- /* Hardcoded from _DRM_FRAME_BUFFER,
28068- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28069- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28070- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28071+ static const char * const types[] = {
28072+ [_DRM_FRAME_BUFFER] = "FB",
28073+ [_DRM_REGISTERS] = "REG",
28074+ [_DRM_SHM] = "SHM",
28075+ [_DRM_AGP] = "AGP",
28076+ [_DRM_SCATTER_GATHER] = "SG",
28077+ [_DRM_CONSISTENT] = "PCI",
28078+ [_DRM_GEM] = "GEM" };
28079 const char *type;
28080 int i;
28081
28082@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28083 map = r_list->map;
28084 if (!map)
28085 continue;
28086- if (map->type < 0 || map->type > 5)
28087+ if (map->type >= ARRAY_SIZE(types))
28088 type = "??";
28089 else
28090 type = types[map->type];
28091@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28092 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28093 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28094 vma->vm_flags & VM_IO ? 'i' : '-',
28095+#ifdef CONFIG_GRKERNSEC_HIDESYM
28096+ 0);
28097+#else
28098 vma->vm_pgoff);
28099+#endif
28100
28101 #if defined(__i386__)
28102 pgprot = pgprot_val(vma->vm_page_prot);
28103diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28104index ddd70db..40321e6 100644
28105--- a/drivers/gpu/drm/drm_ioc32.c
28106+++ b/drivers/gpu/drm/drm_ioc32.c
28107@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28108 request = compat_alloc_user_space(nbytes);
28109 if (!access_ok(VERIFY_WRITE, request, nbytes))
28110 return -EFAULT;
28111- list = (struct drm_buf_desc *) (request + 1);
28112+ list = (struct drm_buf_desc __user *) (request + 1);
28113
28114 if (__put_user(count, &request->count)
28115 || __put_user(list, &request->list))
28116@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28117 request = compat_alloc_user_space(nbytes);
28118 if (!access_ok(VERIFY_WRITE, request, nbytes))
28119 return -EFAULT;
28120- list = (struct drm_buf_pub *) (request + 1);
28121+ list = (struct drm_buf_pub __user *) (request + 1);
28122
28123 if (__put_user(count, &request->count)
28124 || __put_user(list, &request->list))
28125diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28126index 904d7e9..ab88581 100644
28127--- a/drivers/gpu/drm/drm_ioctl.c
28128+++ b/drivers/gpu/drm/drm_ioctl.c
28129@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28130 stats->data[i].value =
28131 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28132 else
28133- stats->data[i].value = atomic_read(&dev->counts[i]);
28134+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28135 stats->data[i].type = dev->types[i];
28136 }
28137
28138diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28139index 632ae24..244cf4a 100644
28140--- a/drivers/gpu/drm/drm_lock.c
28141+++ b/drivers/gpu/drm/drm_lock.c
28142@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28143 if (drm_lock_take(&master->lock, lock->context)) {
28144 master->lock.file_priv = file_priv;
28145 master->lock.lock_time = jiffies;
28146- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28147+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28148 break; /* Got lock */
28149 }
28150
28151@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28152 return -EINVAL;
28153 }
28154
28155- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28156+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28157
28158 if (drm_lock_free(&master->lock, lock->context)) {
28159 /* FIXME: Should really bail out here. */
28160diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28161index 8f371e8..9f85d52 100644
28162--- a/drivers/gpu/drm/i810/i810_dma.c
28163+++ b/drivers/gpu/drm/i810/i810_dma.c
28164@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28165 dma->buflist[vertex->idx],
28166 vertex->discard, vertex->used);
28167
28168- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28169- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28170+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28171+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28172 sarea_priv->last_enqueue = dev_priv->counter - 1;
28173 sarea_priv->last_dispatch = (int)hw_status[5];
28174
28175@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28176 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28177 mc->last_render);
28178
28179- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28180- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28181+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28182+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28183 sarea_priv->last_enqueue = dev_priv->counter - 1;
28184 sarea_priv->last_dispatch = (int)hw_status[5];
28185
28186diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28187index c9339f4..f5e1b9d 100644
28188--- a/drivers/gpu/drm/i810/i810_drv.h
28189+++ b/drivers/gpu/drm/i810/i810_drv.h
28190@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28191 int page_flipping;
28192
28193 wait_queue_head_t irq_queue;
28194- atomic_t irq_received;
28195- atomic_t irq_emitted;
28196+ atomic_unchecked_t irq_received;
28197+ atomic_unchecked_t irq_emitted;
28198
28199 int front_offset;
28200 } drm_i810_private_t;
28201diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28202index 004b048..7588eba 100644
28203--- a/drivers/gpu/drm/i915/i915_debugfs.c
28204+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28205@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28206 I915_READ(GTIMR));
28207 }
28208 seq_printf(m, "Interrupts received: %d\n",
28209- atomic_read(&dev_priv->irq_received));
28210+ atomic_read_unchecked(&dev_priv->irq_received));
28211 for (i = 0; i < I915_NUM_RINGS; i++) {
28212 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28213 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28214@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28215 return ret;
28216
28217 if (opregion->header)
28218- seq_write(m, opregion->header, OPREGION_SIZE);
28219+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28220
28221 mutex_unlock(&dev->struct_mutex);
28222
28223diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28224index a9ae374..43c1e9e 100644
28225--- a/drivers/gpu/drm/i915/i915_dma.c
28226+++ b/drivers/gpu/drm/i915/i915_dma.c
28227@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28228 bool can_switch;
28229
28230 spin_lock(&dev->count_lock);
28231- can_switch = (dev->open_count == 0);
28232+ can_switch = (local_read(&dev->open_count) == 0);
28233 spin_unlock(&dev->count_lock);
28234 return can_switch;
28235 }
28236diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28237index 554bef7..d24791c 100644
28238--- a/drivers/gpu/drm/i915/i915_drv.h
28239+++ b/drivers/gpu/drm/i915/i915_drv.h
28240@@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28241 /* render clock increase/decrease */
28242 /* display clock increase/decrease */
28243 /* pll clock increase/decrease */
28244-};
28245+} __no_const;
28246
28247 struct intel_device_info {
28248 u8 gen;
28249@@ -312,7 +312,7 @@ typedef struct drm_i915_private {
28250 int current_page;
28251 int page_flipping;
28252
28253- atomic_t irq_received;
28254+ atomic_unchecked_t irq_received;
28255
28256 /* protects the irq masks */
28257 spinlock_t irq_lock;
28258@@ -887,7 +887,7 @@ struct drm_i915_gem_object {
28259 * will be page flipped away on the next vblank. When it
28260 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28261 */
28262- atomic_t pending_flip;
28263+ atomic_unchecked_t pending_flip;
28264 };
28265
28266 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28267@@ -1267,7 +1267,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28268 extern void intel_teardown_gmbus(struct drm_device *dev);
28269 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28270 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28271-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28272+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28273 {
28274 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28275 }
28276diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28277index b9da890..cad1d98 100644
28278--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28279+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28280@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28281 i915_gem_clflush_object(obj);
28282
28283 if (obj->base.pending_write_domain)
28284- cd->flips |= atomic_read(&obj->pending_flip);
28285+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28286
28287 /* The actual obj->write_domain will be updated with
28288 * pending_write_domain after we emit the accumulated flush for all
28289@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28290
28291 static int
28292 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28293- int count)
28294+ unsigned int count)
28295 {
28296- int i;
28297+ unsigned int i;
28298
28299 for (i = 0; i < count; i++) {
28300 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28301diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28302index b40004b..7c53a75 100644
28303--- a/drivers/gpu/drm/i915/i915_irq.c
28304+++ b/drivers/gpu/drm/i915/i915_irq.c
28305@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28306 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28307 struct drm_i915_master_private *master_priv;
28308
28309- atomic_inc(&dev_priv->irq_received);
28310+ atomic_inc_unchecked(&dev_priv->irq_received);
28311
28312 /* disable master interrupt before clearing iir */
28313 de_ier = I915_READ(DEIER);
28314@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28315 struct drm_i915_master_private *master_priv;
28316 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28317
28318- atomic_inc(&dev_priv->irq_received);
28319+ atomic_inc_unchecked(&dev_priv->irq_received);
28320
28321 if (IS_GEN6(dev))
28322 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28323@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28324 int ret = IRQ_NONE, pipe;
28325 bool blc_event = false;
28326
28327- atomic_inc(&dev_priv->irq_received);
28328+ atomic_inc_unchecked(&dev_priv->irq_received);
28329
28330 iir = I915_READ(IIR);
28331
28332@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28333 {
28334 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28335
28336- atomic_set(&dev_priv->irq_received, 0);
28337+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28338
28339 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28340 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28341@@ -1931,7 +1931,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28343 int pipe;
28344
28345- atomic_set(&dev_priv->irq_received, 0);
28346+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28347
28348 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28349 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28350diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28351index daa5743..c0757a9 100644
28352--- a/drivers/gpu/drm/i915/intel_display.c
28353+++ b/drivers/gpu/drm/i915/intel_display.c
28354@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28355
28356 wait_event(dev_priv->pending_flip_queue,
28357 atomic_read(&dev_priv->mm.wedged) ||
28358- atomic_read(&obj->pending_flip) == 0);
28359+ atomic_read_unchecked(&obj->pending_flip) == 0);
28360
28361 /* Big Hammer, we also need to ensure that any pending
28362 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28363@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28364 obj = to_intel_framebuffer(crtc->fb)->obj;
28365 dev_priv = crtc->dev->dev_private;
28366 wait_event(dev_priv->pending_flip_queue,
28367- atomic_read(&obj->pending_flip) == 0);
28368+ atomic_read_unchecked(&obj->pending_flip) == 0);
28369 }
28370
28371 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28372@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28373
28374 atomic_clear_mask(1 << intel_crtc->plane,
28375 &obj->pending_flip.counter);
28376- if (atomic_read(&obj->pending_flip) == 0)
28377+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28378 wake_up(&dev_priv->pending_flip_queue);
28379
28380 schedule_work(&work->work);
28381@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28382 /* Block clients from rendering to the new back buffer until
28383 * the flip occurs and the object is no longer visible.
28384 */
28385- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28386+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28387
28388 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28389 if (ret)
28390@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28391 return 0;
28392
28393 cleanup_pending:
28394- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28395+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28396 drm_gem_object_unreference(&work->old_fb_obj->base);
28397 drm_gem_object_unreference(&obj->base);
28398 mutex_unlock(&dev->struct_mutex);
28399diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28400index 54558a0..2d97005 100644
28401--- a/drivers/gpu/drm/mga/mga_drv.h
28402+++ b/drivers/gpu/drm/mga/mga_drv.h
28403@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28404 u32 clear_cmd;
28405 u32 maccess;
28406
28407- atomic_t vbl_received; /**< Number of vblanks received. */
28408+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28409 wait_queue_head_t fence_queue;
28410- atomic_t last_fence_retired;
28411+ atomic_unchecked_t last_fence_retired;
28412 u32 next_fence_to_post;
28413
28414 unsigned int fb_cpp;
28415diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28416index 2581202..f230a8d9 100644
28417--- a/drivers/gpu/drm/mga/mga_irq.c
28418+++ b/drivers/gpu/drm/mga/mga_irq.c
28419@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28420 if (crtc != 0)
28421 return 0;
28422
28423- return atomic_read(&dev_priv->vbl_received);
28424+ return atomic_read_unchecked(&dev_priv->vbl_received);
28425 }
28426
28427
28428@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28429 /* VBLANK interrupt */
28430 if (status & MGA_VLINEPEN) {
28431 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28432- atomic_inc(&dev_priv->vbl_received);
28433+ atomic_inc_unchecked(&dev_priv->vbl_received);
28434 drm_handle_vblank(dev, 0);
28435 handled = 1;
28436 }
28437@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28438 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28439 MGA_WRITE(MGA_PRIMEND, prim_end);
28440
28441- atomic_inc(&dev_priv->last_fence_retired);
28442+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28443 DRM_WAKEUP(&dev_priv->fence_queue);
28444 handled = 1;
28445 }
28446@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28447 * using fences.
28448 */
28449 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28450- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28451+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28452 - *sequence) <= (1 << 23)));
28453
28454 *sequence = cur_fence;
28455diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28456index 5fc201b..7b032b9 100644
28457--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28458+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28459@@ -201,7 +201,7 @@ struct methods {
28460 const char desc[8];
28461 void (*loadbios)(struct drm_device *, uint8_t *);
28462 const bool rw;
28463-};
28464+} __do_const;
28465
28466 static struct methods shadow_methods[] = {
28467 { "PRAMIN", load_vbios_pramin, true },
28468@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28469 struct bit_table {
28470 const char id;
28471 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28472-};
28473+} __no_const;
28474
28475 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28476
28477diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28478index 4c0be3a..5757582 100644
28479--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28480+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28481@@ -238,7 +238,7 @@ struct nouveau_channel {
28482 struct list_head pending;
28483 uint32_t sequence;
28484 uint32_t sequence_ack;
28485- atomic_t last_sequence_irq;
28486+ atomic_unchecked_t last_sequence_irq;
28487 struct nouveau_vma vma;
28488 } fence;
28489
28490@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28491 u32 handle, u16 class);
28492 void (*set_tile_region)(struct drm_device *dev, int i);
28493 void (*tlb_flush)(struct drm_device *, int engine);
28494-};
28495+} __no_const;
28496
28497 struct nouveau_instmem_engine {
28498 void *priv;
28499@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28500 struct nouveau_mc_engine {
28501 int (*init)(struct drm_device *dev);
28502 void (*takedown)(struct drm_device *dev);
28503-};
28504+} __no_const;
28505
28506 struct nouveau_timer_engine {
28507 int (*init)(struct drm_device *dev);
28508 void (*takedown)(struct drm_device *dev);
28509 uint64_t (*read)(struct drm_device *dev);
28510-};
28511+} __no_const;
28512
28513 struct nouveau_fb_engine {
28514 int num_tiles;
28515@@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28516 void (*put)(struct drm_device *, struct nouveau_mem **);
28517
28518 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28519-};
28520+} __no_const;
28521
28522 struct nouveau_engine {
28523 struct nouveau_instmem_engine instmem;
28524@@ -706,7 +706,7 @@ struct drm_nouveau_private {
28525 struct drm_global_reference mem_global_ref;
28526 struct ttm_bo_global_ref bo_global_ref;
28527 struct ttm_bo_device bdev;
28528- atomic_t validate_sequence;
28529+ atomic_unchecked_t validate_sequence;
28530 } ttm;
28531
28532 struct {
28533diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28534index 2f6daae..c9d7b9e 100644
28535--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28536+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28537@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28538 if (USE_REFCNT(dev))
28539 sequence = nvchan_rd32(chan, 0x48);
28540 else
28541- sequence = atomic_read(&chan->fence.last_sequence_irq);
28542+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28543
28544 if (chan->fence.sequence_ack == sequence)
28545 goto out;
28546@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28547 return ret;
28548 }
28549
28550- atomic_set(&chan->fence.last_sequence_irq, 0);
28551+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28552 return 0;
28553 }
28554
28555diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28556index 5f0bc57..eb9fac8 100644
28557--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28558+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28559@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28560 int trycnt = 0;
28561 int ret, i;
28562
28563- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28564+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28565 retry:
28566 if (++trycnt > 100000) {
28567 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28568diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28569index d8831ab..0ba8356 100644
28570--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28571+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28572@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28573 bool can_switch;
28574
28575 spin_lock(&dev->count_lock);
28576- can_switch = (dev->open_count == 0);
28577+ can_switch = (local_read(&dev->open_count) == 0);
28578 spin_unlock(&dev->count_lock);
28579 return can_switch;
28580 }
28581diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28582index dbdea8e..cd6eeeb 100644
28583--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28584+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28585@@ -554,7 +554,7 @@ static int
28586 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28587 u32 class, u32 mthd, u32 data)
28588 {
28589- atomic_set(&chan->fence.last_sequence_irq, data);
28590+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28591 return 0;
28592 }
28593
28594diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28595index bcac90b..53bfc76 100644
28596--- a/drivers/gpu/drm/r128/r128_cce.c
28597+++ b/drivers/gpu/drm/r128/r128_cce.c
28598@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28599
28600 /* GH: Simple idle check.
28601 */
28602- atomic_set(&dev_priv->idle_count, 0);
28603+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28604
28605 /* We don't support anything other than bus-mastering ring mode,
28606 * but the ring can be in either AGP or PCI space for the ring
28607diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28608index 930c71b..499aded 100644
28609--- a/drivers/gpu/drm/r128/r128_drv.h
28610+++ b/drivers/gpu/drm/r128/r128_drv.h
28611@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28612 int is_pci;
28613 unsigned long cce_buffers_offset;
28614
28615- atomic_t idle_count;
28616+ atomic_unchecked_t idle_count;
28617
28618 int page_flipping;
28619 int current_page;
28620 u32 crtc_offset;
28621 u32 crtc_offset_cntl;
28622
28623- atomic_t vbl_received;
28624+ atomic_unchecked_t vbl_received;
28625
28626 u32 color_fmt;
28627 unsigned int front_offset;
28628diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28629index 429d5a0..7e899ed 100644
28630--- a/drivers/gpu/drm/r128/r128_irq.c
28631+++ b/drivers/gpu/drm/r128/r128_irq.c
28632@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28633 if (crtc != 0)
28634 return 0;
28635
28636- return atomic_read(&dev_priv->vbl_received);
28637+ return atomic_read_unchecked(&dev_priv->vbl_received);
28638 }
28639
28640 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28641@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28642 /* VBLANK interrupt */
28643 if (status & R128_CRTC_VBLANK_INT) {
28644 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28645- atomic_inc(&dev_priv->vbl_received);
28646+ atomic_inc_unchecked(&dev_priv->vbl_received);
28647 drm_handle_vblank(dev, 0);
28648 return IRQ_HANDLED;
28649 }
28650diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28651index a9e33ce..09edd4b 100644
28652--- a/drivers/gpu/drm/r128/r128_state.c
28653+++ b/drivers/gpu/drm/r128/r128_state.c
28654@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28655
28656 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28657 {
28658- if (atomic_read(&dev_priv->idle_count) == 0)
28659+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28660 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28661 else
28662- atomic_set(&dev_priv->idle_count, 0);
28663+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28664 }
28665
28666 #endif
28667diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28668index 5a82b6b..9e69c73 100644
28669--- a/drivers/gpu/drm/radeon/mkregtable.c
28670+++ b/drivers/gpu/drm/radeon/mkregtable.c
28671@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28672 regex_t mask_rex;
28673 regmatch_t match[4];
28674 char buf[1024];
28675- size_t end;
28676+ long end;
28677 int len;
28678 int done = 0;
28679 int r;
28680 unsigned o;
28681 struct offset *offset;
28682 char last_reg_s[10];
28683- int last_reg;
28684+ unsigned long last_reg;
28685
28686 if (regcomp
28687 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28688diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28689index 8227e76..ce0b195 100644
28690--- a/drivers/gpu/drm/radeon/radeon.h
28691+++ b/drivers/gpu/drm/radeon/radeon.h
28692@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28693 */
28694 struct radeon_fence_driver {
28695 uint32_t scratch_reg;
28696- atomic_t seq;
28697+ atomic_unchecked_t seq;
28698 uint32_t last_seq;
28699 unsigned long last_jiffies;
28700 unsigned long last_timeout;
28701@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28702 int x2, int y2);
28703 void (*draw_auto)(struct radeon_device *rdev);
28704 void (*set_default_state)(struct radeon_device *rdev);
28705-};
28706+} __no_const;
28707
28708 struct r600_blit {
28709 struct mutex mutex;
28710@@ -954,7 +954,7 @@ struct radeon_asic {
28711 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28712 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28713 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28714-};
28715+} __no_const;
28716
28717 /*
28718 * Asic structures
28719diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28720index 9b39145..389b93b 100644
28721--- a/drivers/gpu/drm/radeon/radeon_device.c
28722+++ b/drivers/gpu/drm/radeon/radeon_device.c
28723@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28724 bool can_switch;
28725
28726 spin_lock(&dev->count_lock);
28727- can_switch = (dev->open_count == 0);
28728+ can_switch = (local_read(&dev->open_count) == 0);
28729 spin_unlock(&dev->count_lock);
28730 return can_switch;
28731 }
28732diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28733index a1b59ca..86f2d44 100644
28734--- a/drivers/gpu/drm/radeon/radeon_drv.h
28735+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28736@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28737
28738 /* SW interrupt */
28739 wait_queue_head_t swi_queue;
28740- atomic_t swi_emitted;
28741+ atomic_unchecked_t swi_emitted;
28742 int vblank_crtc;
28743 uint32_t irq_enable_reg;
28744 uint32_t r500_disp_irq_reg;
28745diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28746index 76ec0e9..6feb1a3 100644
28747--- a/drivers/gpu/drm/radeon/radeon_fence.c
28748+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28749@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28750 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28751 return 0;
28752 }
28753- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28754+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28755 if (!rdev->cp.ready)
28756 /* FIXME: cp is not running assume everythings is done right
28757 * away
28758@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28759 return r;
28760 }
28761 radeon_fence_write(rdev, 0);
28762- atomic_set(&rdev->fence_drv.seq, 0);
28763+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28764 INIT_LIST_HEAD(&rdev->fence_drv.created);
28765 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28766 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28767diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28768index 48b7cea..342236f 100644
28769--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28770+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28771@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28772 request = compat_alloc_user_space(sizeof(*request));
28773 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28774 || __put_user(req32.param, &request->param)
28775- || __put_user((void __user *)(unsigned long)req32.value,
28776+ || __put_user((unsigned long)req32.value,
28777 &request->value))
28778 return -EFAULT;
28779
28780diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28781index 00da384..32f972d 100644
28782--- a/drivers/gpu/drm/radeon/radeon_irq.c
28783+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28784@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28785 unsigned int ret;
28786 RING_LOCALS;
28787
28788- atomic_inc(&dev_priv->swi_emitted);
28789- ret = atomic_read(&dev_priv->swi_emitted);
28790+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28791+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28792
28793 BEGIN_RING(4);
28794 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28795@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28796 drm_radeon_private_t *dev_priv =
28797 (drm_radeon_private_t *) dev->dev_private;
28798
28799- atomic_set(&dev_priv->swi_emitted, 0);
28800+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28801 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28802
28803 dev->max_vblank_count = 0x001fffff;
28804diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28805index e8422ae..d22d4a8 100644
28806--- a/drivers/gpu/drm/radeon/radeon_state.c
28807+++ b/drivers/gpu/drm/radeon/radeon_state.c
28808@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28809 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28810 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28811
28812- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28813+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28814 sarea_priv->nbox * sizeof(depth_boxes[0])))
28815 return -EFAULT;
28816
28817@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28818 {
28819 drm_radeon_private_t *dev_priv = dev->dev_private;
28820 drm_radeon_getparam_t *param = data;
28821- int value;
28822+ int value = 0;
28823
28824 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28825
28826diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28827index 0b5468b..9c4b308 100644
28828--- a/drivers/gpu/drm/radeon/radeon_ttm.c
28829+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28830@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28831 }
28832 if (unlikely(ttm_vm_ops == NULL)) {
28833 ttm_vm_ops = vma->vm_ops;
28834- radeon_ttm_vm_ops = *ttm_vm_ops;
28835- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28836+ pax_open_kernel();
28837+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28838+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28839+ pax_close_kernel();
28840 }
28841 vma->vm_ops = &radeon_ttm_vm_ops;
28842 return 0;
28843diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28844index a9049ed..501f284 100644
28845--- a/drivers/gpu/drm/radeon/rs690.c
28846+++ b/drivers/gpu/drm/radeon/rs690.c
28847@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28848 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28849 rdev->pm.sideport_bandwidth.full)
28850 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28851- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28852+ read_delay_latency.full = dfixed_const(800 * 1000);
28853 read_delay_latency.full = dfixed_div(read_delay_latency,
28854 rdev->pm.igp_sideport_mclk);
28855+ a.full = dfixed_const(370);
28856+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28857 } else {
28858 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28859 rdev->pm.k8_bandwidth.full)
28860diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28861index 727e93d..1565650 100644
28862--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28863+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28864@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28865 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28866 struct shrink_control *sc)
28867 {
28868- static atomic_t start_pool = ATOMIC_INIT(0);
28869+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28870 unsigned i;
28871- unsigned pool_offset = atomic_add_return(1, &start_pool);
28872+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28873 struct ttm_page_pool *pool;
28874 int shrink_pages = sc->nr_to_scan;
28875
28876diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28877index 9cf87d9..2000b7d 100644
28878--- a/drivers/gpu/drm/via/via_drv.h
28879+++ b/drivers/gpu/drm/via/via_drv.h
28880@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28881 typedef uint32_t maskarray_t[5];
28882
28883 typedef struct drm_via_irq {
28884- atomic_t irq_received;
28885+ atomic_unchecked_t irq_received;
28886 uint32_t pending_mask;
28887 uint32_t enable_mask;
28888 wait_queue_head_t irq_queue;
28889@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28890 struct timeval last_vblank;
28891 int last_vblank_valid;
28892 unsigned usec_per_vblank;
28893- atomic_t vbl_received;
28894+ atomic_unchecked_t vbl_received;
28895 drm_via_state_t hc_state;
28896 char pci_buf[VIA_PCI_BUF_SIZE];
28897 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28898diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28899index d391f48..10c8ca3 100644
28900--- a/drivers/gpu/drm/via/via_irq.c
28901+++ b/drivers/gpu/drm/via/via_irq.c
28902@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28903 if (crtc != 0)
28904 return 0;
28905
28906- return atomic_read(&dev_priv->vbl_received);
28907+ return atomic_read_unchecked(&dev_priv->vbl_received);
28908 }
28909
28910 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28911@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28912
28913 status = VIA_READ(VIA_REG_INTERRUPT);
28914 if (status & VIA_IRQ_VBLANK_PENDING) {
28915- atomic_inc(&dev_priv->vbl_received);
28916- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28917+ atomic_inc_unchecked(&dev_priv->vbl_received);
28918+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28919 do_gettimeofday(&cur_vblank);
28920 if (dev_priv->last_vblank_valid) {
28921 dev_priv->usec_per_vblank =
28922@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28923 dev_priv->last_vblank = cur_vblank;
28924 dev_priv->last_vblank_valid = 1;
28925 }
28926- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28927+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28928 DRM_DEBUG("US per vblank is: %u\n",
28929 dev_priv->usec_per_vblank);
28930 }
28931@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28932
28933 for (i = 0; i < dev_priv->num_irqs; ++i) {
28934 if (status & cur_irq->pending_mask) {
28935- atomic_inc(&cur_irq->irq_received);
28936+ atomic_inc_unchecked(&cur_irq->irq_received);
28937 DRM_WAKEUP(&cur_irq->irq_queue);
28938 handled = 1;
28939 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28940@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28941 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28942 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28943 masks[irq][4]));
28944- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28945+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28946 } else {
28947 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28948 (((cur_irq_sequence =
28949- atomic_read(&cur_irq->irq_received)) -
28950+ atomic_read_unchecked(&cur_irq->irq_received)) -
28951 *sequence) <= (1 << 23)));
28952 }
28953 *sequence = cur_irq_sequence;
28954@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28955 }
28956
28957 for (i = 0; i < dev_priv->num_irqs; ++i) {
28958- atomic_set(&cur_irq->irq_received, 0);
28959+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28960 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28961 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28962 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28963@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28964 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28965 case VIA_IRQ_RELATIVE:
28966 irqwait->request.sequence +=
28967- atomic_read(&cur_irq->irq_received);
28968+ atomic_read_unchecked(&cur_irq->irq_received);
28969 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28970 case VIA_IRQ_ABSOLUTE:
28971 break;
28972diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28973index dc27970..f18b008 100644
28974--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28975+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28976@@ -260,7 +260,7 @@ struct vmw_private {
28977 * Fencing and IRQs.
28978 */
28979
28980- atomic_t marker_seq;
28981+ atomic_unchecked_t marker_seq;
28982 wait_queue_head_t fence_queue;
28983 wait_queue_head_t fifo_queue;
28984 int fence_queue_waiters; /* Protected by hw_mutex */
28985diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28986index a0c2f12..68ae6cb 100644
28987--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28988+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28989@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28990 (unsigned int) min,
28991 (unsigned int) fifo->capabilities);
28992
28993- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28994+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28995 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28996 vmw_marker_queue_init(&fifo->marker_queue);
28997 return vmw_fifo_send_fence(dev_priv, &dummy);
28998@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28999 if (reserveable)
29000 iowrite32(bytes, fifo_mem +
29001 SVGA_FIFO_RESERVED);
29002- return fifo_mem + (next_cmd >> 2);
29003+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29004 } else {
29005 need_bounce = true;
29006 }
29007@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29008
29009 fm = vmw_fifo_reserve(dev_priv, bytes);
29010 if (unlikely(fm == NULL)) {
29011- *seqno = atomic_read(&dev_priv->marker_seq);
29012+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29013 ret = -ENOMEM;
29014 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29015 false, 3*HZ);
29016@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29017 }
29018
29019 do {
29020- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29021+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29022 } while (*seqno == 0);
29023
29024 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29025diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29026index cabc95f..14b3d77 100644
29027--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29028+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29029@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29030 * emitted. Then the fence is stale and signaled.
29031 */
29032
29033- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29034+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29035 > VMW_FENCE_WRAP);
29036
29037 return ret;
29038@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29039
29040 if (fifo_idle)
29041 down_read(&fifo_state->rwsem);
29042- signal_seq = atomic_read(&dev_priv->marker_seq);
29043+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29044 ret = 0;
29045
29046 for (;;) {
29047diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29048index 8a8725c..afed796 100644
29049--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29050+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29051@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29052 while (!vmw_lag_lt(queue, us)) {
29053 spin_lock(&queue->lock);
29054 if (list_empty(&queue->head))
29055- seqno = atomic_read(&dev_priv->marker_seq);
29056+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29057 else {
29058 marker = list_first_entry(&queue->head,
29059 struct vmw_marker, head);
29060diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29061index bb656d8..4169fca 100644
29062--- a/drivers/hid/hid-core.c
29063+++ b/drivers/hid/hid-core.c
29064@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29065
29066 int hid_add_device(struct hid_device *hdev)
29067 {
29068- static atomic_t id = ATOMIC_INIT(0);
29069+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29070 int ret;
29071
29072 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29073@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29074 /* XXX hack, any other cleaner solution after the driver core
29075 * is converted to allow more than 20 bytes as the device name? */
29076 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29077- hdev->vendor, hdev->product, atomic_inc_return(&id));
29078+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29079
29080 hid_debug_register(hdev, dev_name(&hdev->dev));
29081 ret = device_add(&hdev->dev);
29082diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29083index 4ef02b2..8a96831 100644
29084--- a/drivers/hid/usbhid/hiddev.c
29085+++ b/drivers/hid/usbhid/hiddev.c
29086@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29087 break;
29088
29089 case HIDIOCAPPLICATION:
29090- if (arg < 0 || arg >= hid->maxapplication)
29091+ if (arg >= hid->maxapplication)
29092 break;
29093
29094 for (i = 0; i < hid->maxcollection; i++)
29095diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29096index 4065374..10ed7dc 100644
29097--- a/drivers/hv/channel.c
29098+++ b/drivers/hv/channel.c
29099@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29100 int ret = 0;
29101 int t;
29102
29103- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29104- atomic_inc(&vmbus_connection.next_gpadl_handle);
29105+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29106+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29107
29108 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29109 if (ret)
29110diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29111index 0fb100e..baf87e5 100644
29112--- a/drivers/hv/hv.c
29113+++ b/drivers/hv/hv.c
29114@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29115 u64 output_address = (output) ? virt_to_phys(output) : 0;
29116 u32 output_address_hi = output_address >> 32;
29117 u32 output_address_lo = output_address & 0xFFFFFFFF;
29118- void *hypercall_page = hv_context.hypercall_page;
29119+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29120
29121 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29122 "=a"(hv_status_lo) : "d" (control_hi),
29123diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29124index 0aee112..b72d21f 100644
29125--- a/drivers/hv/hyperv_vmbus.h
29126+++ b/drivers/hv/hyperv_vmbus.h
29127@@ -556,7 +556,7 @@ enum vmbus_connect_state {
29128 struct vmbus_connection {
29129 enum vmbus_connect_state conn_state;
29130
29131- atomic_t next_gpadl_handle;
29132+ atomic_unchecked_t next_gpadl_handle;
29133
29134 /*
29135 * Represents channel interrupts. Each bit position represents a
29136diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29137index d2d0a2a..90b8f4d 100644
29138--- a/drivers/hv/vmbus_drv.c
29139+++ b/drivers/hv/vmbus_drv.c
29140@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29141 {
29142 int ret = 0;
29143
29144- static atomic_t device_num = ATOMIC_INIT(0);
29145+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29146
29147 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29148- atomic_inc_return(&device_num));
29149+ atomic_inc_return_unchecked(&device_num));
29150
29151 child_device_obj->device.bus = &hv_bus;
29152 child_device_obj->device.parent = &hv_acpi_dev->dev;
29153diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29154index 66f6729..2d6de0a 100644
29155--- a/drivers/hwmon/acpi_power_meter.c
29156+++ b/drivers/hwmon/acpi_power_meter.c
29157@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29158 return res;
29159
29160 temp /= 1000;
29161- if (temp < 0)
29162- return -EINVAL;
29163
29164 mutex_lock(&resource->lock);
29165 resource->trip[attr->index - 7] = temp;
29166diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29167index fe4104c..346febb 100644
29168--- a/drivers/hwmon/sht15.c
29169+++ b/drivers/hwmon/sht15.c
29170@@ -166,7 +166,7 @@ struct sht15_data {
29171 int supply_uV;
29172 bool supply_uV_valid;
29173 struct work_struct update_supply_work;
29174- atomic_t interrupt_handled;
29175+ atomic_unchecked_t interrupt_handled;
29176 };
29177
29178 /**
29179@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29180 return ret;
29181
29182 gpio_direction_input(data->pdata->gpio_data);
29183- atomic_set(&data->interrupt_handled, 0);
29184+ atomic_set_unchecked(&data->interrupt_handled, 0);
29185
29186 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29187 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29188 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29189 /* Only relevant if the interrupt hasn't occurred. */
29190- if (!atomic_read(&data->interrupt_handled))
29191+ if (!atomic_read_unchecked(&data->interrupt_handled))
29192 schedule_work(&data->read_work);
29193 }
29194 ret = wait_event_timeout(data->wait_queue,
29195@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29196
29197 /* First disable the interrupt */
29198 disable_irq_nosync(irq);
29199- atomic_inc(&data->interrupt_handled);
29200+ atomic_inc_unchecked(&data->interrupt_handled);
29201 /* Then schedule a reading work struct */
29202 if (data->state != SHT15_READING_NOTHING)
29203 schedule_work(&data->read_work);
29204@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29205 * If not, then start the interrupt again - care here as could
29206 * have gone low in meantime so verify it hasn't!
29207 */
29208- atomic_set(&data->interrupt_handled, 0);
29209+ atomic_set_unchecked(&data->interrupt_handled, 0);
29210 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29211 /* If still not occurred or another handler has been scheduled */
29212 if (gpio_get_value(data->pdata->gpio_data)
29213- || atomic_read(&data->interrupt_handled))
29214+ || atomic_read_unchecked(&data->interrupt_handled))
29215 return;
29216 }
29217
29218diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29219index 378fcb5..5e91fa8 100644
29220--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29221+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29222@@ -43,7 +43,7 @@
29223 extern struct i2c_adapter amd756_smbus;
29224
29225 static struct i2c_adapter *s4882_adapter;
29226-static struct i2c_algorithm *s4882_algo;
29227+static i2c_algorithm_no_const *s4882_algo;
29228
29229 /* Wrapper access functions for multiplexed SMBus */
29230 static DEFINE_MUTEX(amd756_lock);
29231diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29232index 29015eb..af2d8e9 100644
29233--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29234+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29235@@ -41,7 +41,7 @@
29236 extern struct i2c_adapter *nforce2_smbus;
29237
29238 static struct i2c_adapter *s4985_adapter;
29239-static struct i2c_algorithm *s4985_algo;
29240+static i2c_algorithm_no_const *s4985_algo;
29241
29242 /* Wrapper access functions for multiplexed SMBus */
29243 static DEFINE_MUTEX(nforce2_lock);
29244diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29245index d7a4833..7fae376 100644
29246--- a/drivers/i2c/i2c-mux.c
29247+++ b/drivers/i2c/i2c-mux.c
29248@@ -28,7 +28,7 @@
29249 /* multiplexer per channel data */
29250 struct i2c_mux_priv {
29251 struct i2c_adapter adap;
29252- struct i2c_algorithm algo;
29253+ i2c_algorithm_no_const algo;
29254
29255 struct i2c_adapter *parent;
29256 void *mux_dev; /* the mux chip/device */
29257diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29258index 57d00ca..0145194 100644
29259--- a/drivers/ide/aec62xx.c
29260+++ b/drivers/ide/aec62xx.c
29261@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29262 .cable_detect = atp86x_cable_detect,
29263 };
29264
29265-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29266+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29267 { /* 0: AEC6210 */
29268 .name = DRV_NAME,
29269 .init_chipset = init_chipset_aec62xx,
29270diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29271index 2c8016a..911a27c 100644
29272--- a/drivers/ide/alim15x3.c
29273+++ b/drivers/ide/alim15x3.c
29274@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29275 .dma_sff_read_status = ide_dma_sff_read_status,
29276 };
29277
29278-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29279+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29280 .name = DRV_NAME,
29281 .init_chipset = init_chipset_ali15x3,
29282 .init_hwif = init_hwif_ali15x3,
29283diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29284index 3747b25..56fc995 100644
29285--- a/drivers/ide/amd74xx.c
29286+++ b/drivers/ide/amd74xx.c
29287@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29288 .udma_mask = udma, \
29289 }
29290
29291-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29292+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29293 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29294 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29295 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29296diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29297index 15f0ead..cb43480 100644
29298--- a/drivers/ide/atiixp.c
29299+++ b/drivers/ide/atiixp.c
29300@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29301 .cable_detect = atiixp_cable_detect,
29302 };
29303
29304-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29305+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29306 { /* 0: IXP200/300/400/700 */
29307 .name = DRV_NAME,
29308 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29309diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29310index 5f80312..d1fc438 100644
29311--- a/drivers/ide/cmd64x.c
29312+++ b/drivers/ide/cmd64x.c
29313@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29314 .dma_sff_read_status = ide_dma_sff_read_status,
29315 };
29316
29317-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29318+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29319 { /* 0: CMD643 */
29320 .name = DRV_NAME,
29321 .init_chipset = init_chipset_cmd64x,
29322diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29323index 2c1e5f7..1444762 100644
29324--- a/drivers/ide/cs5520.c
29325+++ b/drivers/ide/cs5520.c
29326@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29327 .set_dma_mode = cs5520_set_dma_mode,
29328 };
29329
29330-static const struct ide_port_info cyrix_chipset __devinitdata = {
29331+static const struct ide_port_info cyrix_chipset __devinitconst = {
29332 .name = DRV_NAME,
29333 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29334 .port_ops = &cs5520_port_ops,
29335diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29336index 4dc4eb9..49b40ad 100644
29337--- a/drivers/ide/cs5530.c
29338+++ b/drivers/ide/cs5530.c
29339@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29340 .udma_filter = cs5530_udma_filter,
29341 };
29342
29343-static const struct ide_port_info cs5530_chipset __devinitdata = {
29344+static const struct ide_port_info cs5530_chipset __devinitconst = {
29345 .name = DRV_NAME,
29346 .init_chipset = init_chipset_cs5530,
29347 .init_hwif = init_hwif_cs5530,
29348diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29349index 5059faf..18d4c85 100644
29350--- a/drivers/ide/cs5535.c
29351+++ b/drivers/ide/cs5535.c
29352@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29353 .cable_detect = cs5535_cable_detect,
29354 };
29355
29356-static const struct ide_port_info cs5535_chipset __devinitdata = {
29357+static const struct ide_port_info cs5535_chipset __devinitconst = {
29358 .name = DRV_NAME,
29359 .port_ops = &cs5535_port_ops,
29360 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29361diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29362index 847553f..3ffb49d 100644
29363--- a/drivers/ide/cy82c693.c
29364+++ b/drivers/ide/cy82c693.c
29365@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29366 .set_dma_mode = cy82c693_set_dma_mode,
29367 };
29368
29369-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29370+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29371 .name = DRV_NAME,
29372 .init_iops = init_iops_cy82c693,
29373 .port_ops = &cy82c693_port_ops,
29374diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29375index 58c51cd..4aec3b8 100644
29376--- a/drivers/ide/hpt366.c
29377+++ b/drivers/ide/hpt366.c
29378@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29379 }
29380 };
29381
29382-static const struct hpt_info hpt36x __devinitdata = {
29383+static const struct hpt_info hpt36x __devinitconst = {
29384 .chip_name = "HPT36x",
29385 .chip_type = HPT36x,
29386 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29387@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29388 .timings = &hpt36x_timings
29389 };
29390
29391-static const struct hpt_info hpt370 __devinitdata = {
29392+static const struct hpt_info hpt370 __devinitconst = {
29393 .chip_name = "HPT370",
29394 .chip_type = HPT370,
29395 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29396@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29397 .timings = &hpt37x_timings
29398 };
29399
29400-static const struct hpt_info hpt370a __devinitdata = {
29401+static const struct hpt_info hpt370a __devinitconst = {
29402 .chip_name = "HPT370A",
29403 .chip_type = HPT370A,
29404 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29405@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29406 .timings = &hpt37x_timings
29407 };
29408
29409-static const struct hpt_info hpt374 __devinitdata = {
29410+static const struct hpt_info hpt374 __devinitconst = {
29411 .chip_name = "HPT374",
29412 .chip_type = HPT374,
29413 .udma_mask = ATA_UDMA5,
29414@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29415 .timings = &hpt37x_timings
29416 };
29417
29418-static const struct hpt_info hpt372 __devinitdata = {
29419+static const struct hpt_info hpt372 __devinitconst = {
29420 .chip_name = "HPT372",
29421 .chip_type = HPT372,
29422 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29423@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29424 .timings = &hpt37x_timings
29425 };
29426
29427-static const struct hpt_info hpt372a __devinitdata = {
29428+static const struct hpt_info hpt372a __devinitconst = {
29429 .chip_name = "HPT372A",
29430 .chip_type = HPT372A,
29431 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29432@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29433 .timings = &hpt37x_timings
29434 };
29435
29436-static const struct hpt_info hpt302 __devinitdata = {
29437+static const struct hpt_info hpt302 __devinitconst = {
29438 .chip_name = "HPT302",
29439 .chip_type = HPT302,
29440 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29441@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29442 .timings = &hpt37x_timings
29443 };
29444
29445-static const struct hpt_info hpt371 __devinitdata = {
29446+static const struct hpt_info hpt371 __devinitconst = {
29447 .chip_name = "HPT371",
29448 .chip_type = HPT371,
29449 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29450@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29451 .timings = &hpt37x_timings
29452 };
29453
29454-static const struct hpt_info hpt372n __devinitdata = {
29455+static const struct hpt_info hpt372n __devinitconst = {
29456 .chip_name = "HPT372N",
29457 .chip_type = HPT372N,
29458 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29459@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29460 .timings = &hpt37x_timings
29461 };
29462
29463-static const struct hpt_info hpt302n __devinitdata = {
29464+static const struct hpt_info hpt302n __devinitconst = {
29465 .chip_name = "HPT302N",
29466 .chip_type = HPT302N,
29467 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29468@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29469 .timings = &hpt37x_timings
29470 };
29471
29472-static const struct hpt_info hpt371n __devinitdata = {
29473+static const struct hpt_info hpt371n __devinitconst = {
29474 .chip_name = "HPT371N",
29475 .chip_type = HPT371N,
29476 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29477@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29478 .dma_sff_read_status = ide_dma_sff_read_status,
29479 };
29480
29481-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29482+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29483 { /* 0: HPT36x */
29484 .name = DRV_NAME,
29485 .init_chipset = init_chipset_hpt366,
29486diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29487index 8126824..55a2798 100644
29488--- a/drivers/ide/ide-cd.c
29489+++ b/drivers/ide/ide-cd.c
29490@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29491 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29492 if ((unsigned long)buf & alignment
29493 || blk_rq_bytes(rq) & q->dma_pad_mask
29494- || object_is_on_stack(buf))
29495+ || object_starts_on_stack(buf))
29496 drive->dma = 0;
29497 }
29498 }
29499diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29500index a743e68..1cfd674 100644
29501--- a/drivers/ide/ide-pci-generic.c
29502+++ b/drivers/ide/ide-pci-generic.c
29503@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29504 .udma_mask = ATA_UDMA6, \
29505 }
29506
29507-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29508+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29509 /* 0: Unknown */
29510 DECLARE_GENERIC_PCI_DEV(0),
29511
29512diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29513index 560e66d..d5dd180 100644
29514--- a/drivers/ide/it8172.c
29515+++ b/drivers/ide/it8172.c
29516@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29517 .set_dma_mode = it8172_set_dma_mode,
29518 };
29519
29520-static const struct ide_port_info it8172_port_info __devinitdata = {
29521+static const struct ide_port_info it8172_port_info __devinitconst = {
29522 .name = DRV_NAME,
29523 .port_ops = &it8172_port_ops,
29524 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29525diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29526index 46816ba..1847aeb 100644
29527--- a/drivers/ide/it8213.c
29528+++ b/drivers/ide/it8213.c
29529@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29530 .cable_detect = it8213_cable_detect,
29531 };
29532
29533-static const struct ide_port_info it8213_chipset __devinitdata = {
29534+static const struct ide_port_info it8213_chipset __devinitconst = {
29535 .name = DRV_NAME,
29536 .enablebits = { {0x41, 0x80, 0x80} },
29537 .port_ops = &it8213_port_ops,
29538diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29539index 2e3169f..c5611db 100644
29540--- a/drivers/ide/it821x.c
29541+++ b/drivers/ide/it821x.c
29542@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29543 .cable_detect = it821x_cable_detect,
29544 };
29545
29546-static const struct ide_port_info it821x_chipset __devinitdata = {
29547+static const struct ide_port_info it821x_chipset __devinitconst = {
29548 .name = DRV_NAME,
29549 .init_chipset = init_chipset_it821x,
29550 .init_hwif = init_hwif_it821x,
29551diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29552index 74c2c4a..efddd7d 100644
29553--- a/drivers/ide/jmicron.c
29554+++ b/drivers/ide/jmicron.c
29555@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29556 .cable_detect = jmicron_cable_detect,
29557 };
29558
29559-static const struct ide_port_info jmicron_chipset __devinitdata = {
29560+static const struct ide_port_info jmicron_chipset __devinitconst = {
29561 .name = DRV_NAME,
29562 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29563 .port_ops = &jmicron_port_ops,
29564diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29565index 95327a2..73f78d8 100644
29566--- a/drivers/ide/ns87415.c
29567+++ b/drivers/ide/ns87415.c
29568@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29569 .dma_sff_read_status = superio_dma_sff_read_status,
29570 };
29571
29572-static const struct ide_port_info ns87415_chipset __devinitdata = {
29573+static const struct ide_port_info ns87415_chipset __devinitconst = {
29574 .name = DRV_NAME,
29575 .init_hwif = init_hwif_ns87415,
29576 .tp_ops = &ns87415_tp_ops,
29577diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29578index 1a53a4c..39edc66 100644
29579--- a/drivers/ide/opti621.c
29580+++ b/drivers/ide/opti621.c
29581@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29582 .set_pio_mode = opti621_set_pio_mode,
29583 };
29584
29585-static const struct ide_port_info opti621_chipset __devinitdata = {
29586+static const struct ide_port_info opti621_chipset __devinitconst = {
29587 .name = DRV_NAME,
29588 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29589 .port_ops = &opti621_port_ops,
29590diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29591index 9546fe2..2e5ceb6 100644
29592--- a/drivers/ide/pdc202xx_new.c
29593+++ b/drivers/ide/pdc202xx_new.c
29594@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29595 .udma_mask = udma, \
29596 }
29597
29598-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29599+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29600 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29601 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29602 };
29603diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29604index 3a35ec6..5634510 100644
29605--- a/drivers/ide/pdc202xx_old.c
29606+++ b/drivers/ide/pdc202xx_old.c
29607@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29608 .max_sectors = sectors, \
29609 }
29610
29611-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29612+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29613 { /* 0: PDC20246 */
29614 .name = DRV_NAME,
29615 .init_chipset = init_chipset_pdc202xx,
29616diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29617index 1892e81..fe0fd60 100644
29618--- a/drivers/ide/piix.c
29619+++ b/drivers/ide/piix.c
29620@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29621 .udma_mask = udma, \
29622 }
29623
29624-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29625+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29626 /* 0: MPIIX */
29627 { /*
29628 * MPIIX actually has only a single IDE channel mapped to
29629diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29630index a6414a8..c04173e 100644
29631--- a/drivers/ide/rz1000.c
29632+++ b/drivers/ide/rz1000.c
29633@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29634 }
29635 }
29636
29637-static const struct ide_port_info rz1000_chipset __devinitdata = {
29638+static const struct ide_port_info rz1000_chipset __devinitconst = {
29639 .name = DRV_NAME,
29640 .host_flags = IDE_HFLAG_NO_DMA,
29641 };
29642diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29643index 356b9b5..d4758eb 100644
29644--- a/drivers/ide/sc1200.c
29645+++ b/drivers/ide/sc1200.c
29646@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29647 .dma_sff_read_status = ide_dma_sff_read_status,
29648 };
29649
29650-static const struct ide_port_info sc1200_chipset __devinitdata = {
29651+static const struct ide_port_info sc1200_chipset __devinitconst = {
29652 .name = DRV_NAME,
29653 .port_ops = &sc1200_port_ops,
29654 .dma_ops = &sc1200_dma_ops,
29655diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29656index b7f5b0c..9701038 100644
29657--- a/drivers/ide/scc_pata.c
29658+++ b/drivers/ide/scc_pata.c
29659@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29660 .dma_sff_read_status = scc_dma_sff_read_status,
29661 };
29662
29663-static const struct ide_port_info scc_chipset __devinitdata = {
29664+static const struct ide_port_info scc_chipset __devinitconst = {
29665 .name = "sccIDE",
29666 .init_iops = init_iops_scc,
29667 .init_dma = scc_init_dma,
29668diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29669index 35fb8da..24d72ef 100644
29670--- a/drivers/ide/serverworks.c
29671+++ b/drivers/ide/serverworks.c
29672@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29673 .cable_detect = svwks_cable_detect,
29674 };
29675
29676-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29677+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29678 { /* 0: OSB4 */
29679 .name = DRV_NAME,
29680 .init_chipset = init_chipset_svwks,
29681diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29682index ddeda44..46f7e30 100644
29683--- a/drivers/ide/siimage.c
29684+++ b/drivers/ide/siimage.c
29685@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29686 .udma_mask = ATA_UDMA6, \
29687 }
29688
29689-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29690+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29691 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29692 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29693 };
29694diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29695index 4a00225..09e61b4 100644
29696--- a/drivers/ide/sis5513.c
29697+++ b/drivers/ide/sis5513.c
29698@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29699 .cable_detect = sis_cable_detect,
29700 };
29701
29702-static const struct ide_port_info sis5513_chipset __devinitdata = {
29703+static const struct ide_port_info sis5513_chipset __devinitconst = {
29704 .name = DRV_NAME,
29705 .init_chipset = init_chipset_sis5513,
29706 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29707diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29708index f21dc2a..d051cd2 100644
29709--- a/drivers/ide/sl82c105.c
29710+++ b/drivers/ide/sl82c105.c
29711@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29712 .dma_sff_read_status = ide_dma_sff_read_status,
29713 };
29714
29715-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29716+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29717 .name = DRV_NAME,
29718 .init_chipset = init_chipset_sl82c105,
29719 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29720diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29721index 864ffe0..863a5e9 100644
29722--- a/drivers/ide/slc90e66.c
29723+++ b/drivers/ide/slc90e66.c
29724@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29725 .cable_detect = slc90e66_cable_detect,
29726 };
29727
29728-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29729+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29730 .name = DRV_NAME,
29731 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29732 .port_ops = &slc90e66_port_ops,
29733diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29734index 4799d5c..1794678 100644
29735--- a/drivers/ide/tc86c001.c
29736+++ b/drivers/ide/tc86c001.c
29737@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29738 .dma_sff_read_status = ide_dma_sff_read_status,
29739 };
29740
29741-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29742+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29743 .name = DRV_NAME,
29744 .init_hwif = init_hwif_tc86c001,
29745 .port_ops = &tc86c001_port_ops,
29746diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29747index 281c914..55ce1b8 100644
29748--- a/drivers/ide/triflex.c
29749+++ b/drivers/ide/triflex.c
29750@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29751 .set_dma_mode = triflex_set_mode,
29752 };
29753
29754-static const struct ide_port_info triflex_device __devinitdata = {
29755+static const struct ide_port_info triflex_device __devinitconst = {
29756 .name = DRV_NAME,
29757 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29758 .port_ops = &triflex_port_ops,
29759diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29760index 4b42ca0..e494a98 100644
29761--- a/drivers/ide/trm290.c
29762+++ b/drivers/ide/trm290.c
29763@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29764 .dma_check = trm290_dma_check,
29765 };
29766
29767-static const struct ide_port_info trm290_chipset __devinitdata = {
29768+static const struct ide_port_info trm290_chipset __devinitconst = {
29769 .name = DRV_NAME,
29770 .init_hwif = init_hwif_trm290,
29771 .tp_ops = &trm290_tp_ops,
29772diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29773index f46f49c..eb77678 100644
29774--- a/drivers/ide/via82cxxx.c
29775+++ b/drivers/ide/via82cxxx.c
29776@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29777 .cable_detect = via82cxxx_cable_detect,
29778 };
29779
29780-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29781+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29782 .name = DRV_NAME,
29783 .init_chipset = init_chipset_via82cxxx,
29784 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29785diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29786index eb0e2cc..14241c7 100644
29787--- a/drivers/ieee802154/fakehard.c
29788+++ b/drivers/ieee802154/fakehard.c
29789@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29790 phy->transmit_power = 0xbf;
29791
29792 dev->netdev_ops = &fake_ops;
29793- dev->ml_priv = &fake_mlme;
29794+ dev->ml_priv = (void *)&fake_mlme;
29795
29796 priv = netdev_priv(dev);
29797 priv->phy = phy;
29798diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29799index 8b72f39..55df4c8 100644
29800--- a/drivers/infiniband/core/cm.c
29801+++ b/drivers/infiniband/core/cm.c
29802@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29803
29804 struct cm_counter_group {
29805 struct kobject obj;
29806- atomic_long_t counter[CM_ATTR_COUNT];
29807+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29808 };
29809
29810 struct cm_counter_attribute {
29811@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29812 struct ib_mad_send_buf *msg = NULL;
29813 int ret;
29814
29815- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29816+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29817 counter[CM_REQ_COUNTER]);
29818
29819 /* Quick state check to discard duplicate REQs. */
29820@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29821 if (!cm_id_priv)
29822 return;
29823
29824- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29825+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29826 counter[CM_REP_COUNTER]);
29827 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29828 if (ret)
29829@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29830 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29831 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29832 spin_unlock_irq(&cm_id_priv->lock);
29833- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29834+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29835 counter[CM_RTU_COUNTER]);
29836 goto out;
29837 }
29838@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29839 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29840 dreq_msg->local_comm_id);
29841 if (!cm_id_priv) {
29842- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29843+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29844 counter[CM_DREQ_COUNTER]);
29845 cm_issue_drep(work->port, work->mad_recv_wc);
29846 return -EINVAL;
29847@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29848 case IB_CM_MRA_REP_RCVD:
29849 break;
29850 case IB_CM_TIMEWAIT:
29851- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29852+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29853 counter[CM_DREQ_COUNTER]);
29854 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29855 goto unlock;
29856@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29857 cm_free_msg(msg);
29858 goto deref;
29859 case IB_CM_DREQ_RCVD:
29860- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29861+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29862 counter[CM_DREQ_COUNTER]);
29863 goto unlock;
29864 default:
29865@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29866 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29867 cm_id_priv->msg, timeout)) {
29868 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29869- atomic_long_inc(&work->port->
29870+ atomic_long_inc_unchecked(&work->port->
29871 counter_group[CM_RECV_DUPLICATES].
29872 counter[CM_MRA_COUNTER]);
29873 goto out;
29874@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29875 break;
29876 case IB_CM_MRA_REQ_RCVD:
29877 case IB_CM_MRA_REP_RCVD:
29878- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29879+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29880 counter[CM_MRA_COUNTER]);
29881 /* fall through */
29882 default:
29883@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29884 case IB_CM_LAP_IDLE:
29885 break;
29886 case IB_CM_MRA_LAP_SENT:
29887- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29888+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29889 counter[CM_LAP_COUNTER]);
29890 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29891 goto unlock;
29892@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29893 cm_free_msg(msg);
29894 goto deref;
29895 case IB_CM_LAP_RCVD:
29896- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29897+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29898 counter[CM_LAP_COUNTER]);
29899 goto unlock;
29900 default:
29901@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29902 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29903 if (cur_cm_id_priv) {
29904 spin_unlock_irq(&cm.lock);
29905- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29906+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29907 counter[CM_SIDR_REQ_COUNTER]);
29908 goto out; /* Duplicate message. */
29909 }
29910@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29911 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29912 msg->retries = 1;
29913
29914- atomic_long_add(1 + msg->retries,
29915+ atomic_long_add_unchecked(1 + msg->retries,
29916 &port->counter_group[CM_XMIT].counter[attr_index]);
29917 if (msg->retries)
29918- atomic_long_add(msg->retries,
29919+ atomic_long_add_unchecked(msg->retries,
29920 &port->counter_group[CM_XMIT_RETRIES].
29921 counter[attr_index]);
29922
29923@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29924 }
29925
29926 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29927- atomic_long_inc(&port->counter_group[CM_RECV].
29928+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29929 counter[attr_id - CM_ATTR_ID_OFFSET]);
29930
29931 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29932@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29933 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29934
29935 return sprintf(buf, "%ld\n",
29936- atomic_long_read(&group->counter[cm_attr->index]));
29937+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29938 }
29939
29940 static const struct sysfs_ops cm_counter_ops = {
29941diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29942index 176c8f9..2627b62 100644
29943--- a/drivers/infiniband/core/fmr_pool.c
29944+++ b/drivers/infiniband/core/fmr_pool.c
29945@@ -98,8 +98,8 @@ struct ib_fmr_pool {
29946
29947 struct task_struct *thread;
29948
29949- atomic_t req_ser;
29950- atomic_t flush_ser;
29951+ atomic_unchecked_t req_ser;
29952+ atomic_unchecked_t flush_ser;
29953
29954 wait_queue_head_t force_wait;
29955 };
29956@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29957 struct ib_fmr_pool *pool = pool_ptr;
29958
29959 do {
29960- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29961+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29962 ib_fmr_batch_release(pool);
29963
29964- atomic_inc(&pool->flush_ser);
29965+ atomic_inc_unchecked(&pool->flush_ser);
29966 wake_up_interruptible(&pool->force_wait);
29967
29968 if (pool->flush_function)
29969@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29970 }
29971
29972 set_current_state(TASK_INTERRUPTIBLE);
29973- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29974+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29975 !kthread_should_stop())
29976 schedule();
29977 __set_current_state(TASK_RUNNING);
29978@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29979 pool->dirty_watermark = params->dirty_watermark;
29980 pool->dirty_len = 0;
29981 spin_lock_init(&pool->pool_lock);
29982- atomic_set(&pool->req_ser, 0);
29983- atomic_set(&pool->flush_ser, 0);
29984+ atomic_set_unchecked(&pool->req_ser, 0);
29985+ atomic_set_unchecked(&pool->flush_ser, 0);
29986 init_waitqueue_head(&pool->force_wait);
29987
29988 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29989@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29990 }
29991 spin_unlock_irq(&pool->pool_lock);
29992
29993- serial = atomic_inc_return(&pool->req_ser);
29994+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29995 wake_up_process(pool->thread);
29996
29997 if (wait_event_interruptible(pool->force_wait,
29998- atomic_read(&pool->flush_ser) - serial >= 0))
29999+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30000 return -EINTR;
30001
30002 return 0;
30003@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30004 } else {
30005 list_add_tail(&fmr->list, &pool->dirty_list);
30006 if (++pool->dirty_len >= pool->dirty_watermark) {
30007- atomic_inc(&pool->req_ser);
30008+ atomic_inc_unchecked(&pool->req_ser);
30009 wake_up_process(pool->thread);
30010 }
30011 }
30012diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30013index 40c8353..946b0e4 100644
30014--- a/drivers/infiniband/hw/cxgb4/mem.c
30015+++ b/drivers/infiniband/hw/cxgb4/mem.c
30016@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30017 int err;
30018 struct fw_ri_tpte tpt;
30019 u32 stag_idx;
30020- static atomic_t key;
30021+ static atomic_unchecked_t key;
30022
30023 if (c4iw_fatal_error(rdev))
30024 return -EIO;
30025@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30026 &rdev->resource.tpt_fifo_lock);
30027 if (!stag_idx)
30028 return -ENOMEM;
30029- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30030+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30031 }
30032 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30033 __func__, stag_state, type, pdid, stag_idx);
30034diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30035index 79b3dbc..96e5fcc 100644
30036--- a/drivers/infiniband/hw/ipath/ipath_rc.c
30037+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30038@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30039 struct ib_atomic_eth *ateth;
30040 struct ipath_ack_entry *e;
30041 u64 vaddr;
30042- atomic64_t *maddr;
30043+ atomic64_unchecked_t *maddr;
30044 u64 sdata;
30045 u32 rkey;
30046 u8 next;
30047@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30048 IB_ACCESS_REMOTE_ATOMIC)))
30049 goto nack_acc_unlck;
30050 /* Perform atomic OP and save result. */
30051- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30052+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30053 sdata = be64_to_cpu(ateth->swap_data);
30054 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30055 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30056- (u64) atomic64_add_return(sdata, maddr) - sdata :
30057+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30058 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30059 be64_to_cpu(ateth->compare_data),
30060 sdata);
30061diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30062index 1f95bba..9530f87 100644
30063--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30064+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30065@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30066 unsigned long flags;
30067 struct ib_wc wc;
30068 u64 sdata;
30069- atomic64_t *maddr;
30070+ atomic64_unchecked_t *maddr;
30071 enum ib_wc_status send_status;
30072
30073 /*
30074@@ -382,11 +382,11 @@ again:
30075 IB_ACCESS_REMOTE_ATOMIC)))
30076 goto acc_err;
30077 /* Perform atomic OP and save result. */
30078- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30079+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30080 sdata = wqe->wr.wr.atomic.compare_add;
30081 *(u64 *) sqp->s_sge.sge.vaddr =
30082 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30083- (u64) atomic64_add_return(sdata, maddr) - sdata :
30084+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30085 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30086 sdata, wqe->wr.wr.atomic.swap);
30087 goto send_comp;
30088diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30089index 5965b3d..16817fb 100644
30090--- a/drivers/infiniband/hw/nes/nes.c
30091+++ b/drivers/infiniband/hw/nes/nes.c
30092@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30093 LIST_HEAD(nes_adapter_list);
30094 static LIST_HEAD(nes_dev_list);
30095
30096-atomic_t qps_destroyed;
30097+atomic_unchecked_t qps_destroyed;
30098
30099 static unsigned int ee_flsh_adapter;
30100 static unsigned int sysfs_nonidx_addr;
30101@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30102 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30103 struct nes_adapter *nesadapter = nesdev->nesadapter;
30104
30105- atomic_inc(&qps_destroyed);
30106+ atomic_inc_unchecked(&qps_destroyed);
30107
30108 /* Free the control structures */
30109
30110diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30111index 568b4f1..5ea3eff 100644
30112--- a/drivers/infiniband/hw/nes/nes.h
30113+++ b/drivers/infiniband/hw/nes/nes.h
30114@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30115 extern unsigned int wqm_quanta;
30116 extern struct list_head nes_adapter_list;
30117
30118-extern atomic_t cm_connects;
30119-extern atomic_t cm_accepts;
30120-extern atomic_t cm_disconnects;
30121-extern atomic_t cm_closes;
30122-extern atomic_t cm_connecteds;
30123-extern atomic_t cm_connect_reqs;
30124-extern atomic_t cm_rejects;
30125-extern atomic_t mod_qp_timouts;
30126-extern atomic_t qps_created;
30127-extern atomic_t qps_destroyed;
30128-extern atomic_t sw_qps_destroyed;
30129+extern atomic_unchecked_t cm_connects;
30130+extern atomic_unchecked_t cm_accepts;
30131+extern atomic_unchecked_t cm_disconnects;
30132+extern atomic_unchecked_t cm_closes;
30133+extern atomic_unchecked_t cm_connecteds;
30134+extern atomic_unchecked_t cm_connect_reqs;
30135+extern atomic_unchecked_t cm_rejects;
30136+extern atomic_unchecked_t mod_qp_timouts;
30137+extern atomic_unchecked_t qps_created;
30138+extern atomic_unchecked_t qps_destroyed;
30139+extern atomic_unchecked_t sw_qps_destroyed;
30140 extern u32 mh_detected;
30141 extern u32 mh_pauses_sent;
30142 extern u32 cm_packets_sent;
30143@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30144 extern u32 cm_packets_received;
30145 extern u32 cm_packets_dropped;
30146 extern u32 cm_packets_retrans;
30147-extern atomic_t cm_listens_created;
30148-extern atomic_t cm_listens_destroyed;
30149+extern atomic_unchecked_t cm_listens_created;
30150+extern atomic_unchecked_t cm_listens_destroyed;
30151 extern u32 cm_backlog_drops;
30152-extern atomic_t cm_loopbacks;
30153-extern atomic_t cm_nodes_created;
30154-extern atomic_t cm_nodes_destroyed;
30155-extern atomic_t cm_accel_dropped_pkts;
30156-extern atomic_t cm_resets_recvd;
30157-extern atomic_t pau_qps_created;
30158-extern atomic_t pau_qps_destroyed;
30159+extern atomic_unchecked_t cm_loopbacks;
30160+extern atomic_unchecked_t cm_nodes_created;
30161+extern atomic_unchecked_t cm_nodes_destroyed;
30162+extern atomic_unchecked_t cm_accel_dropped_pkts;
30163+extern atomic_unchecked_t cm_resets_recvd;
30164+extern atomic_unchecked_t pau_qps_created;
30165+extern atomic_unchecked_t pau_qps_destroyed;
30166
30167 extern u32 int_mod_timer_init;
30168 extern u32 int_mod_cq_depth_256;
30169diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30170index 0a52d72..0642f36 100644
30171--- a/drivers/infiniband/hw/nes/nes_cm.c
30172+++ b/drivers/infiniband/hw/nes/nes_cm.c
30173@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30174 u32 cm_packets_retrans;
30175 u32 cm_packets_created;
30176 u32 cm_packets_received;
30177-atomic_t cm_listens_created;
30178-atomic_t cm_listens_destroyed;
30179+atomic_unchecked_t cm_listens_created;
30180+atomic_unchecked_t cm_listens_destroyed;
30181 u32 cm_backlog_drops;
30182-atomic_t cm_loopbacks;
30183-atomic_t cm_nodes_created;
30184-atomic_t cm_nodes_destroyed;
30185-atomic_t cm_accel_dropped_pkts;
30186-atomic_t cm_resets_recvd;
30187+atomic_unchecked_t cm_loopbacks;
30188+atomic_unchecked_t cm_nodes_created;
30189+atomic_unchecked_t cm_nodes_destroyed;
30190+atomic_unchecked_t cm_accel_dropped_pkts;
30191+atomic_unchecked_t cm_resets_recvd;
30192
30193 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30194 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30195@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30196
30197 static struct nes_cm_core *g_cm_core;
30198
30199-atomic_t cm_connects;
30200-atomic_t cm_accepts;
30201-atomic_t cm_disconnects;
30202-atomic_t cm_closes;
30203-atomic_t cm_connecteds;
30204-atomic_t cm_connect_reqs;
30205-atomic_t cm_rejects;
30206+atomic_unchecked_t cm_connects;
30207+atomic_unchecked_t cm_accepts;
30208+atomic_unchecked_t cm_disconnects;
30209+atomic_unchecked_t cm_closes;
30210+atomic_unchecked_t cm_connecteds;
30211+atomic_unchecked_t cm_connect_reqs;
30212+atomic_unchecked_t cm_rejects;
30213
30214 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30215 {
30216@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30217 kfree(listener);
30218 listener = NULL;
30219 ret = 0;
30220- atomic_inc(&cm_listens_destroyed);
30221+ atomic_inc_unchecked(&cm_listens_destroyed);
30222 } else {
30223 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30224 }
30225@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30226 cm_node->rem_mac);
30227
30228 add_hte_node(cm_core, cm_node);
30229- atomic_inc(&cm_nodes_created);
30230+ atomic_inc_unchecked(&cm_nodes_created);
30231
30232 return cm_node;
30233 }
30234@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30235 }
30236
30237 atomic_dec(&cm_core->node_cnt);
30238- atomic_inc(&cm_nodes_destroyed);
30239+ atomic_inc_unchecked(&cm_nodes_destroyed);
30240 nesqp = cm_node->nesqp;
30241 if (nesqp) {
30242 nesqp->cm_node = NULL;
30243@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30244
30245 static void drop_packet(struct sk_buff *skb)
30246 {
30247- atomic_inc(&cm_accel_dropped_pkts);
30248+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30249 dev_kfree_skb_any(skb);
30250 }
30251
30252@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30253 {
30254
30255 int reset = 0; /* whether to send reset in case of err.. */
30256- atomic_inc(&cm_resets_recvd);
30257+ atomic_inc_unchecked(&cm_resets_recvd);
30258 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30259 " refcnt=%d\n", cm_node, cm_node->state,
30260 atomic_read(&cm_node->ref_count));
30261@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30262 rem_ref_cm_node(cm_node->cm_core, cm_node);
30263 return NULL;
30264 }
30265- atomic_inc(&cm_loopbacks);
30266+ atomic_inc_unchecked(&cm_loopbacks);
30267 loopbackremotenode->loopbackpartner = cm_node;
30268 loopbackremotenode->tcp_cntxt.rcv_wscale =
30269 NES_CM_DEFAULT_RCV_WND_SCALE;
30270@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30271 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30272 else {
30273 rem_ref_cm_node(cm_core, cm_node);
30274- atomic_inc(&cm_accel_dropped_pkts);
30275+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30276 dev_kfree_skb_any(skb);
30277 }
30278 break;
30279@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30280
30281 if ((cm_id) && (cm_id->event_handler)) {
30282 if (issue_disconn) {
30283- atomic_inc(&cm_disconnects);
30284+ atomic_inc_unchecked(&cm_disconnects);
30285 cm_event.event = IW_CM_EVENT_DISCONNECT;
30286 cm_event.status = disconn_status;
30287 cm_event.local_addr = cm_id->local_addr;
30288@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30289 }
30290
30291 if (issue_close) {
30292- atomic_inc(&cm_closes);
30293+ atomic_inc_unchecked(&cm_closes);
30294 nes_disconnect(nesqp, 1);
30295
30296 cm_id->provider_data = nesqp;
30297@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30298
30299 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30300 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30301- atomic_inc(&cm_accepts);
30302+ atomic_inc_unchecked(&cm_accepts);
30303
30304 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30305 netdev_refcnt_read(nesvnic->netdev));
30306@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30307 struct nes_cm_core *cm_core;
30308 u8 *start_buff;
30309
30310- atomic_inc(&cm_rejects);
30311+ atomic_inc_unchecked(&cm_rejects);
30312 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30313 loopback = cm_node->loopbackpartner;
30314 cm_core = cm_node->cm_core;
30315@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30316 ntohl(cm_id->local_addr.sin_addr.s_addr),
30317 ntohs(cm_id->local_addr.sin_port));
30318
30319- atomic_inc(&cm_connects);
30320+ atomic_inc_unchecked(&cm_connects);
30321 nesqp->active_conn = 1;
30322
30323 /* cache the cm_id in the qp */
30324@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30325 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30326 return err;
30327 }
30328- atomic_inc(&cm_listens_created);
30329+ atomic_inc_unchecked(&cm_listens_created);
30330 }
30331
30332 cm_id->add_ref(cm_id);
30333@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30334
30335 if (nesqp->destroyed)
30336 return;
30337- atomic_inc(&cm_connecteds);
30338+ atomic_inc_unchecked(&cm_connecteds);
30339 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30340 " local port 0x%04X. jiffies = %lu.\n",
30341 nesqp->hwqp.qp_id,
30342@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30343
30344 cm_id->add_ref(cm_id);
30345 ret = cm_id->event_handler(cm_id, &cm_event);
30346- atomic_inc(&cm_closes);
30347+ atomic_inc_unchecked(&cm_closes);
30348 cm_event.event = IW_CM_EVENT_CLOSE;
30349 cm_event.status = 0;
30350 cm_event.provider_data = cm_id->provider_data;
30351@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30352 return;
30353 cm_id = cm_node->cm_id;
30354
30355- atomic_inc(&cm_connect_reqs);
30356+ atomic_inc_unchecked(&cm_connect_reqs);
30357 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30358 cm_node, cm_id, jiffies);
30359
30360@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30361 return;
30362 cm_id = cm_node->cm_id;
30363
30364- atomic_inc(&cm_connect_reqs);
30365+ atomic_inc_unchecked(&cm_connect_reqs);
30366 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30367 cm_node, cm_id, jiffies);
30368
30369diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30370index b3b2a24..7bfaf1e 100644
30371--- a/drivers/infiniband/hw/nes/nes_mgt.c
30372+++ b/drivers/infiniband/hw/nes/nes_mgt.c
30373@@ -40,8 +40,8 @@
30374 #include "nes.h"
30375 #include "nes_mgt.h"
30376
30377-atomic_t pau_qps_created;
30378-atomic_t pau_qps_destroyed;
30379+atomic_unchecked_t pau_qps_created;
30380+atomic_unchecked_t pau_qps_destroyed;
30381
30382 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30383 {
30384@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30385 {
30386 struct sk_buff *skb;
30387 unsigned long flags;
30388- atomic_inc(&pau_qps_destroyed);
30389+ atomic_inc_unchecked(&pau_qps_destroyed);
30390
30391 /* Free packets that have not yet been forwarded */
30392 /* Lock is acquired by skb_dequeue when removing the skb */
30393@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30394 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30395 skb_queue_head_init(&nesqp->pau_list);
30396 spin_lock_init(&nesqp->pau_lock);
30397- atomic_inc(&pau_qps_created);
30398+ atomic_inc_unchecked(&pau_qps_created);
30399 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30400 }
30401
30402diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30403index c00d2f3..8834298 100644
30404--- a/drivers/infiniband/hw/nes/nes_nic.c
30405+++ b/drivers/infiniband/hw/nes/nes_nic.c
30406@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30407 target_stat_values[++index] = mh_detected;
30408 target_stat_values[++index] = mh_pauses_sent;
30409 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30410- target_stat_values[++index] = atomic_read(&cm_connects);
30411- target_stat_values[++index] = atomic_read(&cm_accepts);
30412- target_stat_values[++index] = atomic_read(&cm_disconnects);
30413- target_stat_values[++index] = atomic_read(&cm_connecteds);
30414- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30415- target_stat_values[++index] = atomic_read(&cm_rejects);
30416- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30417- target_stat_values[++index] = atomic_read(&qps_created);
30418- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30419- target_stat_values[++index] = atomic_read(&qps_destroyed);
30420- target_stat_values[++index] = atomic_read(&cm_closes);
30421+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30422+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30423+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30424+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30425+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30426+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30427+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30428+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30429+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30430+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30431+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30432 target_stat_values[++index] = cm_packets_sent;
30433 target_stat_values[++index] = cm_packets_bounced;
30434 target_stat_values[++index] = cm_packets_created;
30435 target_stat_values[++index] = cm_packets_received;
30436 target_stat_values[++index] = cm_packets_dropped;
30437 target_stat_values[++index] = cm_packets_retrans;
30438- target_stat_values[++index] = atomic_read(&cm_listens_created);
30439- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30440+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30441+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30442 target_stat_values[++index] = cm_backlog_drops;
30443- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30444- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30445- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30446- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30447- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30448+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30449+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30450+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30451+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30452+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30453 target_stat_values[++index] = nesadapter->free_4kpbl;
30454 target_stat_values[++index] = nesadapter->free_256pbl;
30455 target_stat_values[++index] = int_mod_timer_init;
30456 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30457 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30458 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30459- target_stat_values[++index] = atomic_read(&pau_qps_created);
30460- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30461+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30462+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30463 }
30464
30465 /**
30466diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30467index 5095bc4..41e8fff 100644
30468--- a/drivers/infiniband/hw/nes/nes_verbs.c
30469+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30470@@ -46,9 +46,9 @@
30471
30472 #include <rdma/ib_umem.h>
30473
30474-atomic_t mod_qp_timouts;
30475-atomic_t qps_created;
30476-atomic_t sw_qps_destroyed;
30477+atomic_unchecked_t mod_qp_timouts;
30478+atomic_unchecked_t qps_created;
30479+atomic_unchecked_t sw_qps_destroyed;
30480
30481 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30482
30483@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30484 if (init_attr->create_flags)
30485 return ERR_PTR(-EINVAL);
30486
30487- atomic_inc(&qps_created);
30488+ atomic_inc_unchecked(&qps_created);
30489 switch (init_attr->qp_type) {
30490 case IB_QPT_RC:
30491 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30492@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30493 struct iw_cm_event cm_event;
30494 int ret = 0;
30495
30496- atomic_inc(&sw_qps_destroyed);
30497+ atomic_inc_unchecked(&sw_qps_destroyed);
30498 nesqp->destroyed = 1;
30499
30500 /* Blow away the connection if it exists. */
30501diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30502index b881bdc..c2e360c 100644
30503--- a/drivers/infiniband/hw/qib/qib.h
30504+++ b/drivers/infiniband/hw/qib/qib.h
30505@@ -51,6 +51,7 @@
30506 #include <linux/completion.h>
30507 #include <linux/kref.h>
30508 #include <linux/sched.h>
30509+#include <linux/slab.h>
30510
30511 #include "qib_common.h"
30512 #include "qib_verbs.h"
30513diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30514index c351aa4..e6967c2 100644
30515--- a/drivers/input/gameport/gameport.c
30516+++ b/drivers/input/gameport/gameport.c
30517@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30518 */
30519 static void gameport_init_port(struct gameport *gameport)
30520 {
30521- static atomic_t gameport_no = ATOMIC_INIT(0);
30522+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30523
30524 __module_get(THIS_MODULE);
30525
30526 mutex_init(&gameport->drv_mutex);
30527 device_initialize(&gameport->dev);
30528 dev_set_name(&gameport->dev, "gameport%lu",
30529- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30530+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30531 gameport->dev.bus = &gameport_bus;
30532 gameport->dev.release = gameport_release_port;
30533 if (gameport->parent)
30534diff --git a/drivers/input/input.c b/drivers/input/input.c
30535index da38d97..2aa0b79 100644
30536--- a/drivers/input/input.c
30537+++ b/drivers/input/input.c
30538@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30539 */
30540 int input_register_device(struct input_dev *dev)
30541 {
30542- static atomic_t input_no = ATOMIC_INIT(0);
30543+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30544 struct input_handler *handler;
30545 const char *path;
30546 int error;
30547@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30548 dev->setkeycode = input_default_setkeycode;
30549
30550 dev_set_name(&dev->dev, "input%ld",
30551- (unsigned long) atomic_inc_return(&input_no) - 1);
30552+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30553
30554 error = device_add(&dev->dev);
30555 if (error)
30556diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30557index b8d8611..7a4a04b 100644
30558--- a/drivers/input/joystick/sidewinder.c
30559+++ b/drivers/input/joystick/sidewinder.c
30560@@ -30,6 +30,7 @@
30561 #include <linux/kernel.h>
30562 #include <linux/module.h>
30563 #include <linux/slab.h>
30564+#include <linux/sched.h>
30565 #include <linux/init.h>
30566 #include <linux/input.h>
30567 #include <linux/gameport.h>
30568diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30569index d728875..844c89b 100644
30570--- a/drivers/input/joystick/xpad.c
30571+++ b/drivers/input/joystick/xpad.c
30572@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30573
30574 static int xpad_led_probe(struct usb_xpad *xpad)
30575 {
30576- static atomic_t led_seq = ATOMIC_INIT(0);
30577+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30578 long led_no;
30579 struct xpad_led *led;
30580 struct led_classdev *led_cdev;
30581@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30582 if (!led)
30583 return -ENOMEM;
30584
30585- led_no = (long)atomic_inc_return(&led_seq) - 1;
30586+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30587
30588 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30589 led->xpad = xpad;
30590diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30591index 0110b5a..d3ad144 100644
30592--- a/drivers/input/mousedev.c
30593+++ b/drivers/input/mousedev.c
30594@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30595
30596 spin_unlock_irq(&client->packet_lock);
30597
30598- if (copy_to_user(buffer, data, count))
30599+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30600 return -EFAULT;
30601
30602 return count;
30603diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30604index ba70058..571d25d 100644
30605--- a/drivers/input/serio/serio.c
30606+++ b/drivers/input/serio/serio.c
30607@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30608 */
30609 static void serio_init_port(struct serio *serio)
30610 {
30611- static atomic_t serio_no = ATOMIC_INIT(0);
30612+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30613
30614 __module_get(THIS_MODULE);
30615
30616@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30617 mutex_init(&serio->drv_mutex);
30618 device_initialize(&serio->dev);
30619 dev_set_name(&serio->dev, "serio%ld",
30620- (long)atomic_inc_return(&serio_no) - 1);
30621+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30622 serio->dev.bus = &serio_bus;
30623 serio->dev.release = serio_release_port;
30624 serio->dev.groups = serio_device_attr_groups;
30625diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30626index e44933d..9ba484a 100644
30627--- a/drivers/isdn/capi/capi.c
30628+++ b/drivers/isdn/capi/capi.c
30629@@ -83,8 +83,8 @@ struct capiminor {
30630
30631 struct capi20_appl *ap;
30632 u32 ncci;
30633- atomic_t datahandle;
30634- atomic_t msgid;
30635+ atomic_unchecked_t datahandle;
30636+ atomic_unchecked_t msgid;
30637
30638 struct tty_port port;
30639 int ttyinstop;
30640@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30641 capimsg_setu16(s, 2, mp->ap->applid);
30642 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30643 capimsg_setu8 (s, 5, CAPI_RESP);
30644- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30645+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30646 capimsg_setu32(s, 8, mp->ncci);
30647 capimsg_setu16(s, 12, datahandle);
30648 }
30649@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30650 mp->outbytes -= len;
30651 spin_unlock_bh(&mp->outlock);
30652
30653- datahandle = atomic_inc_return(&mp->datahandle);
30654+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30655 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30656 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30657 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30658 capimsg_setu16(skb->data, 2, mp->ap->applid);
30659 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30660 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30661- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30662+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30663 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30664 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30665 capimsg_setu16(skb->data, 16, len); /* Data length */
30666diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30667index db621db..825ea1a 100644
30668--- a/drivers/isdn/gigaset/common.c
30669+++ b/drivers/isdn/gigaset/common.c
30670@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30671 cs->commands_pending = 0;
30672 cs->cur_at_seq = 0;
30673 cs->gotfwver = -1;
30674- cs->open_count = 0;
30675+ local_set(&cs->open_count, 0);
30676 cs->dev = NULL;
30677 cs->tty = NULL;
30678 cs->tty_dev = NULL;
30679diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30680index 212efaf..f187c6b 100644
30681--- a/drivers/isdn/gigaset/gigaset.h
30682+++ b/drivers/isdn/gigaset/gigaset.h
30683@@ -35,6 +35,7 @@
30684 #include <linux/tty_driver.h>
30685 #include <linux/list.h>
30686 #include <linux/atomic.h>
30687+#include <asm/local.h>
30688
30689 #define GIG_VERSION {0, 5, 0, 0}
30690 #define GIG_COMPAT {0, 4, 0, 0}
30691@@ -433,7 +434,7 @@ struct cardstate {
30692 spinlock_t cmdlock;
30693 unsigned curlen, cmdbytes;
30694
30695- unsigned open_count;
30696+ local_t open_count;
30697 struct tty_struct *tty;
30698 struct tasklet_struct if_wake_tasklet;
30699 unsigned control_state;
30700diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30701index ee0a549..a7c9798 100644
30702--- a/drivers/isdn/gigaset/interface.c
30703+++ b/drivers/isdn/gigaset/interface.c
30704@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30705 }
30706 tty->driver_data = cs;
30707
30708- ++cs->open_count;
30709-
30710- if (cs->open_count == 1) {
30711+ if (local_inc_return(&cs->open_count) == 1) {
30712 spin_lock_irqsave(&cs->lock, flags);
30713 cs->tty = tty;
30714 spin_unlock_irqrestore(&cs->lock, flags);
30715@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30716
30717 if (!cs->connected)
30718 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30719- else if (!cs->open_count)
30720+ else if (!local_read(&cs->open_count))
30721 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30722 else {
30723- if (!--cs->open_count) {
30724+ if (!local_dec_return(&cs->open_count)) {
30725 spin_lock_irqsave(&cs->lock, flags);
30726 cs->tty = NULL;
30727 spin_unlock_irqrestore(&cs->lock, flags);
30728@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30729 if (!cs->connected) {
30730 gig_dbg(DEBUG_IF, "not connected");
30731 retval = -ENODEV;
30732- } else if (!cs->open_count)
30733+ } else if (!local_read(&cs->open_count))
30734 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30735 else {
30736 retval = 0;
30737@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30738 retval = -ENODEV;
30739 goto done;
30740 }
30741- if (!cs->open_count) {
30742+ if (!local_read(&cs->open_count)) {
30743 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30744 retval = -ENODEV;
30745 goto done;
30746@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30747 if (!cs->connected) {
30748 gig_dbg(DEBUG_IF, "not connected");
30749 retval = -ENODEV;
30750- } else if (!cs->open_count)
30751+ } else if (!local_read(&cs->open_count))
30752 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30753 else if (cs->mstate != MS_LOCKED) {
30754 dev_warn(cs->dev, "can't write to unlocked device\n");
30755@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30756
30757 if (!cs->connected)
30758 gig_dbg(DEBUG_IF, "not connected");
30759- else if (!cs->open_count)
30760+ else if (!local_read(&cs->open_count))
30761 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30762 else if (cs->mstate != MS_LOCKED)
30763 dev_warn(cs->dev, "can't write to unlocked device\n");
30764@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30765
30766 if (!cs->connected)
30767 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30768- else if (!cs->open_count)
30769+ else if (!local_read(&cs->open_count))
30770 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30771 else
30772 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30773@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30774
30775 if (!cs->connected)
30776 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30777- else if (!cs->open_count)
30778+ else if (!local_read(&cs->open_count))
30779 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30780 else
30781 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30782@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30783 goto out;
30784 }
30785
30786- if (!cs->open_count) {
30787+ if (!local_read(&cs->open_count)) {
30788 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30789 goto out;
30790 }
30791diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30792index 2a57da59..e7a12ed 100644
30793--- a/drivers/isdn/hardware/avm/b1.c
30794+++ b/drivers/isdn/hardware/avm/b1.c
30795@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30796 }
30797 if (left) {
30798 if (t4file->user) {
30799- if (copy_from_user(buf, dp, left))
30800+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30801 return -EFAULT;
30802 } else {
30803 memcpy(buf, dp, left);
30804@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30805 }
30806 if (left) {
30807 if (config->user) {
30808- if (copy_from_user(buf, dp, left))
30809+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30810 return -EFAULT;
30811 } else {
30812 memcpy(buf, dp, left);
30813diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30814index 85784a7..a19ca98 100644
30815--- a/drivers/isdn/hardware/eicon/divasync.h
30816+++ b/drivers/isdn/hardware/eicon/divasync.h
30817@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30818 } diva_didd_add_adapter_t;
30819 typedef struct _diva_didd_remove_adapter {
30820 IDI_CALL p_request;
30821-} diva_didd_remove_adapter_t;
30822+} __no_const diva_didd_remove_adapter_t;
30823 typedef struct _diva_didd_read_adapter_array {
30824 void * buffer;
30825 dword length;
30826diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30827index a3bd163..8956575 100644
30828--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30829+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30830@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30831 typedef struct _diva_os_idi_adapter_interface {
30832 diva_init_card_proc_t cleanup_adapter_proc;
30833 diva_cmd_card_proc_t cmd_proc;
30834-} diva_os_idi_adapter_interface_t;
30835+} __no_const diva_os_idi_adapter_interface_t;
30836
30837 typedef struct _diva_os_xdi_adapter {
30838 struct list_head link;
30839diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30840index 1f355bb..43f1fea 100644
30841--- a/drivers/isdn/icn/icn.c
30842+++ b/drivers/isdn/icn/icn.c
30843@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30844 if (count > len)
30845 count = len;
30846 if (user) {
30847- if (copy_from_user(msg, buf, count))
30848+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30849 return -EFAULT;
30850 } else
30851 memcpy(msg, buf, count);
30852diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30853index b5fdcb7..5b6c59f 100644
30854--- a/drivers/lguest/core.c
30855+++ b/drivers/lguest/core.c
30856@@ -92,9 +92,17 @@ static __init int map_switcher(void)
30857 * it's worked so far. The end address needs +1 because __get_vm_area
30858 * allocates an extra guard page, so we need space for that.
30859 */
30860+
30861+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30862+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30863+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30864+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30865+#else
30866 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30867 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30868 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30869+#endif
30870+
30871 if (!switcher_vma) {
30872 err = -ENOMEM;
30873 printk("lguest: could not map switcher pages high\n");
30874@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30875 * Now the Switcher is mapped at the right address, we can't fail!
30876 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30877 */
30878- memcpy(switcher_vma->addr, start_switcher_text,
30879+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30880 end_switcher_text - start_switcher_text);
30881
30882 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30883diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30884index 65af42f..530c87a 100644
30885--- a/drivers/lguest/x86/core.c
30886+++ b/drivers/lguest/x86/core.c
30887@@ -59,7 +59,7 @@ static struct {
30888 /* Offset from where switcher.S was compiled to where we've copied it */
30889 static unsigned long switcher_offset(void)
30890 {
30891- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30892+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30893 }
30894
30895 /* This cpu's struct lguest_pages. */
30896@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30897 * These copies are pretty cheap, so we do them unconditionally: */
30898 /* Save the current Host top-level page directory.
30899 */
30900+
30901+#ifdef CONFIG_PAX_PER_CPU_PGD
30902+ pages->state.host_cr3 = read_cr3();
30903+#else
30904 pages->state.host_cr3 = __pa(current->mm->pgd);
30905+#endif
30906+
30907 /*
30908 * Set up the Guest's page tables to see this CPU's pages (and no
30909 * other CPU's pages).
30910@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30911 * compiled-in switcher code and the high-mapped copy we just made.
30912 */
30913 for (i = 0; i < IDT_ENTRIES; i++)
30914- default_idt_entries[i] += switcher_offset();
30915+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30916
30917 /*
30918 * Set up the Switcher's per-cpu areas.
30919@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30920 * it will be undisturbed when we switch. To change %cs and jump we
30921 * need this structure to feed to Intel's "lcall" instruction.
30922 */
30923- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30924+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30925 lguest_entry.segment = LGUEST_CS;
30926
30927 /*
30928diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30929index 40634b0..4f5855e 100644
30930--- a/drivers/lguest/x86/switcher_32.S
30931+++ b/drivers/lguest/x86/switcher_32.S
30932@@ -87,6 +87,7 @@
30933 #include <asm/page.h>
30934 #include <asm/segment.h>
30935 #include <asm/lguest.h>
30936+#include <asm/processor-flags.h>
30937
30938 // We mark the start of the code to copy
30939 // It's placed in .text tho it's never run here
30940@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30941 // Changes type when we load it: damn Intel!
30942 // For after we switch over our page tables
30943 // That entry will be read-only: we'd crash.
30944+
30945+#ifdef CONFIG_PAX_KERNEXEC
30946+ mov %cr0, %edx
30947+ xor $X86_CR0_WP, %edx
30948+ mov %edx, %cr0
30949+#endif
30950+
30951 movl $(GDT_ENTRY_TSS*8), %edx
30952 ltr %dx
30953
30954@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30955 // Let's clear it again for our return.
30956 // The GDT descriptor of the Host
30957 // Points to the table after two "size" bytes
30958- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30959+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30960 // Clear "used" from type field (byte 5, bit 2)
30961- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30962+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30963+
30964+#ifdef CONFIG_PAX_KERNEXEC
30965+ mov %cr0, %eax
30966+ xor $X86_CR0_WP, %eax
30967+ mov %eax, %cr0
30968+#endif
30969
30970 // Once our page table's switched, the Guest is live!
30971 // The Host fades as we run this final step.
30972@@ -295,13 +309,12 @@ deliver_to_host:
30973 // I consulted gcc, and it gave
30974 // These instructions, which I gladly credit:
30975 leal (%edx,%ebx,8), %eax
30976- movzwl (%eax),%edx
30977- movl 4(%eax), %eax
30978- xorw %ax, %ax
30979- orl %eax, %edx
30980+ movl 4(%eax), %edx
30981+ movw (%eax), %dx
30982 // Now the address of the handler's in %edx
30983 // We call it now: its "iret" drops us home.
30984- jmp *%edx
30985+ ljmp $__KERNEL_CS, $1f
30986+1: jmp *%edx
30987
30988 // Every interrupt can come to us here
30989 // But we must truly tell each apart.
30990diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30991index 4daf9e5..b8d1d0f 100644
30992--- a/drivers/macintosh/macio_asic.c
30993+++ b/drivers/macintosh/macio_asic.c
30994@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30995 * MacIO is matched against any Apple ID, it's probe() function
30996 * will then decide wether it applies or not
30997 */
30998-static const struct pci_device_id __devinitdata pci_ids [] = { {
30999+static const struct pci_device_id __devinitconst pci_ids [] = { {
31000 .vendor = PCI_VENDOR_ID_APPLE,
31001 .device = PCI_ANY_ID,
31002 .subvendor = PCI_ANY_ID,
31003diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31004index 31c2dc2..a2de7a6 100644
31005--- a/drivers/md/dm-ioctl.c
31006+++ b/drivers/md/dm-ioctl.c
31007@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31008 cmd == DM_LIST_VERSIONS_CMD)
31009 return 0;
31010
31011- if ((cmd == DM_DEV_CREATE_CMD)) {
31012+ if (cmd == DM_DEV_CREATE_CMD) {
31013 if (!*param->name) {
31014 DMWARN("name not supplied when creating device");
31015 return -EINVAL;
31016diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31017index 9bfd057..01180bc 100644
31018--- a/drivers/md/dm-raid1.c
31019+++ b/drivers/md/dm-raid1.c
31020@@ -40,7 +40,7 @@ enum dm_raid1_error {
31021
31022 struct mirror {
31023 struct mirror_set *ms;
31024- atomic_t error_count;
31025+ atomic_unchecked_t error_count;
31026 unsigned long error_type;
31027 struct dm_dev *dev;
31028 sector_t offset;
31029@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31030 struct mirror *m;
31031
31032 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31033- if (!atomic_read(&m->error_count))
31034+ if (!atomic_read_unchecked(&m->error_count))
31035 return m;
31036
31037 return NULL;
31038@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31039 * simple way to tell if a device has encountered
31040 * errors.
31041 */
31042- atomic_inc(&m->error_count);
31043+ atomic_inc_unchecked(&m->error_count);
31044
31045 if (test_and_set_bit(error_type, &m->error_type))
31046 return;
31047@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31048 struct mirror *m = get_default_mirror(ms);
31049
31050 do {
31051- if (likely(!atomic_read(&m->error_count)))
31052+ if (likely(!atomic_read_unchecked(&m->error_count)))
31053 return m;
31054
31055 if (m-- == ms->mirror)
31056@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31057 {
31058 struct mirror *default_mirror = get_default_mirror(m->ms);
31059
31060- return !atomic_read(&default_mirror->error_count);
31061+ return !atomic_read_unchecked(&default_mirror->error_count);
31062 }
31063
31064 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31065@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31066 */
31067 if (likely(region_in_sync(ms, region, 1)))
31068 m = choose_mirror(ms, bio->bi_sector);
31069- else if (m && atomic_read(&m->error_count))
31070+ else if (m && atomic_read_unchecked(&m->error_count))
31071 m = NULL;
31072
31073 if (likely(m))
31074@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31075 }
31076
31077 ms->mirror[mirror].ms = ms;
31078- atomic_set(&(ms->mirror[mirror].error_count), 0);
31079+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31080 ms->mirror[mirror].error_type = 0;
31081 ms->mirror[mirror].offset = offset;
31082
31083@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31084 */
31085 static char device_status_char(struct mirror *m)
31086 {
31087- if (!atomic_read(&(m->error_count)))
31088+ if (!atomic_read_unchecked(&(m->error_count)))
31089 return 'A';
31090
31091 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31092diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31093index 3d80cf0..b77cc47 100644
31094--- a/drivers/md/dm-stripe.c
31095+++ b/drivers/md/dm-stripe.c
31096@@ -20,7 +20,7 @@ struct stripe {
31097 struct dm_dev *dev;
31098 sector_t physical_start;
31099
31100- atomic_t error_count;
31101+ atomic_unchecked_t error_count;
31102 };
31103
31104 struct stripe_c {
31105@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31106 kfree(sc);
31107 return r;
31108 }
31109- atomic_set(&(sc->stripe[i].error_count), 0);
31110+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31111 }
31112
31113 ti->private = sc;
31114@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31115 DMEMIT("%d ", sc->stripes);
31116 for (i = 0; i < sc->stripes; i++) {
31117 DMEMIT("%s ", sc->stripe[i].dev->name);
31118- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31119+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31120 'D' : 'A';
31121 }
31122 buffer[i] = '\0';
31123@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31124 */
31125 for (i = 0; i < sc->stripes; i++)
31126 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31127- atomic_inc(&(sc->stripe[i].error_count));
31128- if (atomic_read(&(sc->stripe[i].error_count)) <
31129+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31130+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31131 DM_IO_ERROR_THRESHOLD)
31132 schedule_work(&sc->trigger_event);
31133 }
31134diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31135index 8e91321..fd17aef 100644
31136--- a/drivers/md/dm-table.c
31137+++ b/drivers/md/dm-table.c
31138@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31139 if (!dev_size)
31140 return 0;
31141
31142- if ((start >= dev_size) || (start + len > dev_size)) {
31143+ if ((start >= dev_size) || (len > dev_size - start)) {
31144 DMWARN("%s: %s too small for target: "
31145 "start=%llu, len=%llu, dev_size=%llu",
31146 dm_device_name(ti->table->md), bdevname(bdev, b),
31147diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31148index 59c4f04..4c7b661 100644
31149--- a/drivers/md/dm-thin-metadata.c
31150+++ b/drivers/md/dm-thin-metadata.c
31151@@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31152
31153 pmd->info.tm = tm;
31154 pmd->info.levels = 2;
31155- pmd->info.value_type.context = pmd->data_sm;
31156+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31157 pmd->info.value_type.size = sizeof(__le64);
31158 pmd->info.value_type.inc = data_block_inc;
31159 pmd->info.value_type.dec = data_block_dec;
31160@@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31161
31162 pmd->bl_info.tm = tm;
31163 pmd->bl_info.levels = 1;
31164- pmd->bl_info.value_type.context = pmd->data_sm;
31165+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31166 pmd->bl_info.value_type.size = sizeof(__le64);
31167 pmd->bl_info.value_type.inc = data_block_inc;
31168 pmd->bl_info.value_type.dec = data_block_dec;
31169diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31170index 4720f68..78d1df7 100644
31171--- a/drivers/md/dm.c
31172+++ b/drivers/md/dm.c
31173@@ -177,9 +177,9 @@ struct mapped_device {
31174 /*
31175 * Event handling.
31176 */
31177- atomic_t event_nr;
31178+ atomic_unchecked_t event_nr;
31179 wait_queue_head_t eventq;
31180- atomic_t uevent_seq;
31181+ atomic_unchecked_t uevent_seq;
31182 struct list_head uevent_list;
31183 spinlock_t uevent_lock; /* Protect access to uevent_list */
31184
31185@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31186 rwlock_init(&md->map_lock);
31187 atomic_set(&md->holders, 1);
31188 atomic_set(&md->open_count, 0);
31189- atomic_set(&md->event_nr, 0);
31190- atomic_set(&md->uevent_seq, 0);
31191+ atomic_set_unchecked(&md->event_nr, 0);
31192+ atomic_set_unchecked(&md->uevent_seq, 0);
31193 INIT_LIST_HEAD(&md->uevent_list);
31194 spin_lock_init(&md->uevent_lock);
31195
31196@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31197
31198 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31199
31200- atomic_inc(&md->event_nr);
31201+ atomic_inc_unchecked(&md->event_nr);
31202 wake_up(&md->eventq);
31203 }
31204
31205@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31206
31207 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31208 {
31209- return atomic_add_return(1, &md->uevent_seq);
31210+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31211 }
31212
31213 uint32_t dm_get_event_nr(struct mapped_device *md)
31214 {
31215- return atomic_read(&md->event_nr);
31216+ return atomic_read_unchecked(&md->event_nr);
31217 }
31218
31219 int dm_wait_event(struct mapped_device *md, int event_nr)
31220 {
31221 return wait_event_interruptible(md->eventq,
31222- (event_nr != atomic_read(&md->event_nr)));
31223+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31224 }
31225
31226 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31227diff --git a/drivers/md/md.c b/drivers/md/md.c
31228index f47f1f8..b7f559e 100644
31229--- a/drivers/md/md.c
31230+++ b/drivers/md/md.c
31231@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31232 * start build, activate spare
31233 */
31234 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31235-static atomic_t md_event_count;
31236+static atomic_unchecked_t md_event_count;
31237 void md_new_event(struct mddev *mddev)
31238 {
31239- atomic_inc(&md_event_count);
31240+ atomic_inc_unchecked(&md_event_count);
31241 wake_up(&md_event_waiters);
31242 }
31243 EXPORT_SYMBOL_GPL(md_new_event);
31244@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31245 */
31246 static void md_new_event_inintr(struct mddev *mddev)
31247 {
31248- atomic_inc(&md_event_count);
31249+ atomic_inc_unchecked(&md_event_count);
31250 wake_up(&md_event_waiters);
31251 }
31252
31253@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31254
31255 rdev->preferred_minor = 0xffff;
31256 rdev->data_offset = le64_to_cpu(sb->data_offset);
31257- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31258+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31259
31260 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31261 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31262@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31263 else
31264 sb->resync_offset = cpu_to_le64(0);
31265
31266- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31267+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31268
31269 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31270 sb->size = cpu_to_le64(mddev->dev_sectors);
31271@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31272 static ssize_t
31273 errors_show(struct md_rdev *rdev, char *page)
31274 {
31275- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31276+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31277 }
31278
31279 static ssize_t
31280@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31281 char *e;
31282 unsigned long n = simple_strtoul(buf, &e, 10);
31283 if (*buf && (*e == 0 || *e == '\n')) {
31284- atomic_set(&rdev->corrected_errors, n);
31285+ atomic_set_unchecked(&rdev->corrected_errors, n);
31286 return len;
31287 }
31288 return -EINVAL;
31289@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31290 rdev->sb_loaded = 0;
31291 rdev->bb_page = NULL;
31292 atomic_set(&rdev->nr_pending, 0);
31293- atomic_set(&rdev->read_errors, 0);
31294- atomic_set(&rdev->corrected_errors, 0);
31295+ atomic_set_unchecked(&rdev->read_errors, 0);
31296+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31297
31298 INIT_LIST_HEAD(&rdev->same_set);
31299 init_waitqueue_head(&rdev->blocked_wait);
31300@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31301
31302 spin_unlock(&pers_lock);
31303 seq_printf(seq, "\n");
31304- seq->poll_event = atomic_read(&md_event_count);
31305+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31306 return 0;
31307 }
31308 if (v == (void*)2) {
31309@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31310 chunk_kb ? "KB" : "B");
31311 if (bitmap->file) {
31312 seq_printf(seq, ", file: ");
31313- seq_path(seq, &bitmap->file->f_path, " \t\n");
31314+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31315 }
31316
31317 seq_printf(seq, "\n");
31318@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31319 return error;
31320
31321 seq = file->private_data;
31322- seq->poll_event = atomic_read(&md_event_count);
31323+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31324 return error;
31325 }
31326
31327@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31328 /* always allow read */
31329 mask = POLLIN | POLLRDNORM;
31330
31331- if (seq->poll_event != atomic_read(&md_event_count))
31332+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31333 mask |= POLLERR | POLLPRI;
31334 return mask;
31335 }
31336@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31337 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31338 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31339 (int)part_stat_read(&disk->part0, sectors[1]) -
31340- atomic_read(&disk->sync_io);
31341+ atomic_read_unchecked(&disk->sync_io);
31342 /* sync IO will cause sync_io to increase before the disk_stats
31343 * as sync_io is counted when a request starts, and
31344 * disk_stats is counted when it completes.
31345diff --git a/drivers/md/md.h b/drivers/md/md.h
31346index cf742d9..7c7c745 100644
31347--- a/drivers/md/md.h
31348+++ b/drivers/md/md.h
31349@@ -120,13 +120,13 @@ struct md_rdev {
31350 * only maintained for arrays that
31351 * support hot removal
31352 */
31353- atomic_t read_errors; /* number of consecutive read errors that
31354+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31355 * we have tried to ignore.
31356 */
31357 struct timespec last_read_error; /* monotonic time since our
31358 * last read error
31359 */
31360- atomic_t corrected_errors; /* number of corrected read errors,
31361+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31362 * for reporting to userspace and storing
31363 * in superblock.
31364 */
31365@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31366
31367 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31368 {
31369- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31370+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31371 }
31372
31373 struct md_personality
31374diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31375index 50ed53b..4f29d7d 100644
31376--- a/drivers/md/persistent-data/dm-space-map-checker.c
31377+++ b/drivers/md/persistent-data/dm-space-map-checker.c
31378@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31379 /*----------------------------------------------------------------*/
31380
31381 struct sm_checker {
31382- struct dm_space_map sm;
31383+ dm_space_map_no_const sm;
31384
31385 struct count_array old_counts;
31386 struct count_array counts;
31387diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31388index fc469ba..2d91555 100644
31389--- a/drivers/md/persistent-data/dm-space-map-disk.c
31390+++ b/drivers/md/persistent-data/dm-space-map-disk.c
31391@@ -23,7 +23,7 @@
31392 * Space map interface.
31393 */
31394 struct sm_disk {
31395- struct dm_space_map sm;
31396+ dm_space_map_no_const sm;
31397
31398 struct ll_disk ll;
31399 struct ll_disk old_ll;
31400diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31401index e89ae5e..062e4c2 100644
31402--- a/drivers/md/persistent-data/dm-space-map-metadata.c
31403+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31404@@ -43,7 +43,7 @@ struct block_op {
31405 };
31406
31407 struct sm_metadata {
31408- struct dm_space_map sm;
31409+ dm_space_map_no_const sm;
31410
31411 struct ll_disk ll;
31412 struct ll_disk old_ll;
31413diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31414index 1cbfc6b..56e1dbb 100644
31415--- a/drivers/md/persistent-data/dm-space-map.h
31416+++ b/drivers/md/persistent-data/dm-space-map.h
31417@@ -60,6 +60,7 @@ struct dm_space_map {
31418 int (*root_size)(struct dm_space_map *sm, size_t *result);
31419 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31420 };
31421+typedef struct dm_space_map __no_const dm_space_map_no_const;
31422
31423 /*----------------------------------------------------------------*/
31424
31425diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31426index 7d9e071..015b1d5 100644
31427--- a/drivers/md/raid1.c
31428+++ b/drivers/md/raid1.c
31429@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31430 if (r1_sync_page_io(rdev, sect, s,
31431 bio->bi_io_vec[idx].bv_page,
31432 READ) != 0)
31433- atomic_add(s, &rdev->corrected_errors);
31434+ atomic_add_unchecked(s, &rdev->corrected_errors);
31435 }
31436 sectors -= s;
31437 sect += s;
31438@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31439 test_bit(In_sync, &rdev->flags)) {
31440 if (r1_sync_page_io(rdev, sect, s,
31441 conf->tmppage, READ)) {
31442- atomic_add(s, &rdev->corrected_errors);
31443+ atomic_add_unchecked(s, &rdev->corrected_errors);
31444 printk(KERN_INFO
31445 "md/raid1:%s: read error corrected "
31446 "(%d sectors at %llu on %s)\n",
31447diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31448index 685ddf3..955b087 100644
31449--- a/drivers/md/raid10.c
31450+++ b/drivers/md/raid10.c
31451@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31452 /* The write handler will notice the lack of
31453 * R10BIO_Uptodate and record any errors etc
31454 */
31455- atomic_add(r10_bio->sectors,
31456+ atomic_add_unchecked(r10_bio->sectors,
31457 &conf->mirrors[d].rdev->corrected_errors);
31458
31459 /* for reconstruct, we always reschedule after a read.
31460@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31461 {
31462 struct timespec cur_time_mon;
31463 unsigned long hours_since_last;
31464- unsigned int read_errors = atomic_read(&rdev->read_errors);
31465+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31466
31467 ktime_get_ts(&cur_time_mon);
31468
31469@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31470 * overflowing the shift of read_errors by hours_since_last.
31471 */
31472 if (hours_since_last >= 8 * sizeof(read_errors))
31473- atomic_set(&rdev->read_errors, 0);
31474+ atomic_set_unchecked(&rdev->read_errors, 0);
31475 else
31476- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31477+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31478 }
31479
31480 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31481@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31482 return;
31483
31484 check_decay_read_errors(mddev, rdev);
31485- atomic_inc(&rdev->read_errors);
31486- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31487+ atomic_inc_unchecked(&rdev->read_errors);
31488+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31489 char b[BDEVNAME_SIZE];
31490 bdevname(rdev->bdev, b);
31491
31492@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31493 "md/raid10:%s: %s: Raid device exceeded "
31494 "read_error threshold [cur %d:max %d]\n",
31495 mdname(mddev), b,
31496- atomic_read(&rdev->read_errors), max_read_errors);
31497+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31498 printk(KERN_NOTICE
31499 "md/raid10:%s: %s: Failing raid device\n",
31500 mdname(mddev), b);
31501@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31502 (unsigned long long)(
31503 sect + rdev->data_offset),
31504 bdevname(rdev->bdev, b));
31505- atomic_add(s, &rdev->corrected_errors);
31506+ atomic_add_unchecked(s, &rdev->corrected_errors);
31507 }
31508
31509 rdev_dec_pending(rdev, mddev);
31510diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31511index 858fdbb..b2dac95 100644
31512--- a/drivers/md/raid5.c
31513+++ b/drivers/md/raid5.c
31514@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31515 (unsigned long long)(sh->sector
31516 + rdev->data_offset),
31517 bdevname(rdev->bdev, b));
31518- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31519+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31520 clear_bit(R5_ReadError, &sh->dev[i].flags);
31521 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31522 }
31523- if (atomic_read(&conf->disks[i].rdev->read_errors))
31524- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31525+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31526+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31527 } else {
31528 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31529 int retry = 0;
31530 rdev = conf->disks[i].rdev;
31531
31532 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31533- atomic_inc(&rdev->read_errors);
31534+ atomic_inc_unchecked(&rdev->read_errors);
31535 if (conf->mddev->degraded >= conf->max_degraded)
31536 printk_ratelimited(
31537 KERN_WARNING
31538@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31539 (unsigned long long)(sh->sector
31540 + rdev->data_offset),
31541 bdn);
31542- else if (atomic_read(&rdev->read_errors)
31543+ else if (atomic_read_unchecked(&rdev->read_errors)
31544 > conf->max_nr_stripes)
31545 printk(KERN_WARNING
31546 "md/raid:%s: Too many read errors, failing device %s.\n",
31547diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31548index ba9a643..e474ab5 100644
31549--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31550+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31551@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31552 .subvendor = _subvend, .subdevice = _subdev, \
31553 .driver_data = (unsigned long)&_driverdata }
31554
31555-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31556+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31557 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31558 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31559 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31560diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31561index a7d876f..8c21b61 100644
31562--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31563+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31564@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31565 union {
31566 dmx_ts_cb ts;
31567 dmx_section_cb sec;
31568- } cb;
31569+ } __no_const cb;
31570
31571 struct dvb_demux *demux;
31572 void *priv;
31573diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31574index f732877..d38c35a 100644
31575--- a/drivers/media/dvb/dvb-core/dvbdev.c
31576+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31577@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31578 const struct dvb_device *template, void *priv, int type)
31579 {
31580 struct dvb_device *dvbdev;
31581- struct file_operations *dvbdevfops;
31582+ file_operations_no_const *dvbdevfops;
31583 struct device *clsdev;
31584 int minor;
31585 int id;
31586diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31587index 9f2a02c..5920f88 100644
31588--- a/drivers/media/dvb/dvb-usb/cxusb.c
31589+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31590@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31591 struct dib0700_adapter_state {
31592 int (*set_param_save) (struct dvb_frontend *,
31593 struct dvb_frontend_parameters *);
31594-};
31595+} __no_const;
31596
31597 static int dib7070_set_param_override(struct dvb_frontend *fe,
31598 struct dvb_frontend_parameters *fep)
31599diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31600index f103ec1..5e8968b 100644
31601--- a/drivers/media/dvb/dvb-usb/dw2102.c
31602+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31603@@ -95,7 +95,7 @@ struct su3000_state {
31604
31605 struct s6x0_state {
31606 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31607-};
31608+} __no_const;
31609
31610 /* debug */
31611 static int dvb_usb_dw2102_debug;
31612diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31613index 404f63a..4796533 100644
31614--- a/drivers/media/dvb/frontends/dib3000.h
31615+++ b/drivers/media/dvb/frontends/dib3000.h
31616@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31617 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31618 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31619 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31620-};
31621+} __no_const;
31622
31623 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31624 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31625diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31626index 90bf573..e8463da 100644
31627--- a/drivers/media/dvb/frontends/ds3000.c
31628+++ b/drivers/media/dvb/frontends/ds3000.c
31629@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31630
31631 for (i = 0; i < 30 ; i++) {
31632 ds3000_read_status(fe, &status);
31633- if (status && FE_HAS_LOCK)
31634+ if (status & FE_HAS_LOCK)
31635 break;
31636
31637 msleep(10);
31638diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31639index 0564192..75b16f5 100644
31640--- a/drivers/media/dvb/ngene/ngene-cards.c
31641+++ b/drivers/media/dvb/ngene/ngene-cards.c
31642@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31643
31644 /****************************************************************************/
31645
31646-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31647+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31648 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31649 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31650 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31651diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31652index 16a089f..ab1667d 100644
31653--- a/drivers/media/radio/radio-cadet.c
31654+++ b/drivers/media/radio/radio-cadet.c
31655@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31656 unsigned char readbuf[RDS_BUFFER];
31657 int i = 0;
31658
31659+ if (count > RDS_BUFFER)
31660+ return -EFAULT;
31661 mutex_lock(&dev->lock);
31662 if (dev->rdsstat == 0) {
31663 dev->rdsstat = 1;
31664diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31665index 61287fc..8b08712 100644
31666--- a/drivers/media/rc/redrat3.c
31667+++ b/drivers/media/rc/redrat3.c
31668@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31669 return carrier;
31670 }
31671
31672-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31673+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31674 {
31675 struct redrat3_dev *rr3 = rcdev->priv;
31676 struct device *dev = rr3->dev;
31677diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31678index 9cde353..8c6a1c3 100644
31679--- a/drivers/media/video/au0828/au0828.h
31680+++ b/drivers/media/video/au0828/au0828.h
31681@@ -191,7 +191,7 @@ struct au0828_dev {
31682
31683 /* I2C */
31684 struct i2c_adapter i2c_adap;
31685- struct i2c_algorithm i2c_algo;
31686+ i2c_algorithm_no_const i2c_algo;
31687 struct i2c_client i2c_client;
31688 u32 i2c_rc;
31689
31690diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31691index 68d1240..46b32eb 100644
31692--- a/drivers/media/video/cx88/cx88-alsa.c
31693+++ b/drivers/media/video/cx88/cx88-alsa.c
31694@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31695 * Only boards with eeprom and byte 1 at eeprom=1 have it
31696 */
31697
31698-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31699+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31700 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31701 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31702 {0, }
31703diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31704index 305e6aa..0143317 100644
31705--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31706+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31707@@ -196,7 +196,7 @@ struct pvr2_hdw {
31708
31709 /* I2C stuff */
31710 struct i2c_adapter i2c_adap;
31711- struct i2c_algorithm i2c_algo;
31712+ i2c_algorithm_no_const i2c_algo;
31713 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31714 int i2c_cx25840_hack_state;
31715 int i2c_linked;
31716diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31717index a0895bf..b7ebb1b 100644
31718--- a/drivers/media/video/timblogiw.c
31719+++ b/drivers/media/video/timblogiw.c
31720@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31721
31722 /* Platform device functions */
31723
31724-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31725+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31726 .vidioc_querycap = timblogiw_querycap,
31727 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31728 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31729@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31730 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31731 };
31732
31733-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31734+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31735 .owner = THIS_MODULE,
31736 .open = timblogiw_open,
31737 .release = timblogiw_close,
31738diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31739index e9c6a60..daf6a33 100644
31740--- a/drivers/message/fusion/mptbase.c
31741+++ b/drivers/message/fusion/mptbase.c
31742@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31743 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31744 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31745
31746+#ifdef CONFIG_GRKERNSEC_HIDESYM
31747+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31748+#else
31749 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31750 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31751+#endif
31752+
31753 /*
31754 * Rounding UP to nearest 4-kB boundary here...
31755 */
31756diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31757index 9d95042..b808101 100644
31758--- a/drivers/message/fusion/mptsas.c
31759+++ b/drivers/message/fusion/mptsas.c
31760@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31761 return 0;
31762 }
31763
31764+static inline void
31765+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31766+{
31767+ if (phy_info->port_details) {
31768+ phy_info->port_details->rphy = rphy;
31769+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31770+ ioc->name, rphy));
31771+ }
31772+
31773+ if (rphy) {
31774+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31775+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31776+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31777+ ioc->name, rphy, rphy->dev.release));
31778+ }
31779+}
31780+
31781 /* no mutex */
31782 static void
31783 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31784@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31785 return NULL;
31786 }
31787
31788-static inline void
31789-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31790-{
31791- if (phy_info->port_details) {
31792- phy_info->port_details->rphy = rphy;
31793- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31794- ioc->name, rphy));
31795- }
31796-
31797- if (rphy) {
31798- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31799- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31800- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31801- ioc->name, rphy, rphy->dev.release));
31802- }
31803-}
31804-
31805 static inline struct sas_port *
31806 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31807 {
31808diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31809index 0c3ced7..1fe34ec 100644
31810--- a/drivers/message/fusion/mptscsih.c
31811+++ b/drivers/message/fusion/mptscsih.c
31812@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31813
31814 h = shost_priv(SChost);
31815
31816- if (h) {
31817- if (h->info_kbuf == NULL)
31818- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31819- return h->info_kbuf;
31820- h->info_kbuf[0] = '\0';
31821+ if (!h)
31822+ return NULL;
31823
31824- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31825- h->info_kbuf[size-1] = '\0';
31826- }
31827+ if (h->info_kbuf == NULL)
31828+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31829+ return h->info_kbuf;
31830+ h->info_kbuf[0] = '\0';
31831+
31832+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31833+ h->info_kbuf[size-1] = '\0';
31834
31835 return h->info_kbuf;
31836 }
31837diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31838index 07dbeaf..5533142 100644
31839--- a/drivers/message/i2o/i2o_proc.c
31840+++ b/drivers/message/i2o/i2o_proc.c
31841@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31842 "Array Controller Device"
31843 };
31844
31845-static char *chtostr(u8 * chars, int n)
31846-{
31847- char tmp[256];
31848- tmp[0] = 0;
31849- return strncat(tmp, (char *)chars, n);
31850-}
31851-
31852 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31853 char *group)
31854 {
31855@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31856
31857 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31858 seq_printf(seq, "%-#8x", ddm_table.module_id);
31859- seq_printf(seq, "%-29s",
31860- chtostr(ddm_table.module_name_version, 28));
31861+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31862 seq_printf(seq, "%9d ", ddm_table.data_size);
31863 seq_printf(seq, "%8d", ddm_table.code_size);
31864
31865@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31866
31867 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31868 seq_printf(seq, "%-#8x", dst->module_id);
31869- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31870- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31871+ seq_printf(seq, "%-.28s", dst->module_name_version);
31872+ seq_printf(seq, "%-.8s", dst->date);
31873 seq_printf(seq, "%8d ", dst->module_size);
31874 seq_printf(seq, "%8d ", dst->mpb_size);
31875 seq_printf(seq, "0x%04x", dst->module_flags);
31876@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31877 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31878 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31879 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31880- seq_printf(seq, "Vendor info : %s\n",
31881- chtostr((u8 *) (work32 + 2), 16));
31882- seq_printf(seq, "Product info : %s\n",
31883- chtostr((u8 *) (work32 + 6), 16));
31884- seq_printf(seq, "Description : %s\n",
31885- chtostr((u8 *) (work32 + 10), 16));
31886- seq_printf(seq, "Product rev. : %s\n",
31887- chtostr((u8 *) (work32 + 14), 8));
31888+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31889+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31890+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31891+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31892
31893 seq_printf(seq, "Serial number : ");
31894 print_serial_number(seq, (u8 *) (work32 + 16),
31895@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31896 }
31897
31898 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31899- seq_printf(seq, "Module name : %s\n",
31900- chtostr(result.module_name, 24));
31901- seq_printf(seq, "Module revision : %s\n",
31902- chtostr(result.module_rev, 8));
31903+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31904+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31905
31906 seq_printf(seq, "Serial number : ");
31907 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31908@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31909 return 0;
31910 }
31911
31912- seq_printf(seq, "Device name : %s\n",
31913- chtostr(result.device_name, 64));
31914- seq_printf(seq, "Service name : %s\n",
31915- chtostr(result.service_name, 64));
31916- seq_printf(seq, "Physical name : %s\n",
31917- chtostr(result.physical_location, 64));
31918- seq_printf(seq, "Instance number : %s\n",
31919- chtostr(result.instance_number, 4));
31920+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31921+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31922+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31923+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31924
31925 return 0;
31926 }
31927diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31928index a8c08f3..155fe3d 100644
31929--- a/drivers/message/i2o/iop.c
31930+++ b/drivers/message/i2o/iop.c
31931@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31932
31933 spin_lock_irqsave(&c->context_list_lock, flags);
31934
31935- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31936- atomic_inc(&c->context_list_counter);
31937+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31938+ atomic_inc_unchecked(&c->context_list_counter);
31939
31940- entry->context = atomic_read(&c->context_list_counter);
31941+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31942
31943 list_add(&entry->list, &c->context_list);
31944
31945@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31946
31947 #if BITS_PER_LONG == 64
31948 spin_lock_init(&c->context_list_lock);
31949- atomic_set(&c->context_list_counter, 0);
31950+ atomic_set_unchecked(&c->context_list_counter, 0);
31951 INIT_LIST_HEAD(&c->context_list);
31952 #endif
31953
31954diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31955index 7ce65f4..e66e9bc 100644
31956--- a/drivers/mfd/abx500-core.c
31957+++ b/drivers/mfd/abx500-core.c
31958@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31959
31960 struct abx500_device_entry {
31961 struct list_head list;
31962- struct abx500_ops ops;
31963+ abx500_ops_no_const ops;
31964 struct device *dev;
31965 };
31966
31967diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31968index 5c2a06a..8fa077c 100644
31969--- a/drivers/mfd/janz-cmodio.c
31970+++ b/drivers/mfd/janz-cmodio.c
31971@@ -13,6 +13,7 @@
31972
31973 #include <linux/kernel.h>
31974 #include <linux/module.h>
31975+#include <linux/slab.h>
31976 #include <linux/init.h>
31977 #include <linux/pci.h>
31978 #include <linux/interrupt.h>
31979diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31980index 29d12a7..f900ba4 100644
31981--- a/drivers/misc/lis3lv02d/lis3lv02d.c
31982+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31983@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31984 * the lid is closed. This leads to interrupts as soon as a little move
31985 * is done.
31986 */
31987- atomic_inc(&lis3->count);
31988+ atomic_inc_unchecked(&lis3->count);
31989
31990 wake_up_interruptible(&lis3->misc_wait);
31991 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31992@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31993 if (lis3->pm_dev)
31994 pm_runtime_get_sync(lis3->pm_dev);
31995
31996- atomic_set(&lis3->count, 0);
31997+ atomic_set_unchecked(&lis3->count, 0);
31998 return 0;
31999 }
32000
32001@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32002 add_wait_queue(&lis3->misc_wait, &wait);
32003 while (true) {
32004 set_current_state(TASK_INTERRUPTIBLE);
32005- data = atomic_xchg(&lis3->count, 0);
32006+ data = atomic_xchg_unchecked(&lis3->count, 0);
32007 if (data)
32008 break;
32009
32010@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32011 struct lis3lv02d, miscdev);
32012
32013 poll_wait(file, &lis3->misc_wait, wait);
32014- if (atomic_read(&lis3->count))
32015+ if (atomic_read_unchecked(&lis3->count))
32016 return POLLIN | POLLRDNORM;
32017 return 0;
32018 }
32019diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32020index 2b1482a..5d33616 100644
32021--- a/drivers/misc/lis3lv02d/lis3lv02d.h
32022+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32023@@ -266,7 +266,7 @@ struct lis3lv02d {
32024 struct input_polled_dev *idev; /* input device */
32025 struct platform_device *pdev; /* platform device */
32026 struct regulator_bulk_data regulators[2];
32027- atomic_t count; /* interrupt count after last read */
32028+ atomic_unchecked_t count; /* interrupt count after last read */
32029 union axis_conversion ac; /* hw -> logical axis */
32030 int mapped_btns[3];
32031
32032diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32033index 2f30bad..c4c13d0 100644
32034--- a/drivers/misc/sgi-gru/gruhandles.c
32035+++ b/drivers/misc/sgi-gru/gruhandles.c
32036@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32037 unsigned long nsec;
32038
32039 nsec = CLKS2NSEC(clks);
32040- atomic_long_inc(&mcs_op_statistics[op].count);
32041- atomic_long_add(nsec, &mcs_op_statistics[op].total);
32042+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32043+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32044 if (mcs_op_statistics[op].max < nsec)
32045 mcs_op_statistics[op].max = nsec;
32046 }
32047diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32048index 7768b87..f8aac38 100644
32049--- a/drivers/misc/sgi-gru/gruprocfs.c
32050+++ b/drivers/misc/sgi-gru/gruprocfs.c
32051@@ -32,9 +32,9 @@
32052
32053 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32054
32055-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32056+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32057 {
32058- unsigned long val = atomic_long_read(v);
32059+ unsigned long val = atomic_long_read_unchecked(v);
32060
32061 seq_printf(s, "%16lu %s\n", val, id);
32062 }
32063@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32064
32065 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32066 for (op = 0; op < mcsop_last; op++) {
32067- count = atomic_long_read(&mcs_op_statistics[op].count);
32068- total = atomic_long_read(&mcs_op_statistics[op].total);
32069+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32070+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32071 max = mcs_op_statistics[op].max;
32072 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32073 count ? total / count : 0, max);
32074diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32075index 5c3ce24..4915ccb 100644
32076--- a/drivers/misc/sgi-gru/grutables.h
32077+++ b/drivers/misc/sgi-gru/grutables.h
32078@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32079 * GRU statistics.
32080 */
32081 struct gru_stats_s {
32082- atomic_long_t vdata_alloc;
32083- atomic_long_t vdata_free;
32084- atomic_long_t gts_alloc;
32085- atomic_long_t gts_free;
32086- atomic_long_t gms_alloc;
32087- atomic_long_t gms_free;
32088- atomic_long_t gts_double_allocate;
32089- atomic_long_t assign_context;
32090- atomic_long_t assign_context_failed;
32091- atomic_long_t free_context;
32092- atomic_long_t load_user_context;
32093- atomic_long_t load_kernel_context;
32094- atomic_long_t lock_kernel_context;
32095- atomic_long_t unlock_kernel_context;
32096- atomic_long_t steal_user_context;
32097- atomic_long_t steal_kernel_context;
32098- atomic_long_t steal_context_failed;
32099- atomic_long_t nopfn;
32100- atomic_long_t asid_new;
32101- atomic_long_t asid_next;
32102- atomic_long_t asid_wrap;
32103- atomic_long_t asid_reuse;
32104- atomic_long_t intr;
32105- atomic_long_t intr_cbr;
32106- atomic_long_t intr_tfh;
32107- atomic_long_t intr_spurious;
32108- atomic_long_t intr_mm_lock_failed;
32109- atomic_long_t call_os;
32110- atomic_long_t call_os_wait_queue;
32111- atomic_long_t user_flush_tlb;
32112- atomic_long_t user_unload_context;
32113- atomic_long_t user_exception;
32114- atomic_long_t set_context_option;
32115- atomic_long_t check_context_retarget_intr;
32116- atomic_long_t check_context_unload;
32117- atomic_long_t tlb_dropin;
32118- atomic_long_t tlb_preload_page;
32119- atomic_long_t tlb_dropin_fail_no_asid;
32120- atomic_long_t tlb_dropin_fail_upm;
32121- atomic_long_t tlb_dropin_fail_invalid;
32122- atomic_long_t tlb_dropin_fail_range_active;
32123- atomic_long_t tlb_dropin_fail_idle;
32124- atomic_long_t tlb_dropin_fail_fmm;
32125- atomic_long_t tlb_dropin_fail_no_exception;
32126- atomic_long_t tfh_stale_on_fault;
32127- atomic_long_t mmu_invalidate_range;
32128- atomic_long_t mmu_invalidate_page;
32129- atomic_long_t flush_tlb;
32130- atomic_long_t flush_tlb_gru;
32131- atomic_long_t flush_tlb_gru_tgh;
32132- atomic_long_t flush_tlb_gru_zero_asid;
32133+ atomic_long_unchecked_t vdata_alloc;
32134+ atomic_long_unchecked_t vdata_free;
32135+ atomic_long_unchecked_t gts_alloc;
32136+ atomic_long_unchecked_t gts_free;
32137+ atomic_long_unchecked_t gms_alloc;
32138+ atomic_long_unchecked_t gms_free;
32139+ atomic_long_unchecked_t gts_double_allocate;
32140+ atomic_long_unchecked_t assign_context;
32141+ atomic_long_unchecked_t assign_context_failed;
32142+ atomic_long_unchecked_t free_context;
32143+ atomic_long_unchecked_t load_user_context;
32144+ atomic_long_unchecked_t load_kernel_context;
32145+ atomic_long_unchecked_t lock_kernel_context;
32146+ atomic_long_unchecked_t unlock_kernel_context;
32147+ atomic_long_unchecked_t steal_user_context;
32148+ atomic_long_unchecked_t steal_kernel_context;
32149+ atomic_long_unchecked_t steal_context_failed;
32150+ atomic_long_unchecked_t nopfn;
32151+ atomic_long_unchecked_t asid_new;
32152+ atomic_long_unchecked_t asid_next;
32153+ atomic_long_unchecked_t asid_wrap;
32154+ atomic_long_unchecked_t asid_reuse;
32155+ atomic_long_unchecked_t intr;
32156+ atomic_long_unchecked_t intr_cbr;
32157+ atomic_long_unchecked_t intr_tfh;
32158+ atomic_long_unchecked_t intr_spurious;
32159+ atomic_long_unchecked_t intr_mm_lock_failed;
32160+ atomic_long_unchecked_t call_os;
32161+ atomic_long_unchecked_t call_os_wait_queue;
32162+ atomic_long_unchecked_t user_flush_tlb;
32163+ atomic_long_unchecked_t user_unload_context;
32164+ atomic_long_unchecked_t user_exception;
32165+ atomic_long_unchecked_t set_context_option;
32166+ atomic_long_unchecked_t check_context_retarget_intr;
32167+ atomic_long_unchecked_t check_context_unload;
32168+ atomic_long_unchecked_t tlb_dropin;
32169+ atomic_long_unchecked_t tlb_preload_page;
32170+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32171+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32172+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32173+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32174+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32175+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32176+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32177+ atomic_long_unchecked_t tfh_stale_on_fault;
32178+ atomic_long_unchecked_t mmu_invalidate_range;
32179+ atomic_long_unchecked_t mmu_invalidate_page;
32180+ atomic_long_unchecked_t flush_tlb;
32181+ atomic_long_unchecked_t flush_tlb_gru;
32182+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32183+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32184
32185- atomic_long_t copy_gpa;
32186- atomic_long_t read_gpa;
32187+ atomic_long_unchecked_t copy_gpa;
32188+ atomic_long_unchecked_t read_gpa;
32189
32190- atomic_long_t mesq_receive;
32191- atomic_long_t mesq_receive_none;
32192- atomic_long_t mesq_send;
32193- atomic_long_t mesq_send_failed;
32194- atomic_long_t mesq_noop;
32195- atomic_long_t mesq_send_unexpected_error;
32196- atomic_long_t mesq_send_lb_overflow;
32197- atomic_long_t mesq_send_qlimit_reached;
32198- atomic_long_t mesq_send_amo_nacked;
32199- atomic_long_t mesq_send_put_nacked;
32200- atomic_long_t mesq_page_overflow;
32201- atomic_long_t mesq_qf_locked;
32202- atomic_long_t mesq_qf_noop_not_full;
32203- atomic_long_t mesq_qf_switch_head_failed;
32204- atomic_long_t mesq_qf_unexpected_error;
32205- atomic_long_t mesq_noop_unexpected_error;
32206- atomic_long_t mesq_noop_lb_overflow;
32207- atomic_long_t mesq_noop_qlimit_reached;
32208- atomic_long_t mesq_noop_amo_nacked;
32209- atomic_long_t mesq_noop_put_nacked;
32210- atomic_long_t mesq_noop_page_overflow;
32211+ atomic_long_unchecked_t mesq_receive;
32212+ atomic_long_unchecked_t mesq_receive_none;
32213+ atomic_long_unchecked_t mesq_send;
32214+ atomic_long_unchecked_t mesq_send_failed;
32215+ atomic_long_unchecked_t mesq_noop;
32216+ atomic_long_unchecked_t mesq_send_unexpected_error;
32217+ atomic_long_unchecked_t mesq_send_lb_overflow;
32218+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32219+ atomic_long_unchecked_t mesq_send_amo_nacked;
32220+ atomic_long_unchecked_t mesq_send_put_nacked;
32221+ atomic_long_unchecked_t mesq_page_overflow;
32222+ atomic_long_unchecked_t mesq_qf_locked;
32223+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32224+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32225+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32226+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32227+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32228+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32229+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32230+ atomic_long_unchecked_t mesq_noop_put_nacked;
32231+ atomic_long_unchecked_t mesq_noop_page_overflow;
32232
32233 };
32234
32235@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32236 tghop_invalidate, mcsop_last};
32237
32238 struct mcs_op_statistic {
32239- atomic_long_t count;
32240- atomic_long_t total;
32241+ atomic_long_unchecked_t count;
32242+ atomic_long_unchecked_t total;
32243 unsigned long max;
32244 };
32245
32246@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32247
32248 #define STAT(id) do { \
32249 if (gru_options & OPT_STATS) \
32250- atomic_long_inc(&gru_stats.id); \
32251+ atomic_long_inc_unchecked(&gru_stats.id); \
32252 } while (0)
32253
32254 #ifdef CONFIG_SGI_GRU_DEBUG
32255diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32256index 851b2f2..a4ec097 100644
32257--- a/drivers/misc/sgi-xp/xp.h
32258+++ b/drivers/misc/sgi-xp/xp.h
32259@@ -289,7 +289,7 @@ struct xpc_interface {
32260 xpc_notify_func, void *);
32261 void (*received) (short, int, void *);
32262 enum xp_retval (*partid_to_nasids) (short, void *);
32263-};
32264+} __no_const;
32265
32266 extern struct xpc_interface xpc_interface;
32267
32268diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32269index b94d5f7..7f494c5 100644
32270--- a/drivers/misc/sgi-xp/xpc.h
32271+++ b/drivers/misc/sgi-xp/xpc.h
32272@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32273 void (*received_payload) (struct xpc_channel *, void *);
32274 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32275 };
32276+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32277
32278 /* struct xpc_partition act_state values (for XPC HB) */
32279
32280@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32281 /* found in xpc_main.c */
32282 extern struct device *xpc_part;
32283 extern struct device *xpc_chan;
32284-extern struct xpc_arch_operations xpc_arch_ops;
32285+extern xpc_arch_operations_no_const xpc_arch_ops;
32286 extern int xpc_disengage_timelimit;
32287 extern int xpc_disengage_timedout;
32288 extern int xpc_activate_IRQ_rcvd;
32289diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32290index 8d082b4..aa749ae 100644
32291--- a/drivers/misc/sgi-xp/xpc_main.c
32292+++ b/drivers/misc/sgi-xp/xpc_main.c
32293@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32294 .notifier_call = xpc_system_die,
32295 };
32296
32297-struct xpc_arch_operations xpc_arch_ops;
32298+xpc_arch_operations_no_const xpc_arch_ops;
32299
32300 /*
32301 * Timer function to enforce the timelimit on the partition disengage.
32302diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32303index 6878a94..fe5c5f1 100644
32304--- a/drivers/mmc/host/sdhci-pci.c
32305+++ b/drivers/mmc/host/sdhci-pci.c
32306@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32307 .probe = via_probe,
32308 };
32309
32310-static const struct pci_device_id pci_ids[] __devinitdata = {
32311+static const struct pci_device_id pci_ids[] __devinitconst = {
32312 {
32313 .vendor = PCI_VENDOR_ID_RICOH,
32314 .device = PCI_DEVICE_ID_RICOH_R5C822,
32315diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32316index e9fad91..0a7a16a 100644
32317--- a/drivers/mtd/devices/doc2000.c
32318+++ b/drivers/mtd/devices/doc2000.c
32319@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32320
32321 /* The ECC will not be calculated correctly if less than 512 is written */
32322 /* DBB-
32323- if (len != 0x200 && eccbuf)
32324+ if (len != 0x200)
32325 printk(KERN_WARNING
32326 "ECC needs a full sector write (adr: %lx size %lx)\n",
32327 (long) to, (long) len);
32328diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32329index a3f7a27..234016e 100644
32330--- a/drivers/mtd/devices/doc2001.c
32331+++ b/drivers/mtd/devices/doc2001.c
32332@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32333 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32334
32335 /* Don't allow read past end of device */
32336- if (from >= this->totlen)
32337+ if (from >= this->totlen || !len)
32338 return -EINVAL;
32339
32340 /* Don't allow a single read to cross a 512-byte block boundary */
32341diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32342index 3984d48..28aa897 100644
32343--- a/drivers/mtd/nand/denali.c
32344+++ b/drivers/mtd/nand/denali.c
32345@@ -26,6 +26,7 @@
32346 #include <linux/pci.h>
32347 #include <linux/mtd/mtd.h>
32348 #include <linux/module.h>
32349+#include <linux/slab.h>
32350
32351 #include "denali.h"
32352
32353diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32354index ac40925..483b753 100644
32355--- a/drivers/mtd/nftlmount.c
32356+++ b/drivers/mtd/nftlmount.c
32357@@ -24,6 +24,7 @@
32358 #include <asm/errno.h>
32359 #include <linux/delay.h>
32360 #include <linux/slab.h>
32361+#include <linux/sched.h>
32362 #include <linux/mtd/mtd.h>
32363 #include <linux/mtd/nand.h>
32364 #include <linux/mtd/nftl.h>
32365diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32366index 6c3fb5a..c542a81 100644
32367--- a/drivers/mtd/ubi/build.c
32368+++ b/drivers/mtd/ubi/build.c
32369@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32370 static int __init bytes_str_to_int(const char *str)
32371 {
32372 char *endp;
32373- unsigned long result;
32374+ unsigned long result, scale = 1;
32375
32376 result = simple_strtoul(str, &endp, 0);
32377 if (str == endp || result >= INT_MAX) {
32378@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32379
32380 switch (*endp) {
32381 case 'G':
32382- result *= 1024;
32383+ scale *= 1024;
32384 case 'M':
32385- result *= 1024;
32386+ scale *= 1024;
32387 case 'K':
32388- result *= 1024;
32389+ scale *= 1024;
32390 if (endp[1] == 'i' && endp[2] == 'B')
32391 endp += 2;
32392 case '\0':
32393@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32394 return -EINVAL;
32395 }
32396
32397- return result;
32398+ if ((intoverflow_t)result*scale >= INT_MAX) {
32399+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32400+ str);
32401+ return -EINVAL;
32402+ }
32403+
32404+ return result*scale;
32405 }
32406
32407 /**
32408diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32409index 1feae59..c2a61d2 100644
32410--- a/drivers/net/ethernet/atheros/atlx/atl2.c
32411+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32412@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32413 */
32414
32415 #define ATL2_PARAM(X, desc) \
32416- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32417+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32418 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32419 MODULE_PARM_DESC(X, desc);
32420 #else
32421diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32422index 9a517c2..a50cfcb 100644
32423--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32424+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32425@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32426
32427 int (*wait_comp)(struct bnx2x *bp,
32428 struct bnx2x_rx_mode_ramrod_params *p);
32429-};
32430+} __no_const;
32431
32432 /********************** Set multicast group ***********************************/
32433
32434diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32435index 94b4bd0..73c02de 100644
32436--- a/drivers/net/ethernet/broadcom/tg3.h
32437+++ b/drivers/net/ethernet/broadcom/tg3.h
32438@@ -134,6 +134,7 @@
32439 #define CHIPREV_ID_5750_A0 0x4000
32440 #define CHIPREV_ID_5750_A1 0x4001
32441 #define CHIPREV_ID_5750_A3 0x4003
32442+#define CHIPREV_ID_5750_C1 0x4201
32443 #define CHIPREV_ID_5750_C2 0x4202
32444 #define CHIPREV_ID_5752_A0_HW 0x5000
32445 #define CHIPREV_ID_5752_A0 0x6000
32446diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32447index c5f5479..2e8c260 100644
32448--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32449+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32450@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32451 */
32452 struct l2t_skb_cb {
32453 arp_failure_handler_func arp_failure_handler;
32454-};
32455+} __no_const;
32456
32457 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32458
32459diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32460index 871bcaa..4043505 100644
32461--- a/drivers/net/ethernet/dec/tulip/de4x5.c
32462+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32463@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32464 for (i=0; i<ETH_ALEN; i++) {
32465 tmp.addr[i] = dev->dev_addr[i];
32466 }
32467- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32468+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32469 break;
32470
32471 case DE4X5_SET_HWADDR: /* Set the hardware address */
32472@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32473 spin_lock_irqsave(&lp->lock, flags);
32474 memcpy(&statbuf, &lp->pktStats, ioc->len);
32475 spin_unlock_irqrestore(&lp->lock, flags);
32476- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32477+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32478 return -EFAULT;
32479 break;
32480 }
32481diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32482index 14d5b61..1398636 100644
32483--- a/drivers/net/ethernet/dec/tulip/eeprom.c
32484+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32485@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32486 {NULL}};
32487
32488
32489-static const char *block_name[] __devinitdata = {
32490+static const char *block_name[] __devinitconst = {
32491 "21140 non-MII",
32492 "21140 MII PHY",
32493 "21142 Serial PHY",
32494diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32495index 4d01219..b58d26d 100644
32496--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32497+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32498@@ -236,7 +236,7 @@ struct pci_id_info {
32499 int drv_flags; /* Driver use, intended as capability flags. */
32500 };
32501
32502-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32503+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32504 { /* Sometime a Level-One switch card. */
32505 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32506 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32507diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32508index dcd7f7a..ecb7fb3 100644
32509--- a/drivers/net/ethernet/dlink/sundance.c
32510+++ b/drivers/net/ethernet/dlink/sundance.c
32511@@ -218,7 +218,7 @@ enum {
32512 struct pci_id_info {
32513 const char *name;
32514 };
32515-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32516+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32517 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32518 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32519 {"D-Link DFE-580TX 4 port Server Adapter"},
32520diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32521index bf266a0..e024af7 100644
32522--- a/drivers/net/ethernet/emulex/benet/be_main.c
32523+++ b/drivers/net/ethernet/emulex/benet/be_main.c
32524@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32525
32526 if (wrapped)
32527 newacc += 65536;
32528- ACCESS_ONCE(*acc) = newacc;
32529+ ACCESS_ONCE_RW(*acc) = newacc;
32530 }
32531
32532 void be_parse_stats(struct be_adapter *adapter)
32533diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32534index 61d2bdd..7f1154a 100644
32535--- a/drivers/net/ethernet/fealnx.c
32536+++ b/drivers/net/ethernet/fealnx.c
32537@@ -150,7 +150,7 @@ struct chip_info {
32538 int flags;
32539 };
32540
32541-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32542+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32543 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32544 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32545 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32546diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32547index e1159e5..e18684d 100644
32548--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32549+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32550@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32551 {
32552 struct e1000_hw *hw = &adapter->hw;
32553 struct e1000_mac_info *mac = &hw->mac;
32554- struct e1000_mac_operations *func = &mac->ops;
32555+ e1000_mac_operations_no_const *func = &mac->ops;
32556
32557 /* Set media type */
32558 switch (adapter->pdev->device) {
32559diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32560index a3e65fd..f451444 100644
32561--- a/drivers/net/ethernet/intel/e1000e/82571.c
32562+++ b/drivers/net/ethernet/intel/e1000e/82571.c
32563@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32564 {
32565 struct e1000_hw *hw = &adapter->hw;
32566 struct e1000_mac_info *mac = &hw->mac;
32567- struct e1000_mac_operations *func = &mac->ops;
32568+ e1000_mac_operations_no_const *func = &mac->ops;
32569 u32 swsm = 0;
32570 u32 swsm2 = 0;
32571 bool force_clear_smbi = false;
32572diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32573index 2967039..ca8c40c 100644
32574--- a/drivers/net/ethernet/intel/e1000e/hw.h
32575+++ b/drivers/net/ethernet/intel/e1000e/hw.h
32576@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32577 void (*write_vfta)(struct e1000_hw *, u32, u32);
32578 s32 (*read_mac_addr)(struct e1000_hw *);
32579 };
32580+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32581
32582 /*
32583 * When to use various PHY register access functions:
32584@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32585 void (*power_up)(struct e1000_hw *);
32586 void (*power_down)(struct e1000_hw *);
32587 };
32588+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32589
32590 /* Function pointers for the NVM. */
32591 struct e1000_nvm_operations {
32592@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32593 s32 (*validate)(struct e1000_hw *);
32594 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32595 };
32596+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32597
32598 struct e1000_mac_info {
32599- struct e1000_mac_operations ops;
32600+ e1000_mac_operations_no_const ops;
32601 u8 addr[ETH_ALEN];
32602 u8 perm_addr[ETH_ALEN];
32603
32604@@ -872,7 +875,7 @@ struct e1000_mac_info {
32605 };
32606
32607 struct e1000_phy_info {
32608- struct e1000_phy_operations ops;
32609+ e1000_phy_operations_no_const ops;
32610
32611 enum e1000_phy_type type;
32612
32613@@ -906,7 +909,7 @@ struct e1000_phy_info {
32614 };
32615
32616 struct e1000_nvm_info {
32617- struct e1000_nvm_operations ops;
32618+ e1000_nvm_operations_no_const ops;
32619
32620 enum e1000_nvm_type type;
32621 enum e1000_nvm_override override;
32622diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32623index 4519a13..f97fcd0 100644
32624--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32625+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32626@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32627 s32 (*read_mac_addr)(struct e1000_hw *);
32628 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32629 };
32630+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32631
32632 struct e1000_phy_operations {
32633 s32 (*acquire)(struct e1000_hw *);
32634@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32635 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32636 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32637 };
32638+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32639
32640 struct e1000_nvm_operations {
32641 s32 (*acquire)(struct e1000_hw *);
32642@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32643 s32 (*update)(struct e1000_hw *);
32644 s32 (*validate)(struct e1000_hw *);
32645 };
32646+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32647
32648 struct e1000_info {
32649 s32 (*get_invariants)(struct e1000_hw *);
32650@@ -350,7 +353,7 @@ struct e1000_info {
32651 extern const struct e1000_info e1000_82575_info;
32652
32653 struct e1000_mac_info {
32654- struct e1000_mac_operations ops;
32655+ e1000_mac_operations_no_const ops;
32656
32657 u8 addr[6];
32658 u8 perm_addr[6];
32659@@ -388,7 +391,7 @@ struct e1000_mac_info {
32660 };
32661
32662 struct e1000_phy_info {
32663- struct e1000_phy_operations ops;
32664+ e1000_phy_operations_no_const ops;
32665
32666 enum e1000_phy_type type;
32667
32668@@ -423,7 +426,7 @@ struct e1000_phy_info {
32669 };
32670
32671 struct e1000_nvm_info {
32672- struct e1000_nvm_operations ops;
32673+ e1000_nvm_operations_no_const ops;
32674 enum e1000_nvm_type type;
32675 enum e1000_nvm_override override;
32676
32677@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32678 s32 (*check_for_ack)(struct e1000_hw *, u16);
32679 s32 (*check_for_rst)(struct e1000_hw *, u16);
32680 };
32681+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32682
32683 struct e1000_mbx_stats {
32684 u32 msgs_tx;
32685@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32686 };
32687
32688 struct e1000_mbx_info {
32689- struct e1000_mbx_operations ops;
32690+ e1000_mbx_operations_no_const ops;
32691 struct e1000_mbx_stats stats;
32692 u32 timeout;
32693 u32 usec_delay;
32694diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32695index d7ed58f..64cde36 100644
32696--- a/drivers/net/ethernet/intel/igbvf/vf.h
32697+++ b/drivers/net/ethernet/intel/igbvf/vf.h
32698@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32699 s32 (*read_mac_addr)(struct e1000_hw *);
32700 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32701 };
32702+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32703
32704 struct e1000_mac_info {
32705- struct e1000_mac_operations ops;
32706+ e1000_mac_operations_no_const ops;
32707 u8 addr[6];
32708 u8 perm_addr[6];
32709
32710@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32711 s32 (*check_for_ack)(struct e1000_hw *);
32712 s32 (*check_for_rst)(struct e1000_hw *);
32713 };
32714+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32715
32716 struct e1000_mbx_stats {
32717 u32 msgs_tx;
32718@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32719 };
32720
32721 struct e1000_mbx_info {
32722- struct e1000_mbx_operations ops;
32723+ e1000_mbx_operations_no_const ops;
32724 struct e1000_mbx_stats stats;
32725 u32 timeout;
32726 u32 usec_delay;
32727diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32728index 6c5cca8..de8ef63 100644
32729--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32730+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32731@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32732 s32 (*update_checksum)(struct ixgbe_hw *);
32733 u16 (*calc_checksum)(struct ixgbe_hw *);
32734 };
32735+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32736
32737 struct ixgbe_mac_operations {
32738 s32 (*init_hw)(struct ixgbe_hw *);
32739@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32740 /* Manageability interface */
32741 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32742 };
32743+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32744
32745 struct ixgbe_phy_operations {
32746 s32 (*identify)(struct ixgbe_hw *);
32747@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32748 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32749 s32 (*check_overtemp)(struct ixgbe_hw *);
32750 };
32751+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32752
32753 struct ixgbe_eeprom_info {
32754- struct ixgbe_eeprom_operations ops;
32755+ ixgbe_eeprom_operations_no_const ops;
32756 enum ixgbe_eeprom_type type;
32757 u32 semaphore_delay;
32758 u16 word_size;
32759@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32760
32761 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32762 struct ixgbe_mac_info {
32763- struct ixgbe_mac_operations ops;
32764+ ixgbe_mac_operations_no_const ops;
32765 enum ixgbe_mac_type type;
32766 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32767 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32768@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32769 };
32770
32771 struct ixgbe_phy_info {
32772- struct ixgbe_phy_operations ops;
32773+ ixgbe_phy_operations_no_const ops;
32774 struct mdio_if_info mdio;
32775 enum ixgbe_phy_type type;
32776 u32 id;
32777@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32778 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32779 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32780 };
32781+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32782
32783 struct ixgbe_mbx_stats {
32784 u32 msgs_tx;
32785@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32786 };
32787
32788 struct ixgbe_mbx_info {
32789- struct ixgbe_mbx_operations ops;
32790+ ixgbe_mbx_operations_no_const ops;
32791 struct ixgbe_mbx_stats stats;
32792 u32 timeout;
32793 u32 usec_delay;
32794diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32795index 10306b4..28df758 100644
32796--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32797+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32798@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32799 s32 (*clear_vfta)(struct ixgbe_hw *);
32800 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32801 };
32802+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32803
32804 enum ixgbe_mac_type {
32805 ixgbe_mac_unknown = 0,
32806@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32807 };
32808
32809 struct ixgbe_mac_info {
32810- struct ixgbe_mac_operations ops;
32811+ ixgbe_mac_operations_no_const ops;
32812 u8 addr[6];
32813 u8 perm_addr[6];
32814
32815@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32816 s32 (*check_for_ack)(struct ixgbe_hw *);
32817 s32 (*check_for_rst)(struct ixgbe_hw *);
32818 };
32819+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32820
32821 struct ixgbe_mbx_stats {
32822 u32 msgs_tx;
32823@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32824 };
32825
32826 struct ixgbe_mbx_info {
32827- struct ixgbe_mbx_operations ops;
32828+ ixgbe_mbx_operations_no_const ops;
32829 struct ixgbe_mbx_stats stats;
32830 u32 timeout;
32831 u32 udelay;
32832diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32833index 94bbc85..78c12e6 100644
32834--- a/drivers/net/ethernet/mellanox/mlx4/main.c
32835+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32836@@ -40,6 +40,7 @@
32837 #include <linux/dma-mapping.h>
32838 #include <linux/slab.h>
32839 #include <linux/io-mapping.h>
32840+#include <linux/sched.h>
32841
32842 #include <linux/mlx4/device.h>
32843 #include <linux/mlx4/doorbell.h>
32844diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32845index 5046a64..71ca936 100644
32846--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32847+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32848@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32849 void (*link_down)(struct __vxge_hw_device *devh);
32850 void (*crit_err)(struct __vxge_hw_device *devh,
32851 enum vxge_hw_event type, u64 ext_data);
32852-};
32853+} __no_const;
32854
32855 /*
32856 * struct __vxge_hw_blockpool_entry - Block private data structure
32857diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32858index 4a518a3..936b334 100644
32859--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32860+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32861@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32862 struct vxge_hw_mempool_dma *dma_object,
32863 u32 index,
32864 u32 is_last);
32865-};
32866+} __no_const;
32867
32868 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32869 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32870diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32871index c8f47f1..5da9840 100644
32872--- a/drivers/net/ethernet/realtek/r8169.c
32873+++ b/drivers/net/ethernet/realtek/r8169.c
32874@@ -698,17 +698,17 @@ struct rtl8169_private {
32875 struct mdio_ops {
32876 void (*write)(void __iomem *, int, int);
32877 int (*read)(void __iomem *, int);
32878- } mdio_ops;
32879+ } __no_const mdio_ops;
32880
32881 struct pll_power_ops {
32882 void (*down)(struct rtl8169_private *);
32883 void (*up)(struct rtl8169_private *);
32884- } pll_power_ops;
32885+ } __no_const pll_power_ops;
32886
32887 struct jumbo_ops {
32888 void (*enable)(struct rtl8169_private *);
32889 void (*disable)(struct rtl8169_private *);
32890- } jumbo_ops;
32891+ } __no_const jumbo_ops;
32892
32893 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32894 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32895diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32896index 1b4658c..a30dabb 100644
32897--- a/drivers/net/ethernet/sis/sis190.c
32898+++ b/drivers/net/ethernet/sis/sis190.c
32899@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32900 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32901 struct net_device *dev)
32902 {
32903- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32904+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32905 struct sis190_private *tp = netdev_priv(dev);
32906 struct pci_dev *isa_bridge;
32907 u8 reg, tmp8;
32908diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32909index edfa15d..002bfa9 100644
32910--- a/drivers/net/ppp/ppp_generic.c
32911+++ b/drivers/net/ppp/ppp_generic.c
32912@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32913 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32914 struct ppp_stats stats;
32915 struct ppp_comp_stats cstats;
32916- char *vers;
32917
32918 switch (cmd) {
32919 case SIOCGPPPSTATS:
32920@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32921 break;
32922
32923 case SIOCGPPPVER:
32924- vers = PPP_VERSION;
32925- if (copy_to_user(addr, vers, strlen(vers) + 1))
32926+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32927 break;
32928 err = 0;
32929 break;
32930diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32931index 515f122..41dd273 100644
32932--- a/drivers/net/tokenring/abyss.c
32933+++ b/drivers/net/tokenring/abyss.c
32934@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32935
32936 static int __init abyss_init (void)
32937 {
32938- abyss_netdev_ops = tms380tr_netdev_ops;
32939+ pax_open_kernel();
32940+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32941
32942- abyss_netdev_ops.ndo_open = abyss_open;
32943- abyss_netdev_ops.ndo_stop = abyss_close;
32944+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32945+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32946+ pax_close_kernel();
32947
32948 return pci_register_driver(&abyss_driver);
32949 }
32950diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32951index 6153cfd..cf69c1c 100644
32952--- a/drivers/net/tokenring/madgemc.c
32953+++ b/drivers/net/tokenring/madgemc.c
32954@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32955
32956 static int __init madgemc_init (void)
32957 {
32958- madgemc_netdev_ops = tms380tr_netdev_ops;
32959- madgemc_netdev_ops.ndo_open = madgemc_open;
32960- madgemc_netdev_ops.ndo_stop = madgemc_close;
32961+ pax_open_kernel();
32962+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32963+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32964+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32965+ pax_close_kernel();
32966
32967 return mca_register_driver (&madgemc_driver);
32968 }
32969diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32970index 8d362e6..f91cc52 100644
32971--- a/drivers/net/tokenring/proteon.c
32972+++ b/drivers/net/tokenring/proteon.c
32973@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32974 struct platform_device *pdev;
32975 int i, num = 0, err = 0;
32976
32977- proteon_netdev_ops = tms380tr_netdev_ops;
32978- proteon_netdev_ops.ndo_open = proteon_open;
32979- proteon_netdev_ops.ndo_stop = tms380tr_close;
32980+ pax_open_kernel();
32981+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32982+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32983+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32984+ pax_close_kernel();
32985
32986 err = platform_driver_register(&proteon_driver);
32987 if (err)
32988diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32989index 46db5c5..37c1536 100644
32990--- a/drivers/net/tokenring/skisa.c
32991+++ b/drivers/net/tokenring/skisa.c
32992@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32993 struct platform_device *pdev;
32994 int i, num = 0, err = 0;
32995
32996- sk_isa_netdev_ops = tms380tr_netdev_ops;
32997- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32998- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32999+ pax_open_kernel();
33000+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33001+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33002+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33003+ pax_close_kernel();
33004
33005 err = platform_driver_register(&sk_isa_driver);
33006 if (err)
33007diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33008index 304fe78..db112fa 100644
33009--- a/drivers/net/usb/hso.c
33010+++ b/drivers/net/usb/hso.c
33011@@ -71,7 +71,7 @@
33012 #include <asm/byteorder.h>
33013 #include <linux/serial_core.h>
33014 #include <linux/serial.h>
33015-
33016+#include <asm/local.h>
33017
33018 #define MOD_AUTHOR "Option Wireless"
33019 #define MOD_DESCRIPTION "USB High Speed Option driver"
33020@@ -257,7 +257,7 @@ struct hso_serial {
33021
33022 /* from usb_serial_port */
33023 struct tty_struct *tty;
33024- int open_count;
33025+ local_t open_count;
33026 spinlock_t serial_lock;
33027
33028 int (*write_data) (struct hso_serial *serial);
33029@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33030 struct urb *urb;
33031
33032 urb = serial->rx_urb[0];
33033- if (serial->open_count > 0) {
33034+ if (local_read(&serial->open_count) > 0) {
33035 count = put_rxbuf_data(urb, serial);
33036 if (count == -1)
33037 return;
33038@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33039 DUMP1(urb->transfer_buffer, urb->actual_length);
33040
33041 /* Anyone listening? */
33042- if (serial->open_count == 0)
33043+ if (local_read(&serial->open_count) == 0)
33044 return;
33045
33046 if (status == 0) {
33047@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33048 spin_unlock_irq(&serial->serial_lock);
33049
33050 /* check for port already opened, if not set the termios */
33051- serial->open_count++;
33052- if (serial->open_count == 1) {
33053+ if (local_inc_return(&serial->open_count) == 1) {
33054 serial->rx_state = RX_IDLE;
33055 /* Force default termio settings */
33056 _hso_serial_set_termios(tty, NULL);
33057@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33058 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33059 if (result) {
33060 hso_stop_serial_device(serial->parent);
33061- serial->open_count--;
33062+ local_dec(&serial->open_count);
33063 kref_put(&serial->parent->ref, hso_serial_ref_free);
33064 }
33065 } else {
33066@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33067
33068 /* reset the rts and dtr */
33069 /* do the actual close */
33070- serial->open_count--;
33071+ local_dec(&serial->open_count);
33072
33073- if (serial->open_count <= 0) {
33074- serial->open_count = 0;
33075+ if (local_read(&serial->open_count) <= 0) {
33076+ local_set(&serial->open_count, 0);
33077 spin_lock_irq(&serial->serial_lock);
33078 if (serial->tty == tty) {
33079 serial->tty->driver_data = NULL;
33080@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33081
33082 /* the actual setup */
33083 spin_lock_irqsave(&serial->serial_lock, flags);
33084- if (serial->open_count)
33085+ if (local_read(&serial->open_count))
33086 _hso_serial_set_termios(tty, old);
33087 else
33088 tty->termios = old;
33089@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33090 D1("Pending read interrupt on port %d\n", i);
33091 spin_lock(&serial->serial_lock);
33092 if (serial->rx_state == RX_IDLE &&
33093- serial->open_count > 0) {
33094+ local_read(&serial->open_count) > 0) {
33095 /* Setup and send a ctrl req read on
33096 * port i */
33097 if (!serial->rx_urb_filled[0]) {
33098@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33099 /* Start all serial ports */
33100 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33101 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33102- if (dev2ser(serial_table[i])->open_count) {
33103+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33104 result =
33105 hso_start_serial_device(serial_table[i], GFP_NOIO);
33106 hso_kick_transmit(dev2ser(serial_table[i]));
33107diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33108index e662cbc..8d4a102 100644
33109--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33110+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33111@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33112 * Return with error code if any of the queue indices
33113 * is out of range
33114 */
33115- if (p->ring_index[i] < 0 ||
33116- p->ring_index[i] >= adapter->num_rx_queues)
33117+ if (p->ring_index[i] >= adapter->num_rx_queues)
33118 return -EINVAL;
33119 }
33120
33121diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33122index 0f9ee46..e2d6e65 100644
33123--- a/drivers/net/wireless/ath/ath.h
33124+++ b/drivers/net/wireless/ath/ath.h
33125@@ -119,6 +119,7 @@ struct ath_ops {
33126 void (*write_flush) (void *);
33127 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33128 };
33129+typedef struct ath_ops __no_const ath_ops_no_const;
33130
33131 struct ath_common;
33132 struct ath_bus_ops;
33133diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33134index b592016..fe47870 100644
33135--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33136+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33137@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33138 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33139 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33140
33141- ACCESS_ONCE(ads->ds_link) = i->link;
33142- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33143+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
33144+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33145
33146 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33147 ctl6 = SM(i->keytype, AR_EncrType);
33148@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33149
33150 if ((i->is_first || i->is_last) &&
33151 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33152- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33153+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33154 | set11nTries(i->rates, 1)
33155 | set11nTries(i->rates, 2)
33156 | set11nTries(i->rates, 3)
33157 | (i->dur_update ? AR_DurUpdateEna : 0)
33158 | SM(0, AR_BurstDur);
33159
33160- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33161+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33162 | set11nRate(i->rates, 1)
33163 | set11nRate(i->rates, 2)
33164 | set11nRate(i->rates, 3);
33165 } else {
33166- ACCESS_ONCE(ads->ds_ctl2) = 0;
33167- ACCESS_ONCE(ads->ds_ctl3) = 0;
33168+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33169+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33170 }
33171
33172 if (!i->is_first) {
33173- ACCESS_ONCE(ads->ds_ctl0) = 0;
33174- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33175- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33176+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33177+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33178+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33179 return;
33180 }
33181
33182@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33183 break;
33184 }
33185
33186- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33187+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33188 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33189 | SM(i->txpower, AR_XmitPower)
33190 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33191@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33192 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33193 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33194
33195- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33196- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33197+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33198+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33199
33200 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33201 return;
33202
33203- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33204+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33205 | set11nPktDurRTSCTS(i->rates, 1);
33206
33207- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33208+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33209 | set11nPktDurRTSCTS(i->rates, 3);
33210
33211- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33212+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33213 | set11nRateFlags(i->rates, 1)
33214 | set11nRateFlags(i->rates, 2)
33215 | set11nRateFlags(i->rates, 3)
33216diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33217index f5ae3c6..7936af3 100644
33218--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33219+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33220@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33221 (i->qcu << AR_TxQcuNum_S) | 0x17;
33222
33223 checksum += val;
33224- ACCESS_ONCE(ads->info) = val;
33225+ ACCESS_ONCE_RW(ads->info) = val;
33226
33227 checksum += i->link;
33228- ACCESS_ONCE(ads->link) = i->link;
33229+ ACCESS_ONCE_RW(ads->link) = i->link;
33230
33231 checksum += i->buf_addr[0];
33232- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33233+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33234 checksum += i->buf_addr[1];
33235- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33236+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33237 checksum += i->buf_addr[2];
33238- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33239+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33240 checksum += i->buf_addr[3];
33241- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33242+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33243
33244 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33245- ACCESS_ONCE(ads->ctl3) = val;
33246+ ACCESS_ONCE_RW(ads->ctl3) = val;
33247 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33248- ACCESS_ONCE(ads->ctl5) = val;
33249+ ACCESS_ONCE_RW(ads->ctl5) = val;
33250 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33251- ACCESS_ONCE(ads->ctl7) = val;
33252+ ACCESS_ONCE_RW(ads->ctl7) = val;
33253 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33254- ACCESS_ONCE(ads->ctl9) = val;
33255+ ACCESS_ONCE_RW(ads->ctl9) = val;
33256
33257 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33258- ACCESS_ONCE(ads->ctl10) = checksum;
33259+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
33260
33261 if (i->is_first || i->is_last) {
33262- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33263+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33264 | set11nTries(i->rates, 1)
33265 | set11nTries(i->rates, 2)
33266 | set11nTries(i->rates, 3)
33267 | (i->dur_update ? AR_DurUpdateEna : 0)
33268 | SM(0, AR_BurstDur);
33269
33270- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33271+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33272 | set11nRate(i->rates, 1)
33273 | set11nRate(i->rates, 2)
33274 | set11nRate(i->rates, 3);
33275 } else {
33276- ACCESS_ONCE(ads->ctl13) = 0;
33277- ACCESS_ONCE(ads->ctl14) = 0;
33278+ ACCESS_ONCE_RW(ads->ctl13) = 0;
33279+ ACCESS_ONCE_RW(ads->ctl14) = 0;
33280 }
33281
33282 ads->ctl20 = 0;
33283@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33284
33285 ctl17 = SM(i->keytype, AR_EncrType);
33286 if (!i->is_first) {
33287- ACCESS_ONCE(ads->ctl11) = 0;
33288- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33289- ACCESS_ONCE(ads->ctl15) = 0;
33290- ACCESS_ONCE(ads->ctl16) = 0;
33291- ACCESS_ONCE(ads->ctl17) = ctl17;
33292- ACCESS_ONCE(ads->ctl18) = 0;
33293- ACCESS_ONCE(ads->ctl19) = 0;
33294+ ACCESS_ONCE_RW(ads->ctl11) = 0;
33295+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33296+ ACCESS_ONCE_RW(ads->ctl15) = 0;
33297+ ACCESS_ONCE_RW(ads->ctl16) = 0;
33298+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33299+ ACCESS_ONCE_RW(ads->ctl18) = 0;
33300+ ACCESS_ONCE_RW(ads->ctl19) = 0;
33301 return;
33302 }
33303
33304- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33305+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33306 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33307 | SM(i->txpower, AR_XmitPower)
33308 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33309@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33310 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33311 ctl12 |= SM(val, AR_PAPRDChainMask);
33312
33313- ACCESS_ONCE(ads->ctl12) = ctl12;
33314- ACCESS_ONCE(ads->ctl17) = ctl17;
33315+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33316+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33317
33318- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33319+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33320 | set11nPktDurRTSCTS(i->rates, 1);
33321
33322- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33323+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33324 | set11nPktDurRTSCTS(i->rates, 3);
33325
33326- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33327+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33328 | set11nRateFlags(i->rates, 1)
33329 | set11nRateFlags(i->rates, 2)
33330 | set11nRateFlags(i->rates, 3)
33331 | SM(i->rtscts_rate, AR_RTSCTSRate);
33332
33333- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33334+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33335 }
33336
33337 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33338diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33339index f389b3c..7359e18 100644
33340--- a/drivers/net/wireless/ath/ath9k/hw.h
33341+++ b/drivers/net/wireless/ath/ath9k/hw.h
33342@@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33343
33344 /* ANI */
33345 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33346-};
33347+} __no_const;
33348
33349 /**
33350 * struct ath_hw_ops - callbacks used by hardware code and driver code
33351@@ -635,7 +635,7 @@ struct ath_hw_ops {
33352 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33353 struct ath_hw_antcomb_conf *antconf);
33354
33355-};
33356+} __no_const;
33357
33358 struct ath_nf_limits {
33359 s16 max;
33360@@ -655,7 +655,7 @@ enum ath_cal_list {
33361 #define AH_FASTCC 0x4
33362
33363 struct ath_hw {
33364- struct ath_ops reg_ops;
33365+ ath_ops_no_const reg_ops;
33366
33367 struct ieee80211_hw *hw;
33368 struct ath_common common;
33369diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33370index bea8524..c677c06 100644
33371--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33372+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33373@@ -547,7 +547,7 @@ struct phy_func_ptr {
33374 void (*carrsuppr)(struct brcms_phy *);
33375 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33376 void (*detach)(struct brcms_phy *);
33377-};
33378+} __no_const;
33379
33380 struct brcms_phy {
33381 struct brcms_phy_pub pubpi_ro;
33382diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33383index 05f2ad1..ae00eea 100644
33384--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33385+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33386@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33387 */
33388 if (iwl3945_mod_params.disable_hw_scan) {
33389 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33390- iwl3945_hw_ops.hw_scan = NULL;
33391+ pax_open_kernel();
33392+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33393+ pax_close_kernel();
33394 }
33395
33396 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33397diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33398index 69a77e2..552b42c 100644
33399--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33400+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33401@@ -71,8 +71,8 @@ do { \
33402 } while (0)
33403
33404 #else
33405-#define IWL_DEBUG(m, level, fmt, args...)
33406-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33407+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33408+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33409 #define iwl_print_hex_dump(m, level, p, len)
33410 #endif /* CONFIG_IWLWIFI_DEBUG */
33411
33412diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33413index 523ad55..f8c5dc5 100644
33414--- a/drivers/net/wireless/mac80211_hwsim.c
33415+++ b/drivers/net/wireless/mac80211_hwsim.c
33416@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33417 return -EINVAL;
33418
33419 if (fake_hw_scan) {
33420- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33421- mac80211_hwsim_ops.sw_scan_start = NULL;
33422- mac80211_hwsim_ops.sw_scan_complete = NULL;
33423+ pax_open_kernel();
33424+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33425+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33426+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33427+ pax_close_kernel();
33428 }
33429
33430 spin_lock_init(&hwsim_radio_lock);
33431diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33432index 30f138b..c904585 100644
33433--- a/drivers/net/wireless/mwifiex/main.h
33434+++ b/drivers/net/wireless/mwifiex/main.h
33435@@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33436 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33437 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33438 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33439-};
33440+} __no_const;
33441
33442 struct mwifiex_adapter {
33443 u8 iface_type;
33444diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33445index 0c13840..a5c3ed6 100644
33446--- a/drivers/net/wireless/rndis_wlan.c
33447+++ b/drivers/net/wireless/rndis_wlan.c
33448@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33449
33450 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33451
33452- if (rts_threshold < 0 || rts_threshold > 2347)
33453+ if (rts_threshold > 2347)
33454 rts_threshold = 2347;
33455
33456 tmp = cpu_to_le32(rts_threshold);
33457diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33458index a77f1bb..c608b2b 100644
33459--- a/drivers/net/wireless/wl1251/wl1251.h
33460+++ b/drivers/net/wireless/wl1251/wl1251.h
33461@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33462 void (*reset)(struct wl1251 *wl);
33463 void (*enable_irq)(struct wl1251 *wl);
33464 void (*disable_irq)(struct wl1251 *wl);
33465-};
33466+} __no_const;
33467
33468 struct wl1251 {
33469 struct ieee80211_hw *hw;
33470diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33471index f34b5b2..b5abb9f 100644
33472--- a/drivers/oprofile/buffer_sync.c
33473+++ b/drivers/oprofile/buffer_sync.c
33474@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33475 if (cookie == NO_COOKIE)
33476 offset = pc;
33477 if (cookie == INVALID_COOKIE) {
33478- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33479+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33480 offset = pc;
33481 }
33482 if (cookie != last_cookie) {
33483@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33484 /* add userspace sample */
33485
33486 if (!mm) {
33487- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33488+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33489 return 0;
33490 }
33491
33492 cookie = lookup_dcookie(mm, s->eip, &offset);
33493
33494 if (cookie == INVALID_COOKIE) {
33495- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33496+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33497 return 0;
33498 }
33499
33500@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33501 /* ignore backtraces if failed to add a sample */
33502 if (state == sb_bt_start) {
33503 state = sb_bt_ignore;
33504- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33505+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33506 }
33507 }
33508 release_mm(mm);
33509diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33510index c0cc4e7..44d4e54 100644
33511--- a/drivers/oprofile/event_buffer.c
33512+++ b/drivers/oprofile/event_buffer.c
33513@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33514 }
33515
33516 if (buffer_pos == buffer_size) {
33517- atomic_inc(&oprofile_stats.event_lost_overflow);
33518+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33519 return;
33520 }
33521
33522diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33523index f8c752e..28bf4fc 100644
33524--- a/drivers/oprofile/oprof.c
33525+++ b/drivers/oprofile/oprof.c
33526@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33527 if (oprofile_ops.switch_events())
33528 return;
33529
33530- atomic_inc(&oprofile_stats.multiplex_counter);
33531+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33532 start_switch_worker();
33533 }
33534
33535diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33536index 917d28e..d62d981 100644
33537--- a/drivers/oprofile/oprofile_stats.c
33538+++ b/drivers/oprofile/oprofile_stats.c
33539@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33540 cpu_buf->sample_invalid_eip = 0;
33541 }
33542
33543- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33544- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33545- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33546- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33547- atomic_set(&oprofile_stats.multiplex_counter, 0);
33548+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33549+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33550+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33551+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33552+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33553 }
33554
33555
33556diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33557index 38b6fc0..b5cbfce 100644
33558--- a/drivers/oprofile/oprofile_stats.h
33559+++ b/drivers/oprofile/oprofile_stats.h
33560@@ -13,11 +13,11 @@
33561 #include <linux/atomic.h>
33562
33563 struct oprofile_stat_struct {
33564- atomic_t sample_lost_no_mm;
33565- atomic_t sample_lost_no_mapping;
33566- atomic_t bt_lost_no_mapping;
33567- atomic_t event_lost_overflow;
33568- atomic_t multiplex_counter;
33569+ atomic_unchecked_t sample_lost_no_mm;
33570+ atomic_unchecked_t sample_lost_no_mapping;
33571+ atomic_unchecked_t bt_lost_no_mapping;
33572+ atomic_unchecked_t event_lost_overflow;
33573+ atomic_unchecked_t multiplex_counter;
33574 };
33575
33576 extern struct oprofile_stat_struct oprofile_stats;
33577diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33578index 2f0aa0f..90fab02 100644
33579--- a/drivers/oprofile/oprofilefs.c
33580+++ b/drivers/oprofile/oprofilefs.c
33581@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33582
33583
33584 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33585- char const *name, atomic_t *val)
33586+ char const *name, atomic_unchecked_t *val)
33587 {
33588 return __oprofilefs_create_file(sb, root, name,
33589 &atomic_ro_fops, 0444, val);
33590diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33591index 3f56bc0..707d642 100644
33592--- a/drivers/parport/procfs.c
33593+++ b/drivers/parport/procfs.c
33594@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33595
33596 *ppos += len;
33597
33598- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33599+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33600 }
33601
33602 #ifdef CONFIG_PARPORT_1284
33603@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33604
33605 *ppos += len;
33606
33607- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33608+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33609 }
33610 #endif /* IEEE1284.3 support. */
33611
33612diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33613index 9fff878..ad0ad53 100644
33614--- a/drivers/pci/hotplug/cpci_hotplug.h
33615+++ b/drivers/pci/hotplug/cpci_hotplug.h
33616@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33617 int (*hardware_test) (struct slot* slot, u32 value);
33618 u8 (*get_power) (struct slot* slot);
33619 int (*set_power) (struct slot* slot, int value);
33620-};
33621+} __no_const;
33622
33623 struct cpci_hp_controller {
33624 unsigned int irq;
33625diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33626index 76ba8a1..20ca857 100644
33627--- a/drivers/pci/hotplug/cpqphp_nvram.c
33628+++ b/drivers/pci/hotplug/cpqphp_nvram.c
33629@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33630
33631 void compaq_nvram_init (void __iomem *rom_start)
33632 {
33633+
33634+#ifndef CONFIG_PAX_KERNEXEC
33635 if (rom_start) {
33636 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33637 }
33638+#endif
33639+
33640 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33641
33642 /* initialize our int15 lock */
33643diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33644index cbfbab1..6a9fced 100644
33645--- a/drivers/pci/pcie/aspm.c
33646+++ b/drivers/pci/pcie/aspm.c
33647@@ -27,9 +27,9 @@
33648 #define MODULE_PARAM_PREFIX "pcie_aspm."
33649
33650 /* Note: those are not register definitions */
33651-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33652-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33653-#define ASPM_STATE_L1 (4) /* L1 state */
33654+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33655+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33656+#define ASPM_STATE_L1 (4U) /* L1 state */
33657 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33658 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33659
33660diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33661index 04e74f4..a960176 100644
33662--- a/drivers/pci/probe.c
33663+++ b/drivers/pci/probe.c
33664@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33665 u32 l, sz, mask;
33666 u16 orig_cmd;
33667
33668- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33669+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33670
33671 if (!dev->mmio_always_on) {
33672 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33673diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33674index 27911b5..5b6db88 100644
33675--- a/drivers/pci/proc.c
33676+++ b/drivers/pci/proc.c
33677@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33678 static int __init pci_proc_init(void)
33679 {
33680 struct pci_dev *dev = NULL;
33681+
33682+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33683+#ifdef CONFIG_GRKERNSEC_PROC_USER
33684+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33685+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33686+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33687+#endif
33688+#else
33689 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33690+#endif
33691 proc_create("devices", 0, proc_bus_pci_dir,
33692 &proc_bus_pci_dev_operations);
33693 proc_initialized = 1;
33694diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33695index 7b82868..b9344c9 100644
33696--- a/drivers/platform/x86/thinkpad_acpi.c
33697+++ b/drivers/platform/x86/thinkpad_acpi.c
33698@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33699 return 0;
33700 }
33701
33702-void static hotkey_mask_warn_incomplete_mask(void)
33703+static void hotkey_mask_warn_incomplete_mask(void)
33704 {
33705 /* log only what the user can fix... */
33706 const u32 wantedmask = hotkey_driver_mask &
33707@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33708 }
33709 }
33710
33711-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33712- struct tp_nvram_state *newn,
33713- const u32 event_mask)
33714-{
33715-
33716 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33717 do { \
33718 if ((event_mask & (1 << __scancode)) && \
33719@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33720 tpacpi_hotkey_send_key(__scancode); \
33721 } while (0)
33722
33723- void issue_volchange(const unsigned int oldvol,
33724- const unsigned int newvol)
33725- {
33726- unsigned int i = oldvol;
33727+static void issue_volchange(const unsigned int oldvol,
33728+ const unsigned int newvol,
33729+ const u32 event_mask)
33730+{
33731+ unsigned int i = oldvol;
33732
33733- while (i > newvol) {
33734- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33735- i--;
33736- }
33737- while (i < newvol) {
33738- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33739- i++;
33740- }
33741+ while (i > newvol) {
33742+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33743+ i--;
33744 }
33745+ while (i < newvol) {
33746+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33747+ i++;
33748+ }
33749+}
33750
33751- void issue_brightnesschange(const unsigned int oldbrt,
33752- const unsigned int newbrt)
33753- {
33754- unsigned int i = oldbrt;
33755+static void issue_brightnesschange(const unsigned int oldbrt,
33756+ const unsigned int newbrt,
33757+ const u32 event_mask)
33758+{
33759+ unsigned int i = oldbrt;
33760
33761- while (i > newbrt) {
33762- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33763- i--;
33764- }
33765- while (i < newbrt) {
33766- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33767- i++;
33768- }
33769+ while (i > newbrt) {
33770+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33771+ i--;
33772+ }
33773+ while (i < newbrt) {
33774+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33775+ i++;
33776 }
33777+}
33778
33779+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33780+ struct tp_nvram_state *newn,
33781+ const u32 event_mask)
33782+{
33783 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33784 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33785 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33786@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33787 oldn->volume_level != newn->volume_level) {
33788 /* recently muted, or repeated mute keypress, or
33789 * multiple presses ending in mute */
33790- issue_volchange(oldn->volume_level, newn->volume_level);
33791+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33792 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33793 }
33794 } else {
33795@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33796 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33797 }
33798 if (oldn->volume_level != newn->volume_level) {
33799- issue_volchange(oldn->volume_level, newn->volume_level);
33800+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33801 } else if (oldn->volume_toggle != newn->volume_toggle) {
33802 /* repeated vol up/down keypress at end of scale ? */
33803 if (newn->volume_level == 0)
33804@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33805 /* handle brightness */
33806 if (oldn->brightness_level != newn->brightness_level) {
33807 issue_brightnesschange(oldn->brightness_level,
33808- newn->brightness_level);
33809+ newn->brightness_level,
33810+ event_mask);
33811 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33812 /* repeated key presses that didn't change state */
33813 if (newn->brightness_level == 0)
33814@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33815 && !tp_features.bright_unkfw)
33816 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33817 }
33818+}
33819
33820 #undef TPACPI_COMPARE_KEY
33821 #undef TPACPI_MAY_SEND_KEY
33822-}
33823
33824 /*
33825 * Polling driver
33826diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33827index b859d16..5cc6b1a 100644
33828--- a/drivers/pnp/pnpbios/bioscalls.c
33829+++ b/drivers/pnp/pnpbios/bioscalls.c
33830@@ -59,7 +59,7 @@ do { \
33831 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33832 } while(0)
33833
33834-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33835+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33836 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33837
33838 /*
33839@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33840
33841 cpu = get_cpu();
33842 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33843+
33844+ pax_open_kernel();
33845 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33846+ pax_close_kernel();
33847
33848 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33849 spin_lock_irqsave(&pnp_bios_lock, flags);
33850@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33851 :"memory");
33852 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33853
33854+ pax_open_kernel();
33855 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33856+ pax_close_kernel();
33857+
33858 put_cpu();
33859
33860 /* If we get here and this is set then the PnP BIOS faulted on us. */
33861@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33862 return status;
33863 }
33864
33865-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33866+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33867 {
33868 int i;
33869
33870@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33871 pnp_bios_callpoint.offset = header->fields.pm16offset;
33872 pnp_bios_callpoint.segment = PNP_CS16;
33873
33874+ pax_open_kernel();
33875+
33876 for_each_possible_cpu(i) {
33877 struct desc_struct *gdt = get_cpu_gdt_table(i);
33878 if (!gdt)
33879@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33880 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33881 (unsigned long)__va(header->fields.pm16dseg));
33882 }
33883+
33884+ pax_close_kernel();
33885 }
33886diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33887index b0ecacb..7c9da2e 100644
33888--- a/drivers/pnp/resource.c
33889+++ b/drivers/pnp/resource.c
33890@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33891 return 1;
33892
33893 /* check if the resource is valid */
33894- if (*irq < 0 || *irq > 15)
33895+ if (*irq > 15)
33896 return 0;
33897
33898 /* check if the resource is reserved */
33899@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33900 return 1;
33901
33902 /* check if the resource is valid */
33903- if (*dma < 0 || *dma == 4 || *dma > 7)
33904+ if (*dma == 4 || *dma > 7)
33905 return 0;
33906
33907 /* check if the resource is reserved */
33908diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33909index bb16f5b..c751eef 100644
33910--- a/drivers/power/bq27x00_battery.c
33911+++ b/drivers/power/bq27x00_battery.c
33912@@ -67,7 +67,7 @@
33913 struct bq27x00_device_info;
33914 struct bq27x00_access_methods {
33915 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33916-};
33917+} __no_const;
33918
33919 enum bq27x00_chip { BQ27000, BQ27500 };
33920
33921diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33922index 33f5d9a..d957d3f 100644
33923--- a/drivers/regulator/max8660.c
33924+++ b/drivers/regulator/max8660.c
33925@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33926 max8660->shadow_regs[MAX8660_OVER1] = 5;
33927 } else {
33928 /* Otherwise devices can be toggled via software */
33929- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33930- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33931+ pax_open_kernel();
33932+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33933+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33934+ pax_close_kernel();
33935 }
33936
33937 /*
33938diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33939index 023d17d..74ef35b 100644
33940--- a/drivers/regulator/mc13892-regulator.c
33941+++ b/drivers/regulator/mc13892-regulator.c
33942@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33943 }
33944 mc13xxx_unlock(mc13892);
33945
33946- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33947+ pax_open_kernel();
33948+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33949 = mc13892_vcam_set_mode;
33950- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33951+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33952 = mc13892_vcam_get_mode;
33953+ pax_close_kernel();
33954 for (i = 0; i < pdata->num_regulators; i++) {
33955 init_data = &pdata->regulators[i];
33956 priv->regulators[i] = regulator_register(
33957diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33958index cace6d3..f623fda 100644
33959--- a/drivers/rtc/rtc-dev.c
33960+++ b/drivers/rtc/rtc-dev.c
33961@@ -14,6 +14,7 @@
33962 #include <linux/module.h>
33963 #include <linux/rtc.h>
33964 #include <linux/sched.h>
33965+#include <linux/grsecurity.h>
33966 #include "rtc-core.h"
33967
33968 static dev_t rtc_devt;
33969@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33970 if (copy_from_user(&tm, uarg, sizeof(tm)))
33971 return -EFAULT;
33972
33973+ gr_log_timechange();
33974+
33975 return rtc_set_time(rtc, &tm);
33976
33977 case RTC_PIE_ON:
33978diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33979index ffb5878..e6d785c 100644
33980--- a/drivers/scsi/aacraid/aacraid.h
33981+++ b/drivers/scsi/aacraid/aacraid.h
33982@@ -492,7 +492,7 @@ struct adapter_ops
33983 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33984 /* Administrative operations */
33985 int (*adapter_comm)(struct aac_dev * dev, int comm);
33986-};
33987+} __no_const;
33988
33989 /*
33990 * Define which interrupt handler needs to be installed
33991diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33992index 705e13e..91c873c 100644
33993--- a/drivers/scsi/aacraid/linit.c
33994+++ b/drivers/scsi/aacraid/linit.c
33995@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33996 #elif defined(__devinitconst)
33997 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33998 #else
33999-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34000+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34001 #endif
34002 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34003 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34004diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34005index d5ff142..49c0ebb 100644
34006--- a/drivers/scsi/aic94xx/aic94xx_init.c
34007+++ b/drivers/scsi/aic94xx/aic94xx_init.c
34008@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34009 .lldd_control_phy = asd_control_phy,
34010 };
34011
34012-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34013+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34014 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34015 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34016 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34017diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34018index a796de9..1ef20e1 100644
34019--- a/drivers/scsi/bfa/bfa.h
34020+++ b/drivers/scsi/bfa/bfa.h
34021@@ -196,7 +196,7 @@ struct bfa_hwif_s {
34022 u32 *end);
34023 int cpe_vec_q0;
34024 int rme_vec_q0;
34025-};
34026+} __no_const;
34027 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34028
34029 struct bfa_faa_cbfn_s {
34030diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34031index e07bd47..cd1bbbb 100644
34032--- a/drivers/scsi/bfa/bfa_fcpim.c
34033+++ b/drivers/scsi/bfa/bfa_fcpim.c
34034@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34035
34036 bfa_iotag_attach(fcp);
34037
34038- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34039+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34040 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34041 (fcp->num_itns * sizeof(struct bfa_itn_s));
34042 memset(fcp->itn_arr, 0,
34043@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34044 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34045 {
34046 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34047- struct bfa_itn_s *itn;
34048+ bfa_itn_s_no_const *itn;
34049
34050 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34051 itn->isr = isr;
34052diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34053index 1080bcb..a3b39e3 100644
34054--- a/drivers/scsi/bfa/bfa_fcpim.h
34055+++ b/drivers/scsi/bfa/bfa_fcpim.h
34056@@ -37,6 +37,7 @@ struct bfa_iotag_s {
34057 struct bfa_itn_s {
34058 bfa_isr_func_t isr;
34059 };
34060+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34061
34062 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34063 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34064@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34065 struct list_head iotag_tio_free_q; /* free IO resources */
34066 struct list_head iotag_unused_q; /* unused IO resources*/
34067 struct bfa_iotag_s *iotag_arr;
34068- struct bfa_itn_s *itn_arr;
34069+ bfa_itn_s_no_const *itn_arr;
34070 int num_ioim_reqs;
34071 int num_fwtio_reqs;
34072 int num_itns;
34073diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34074index 546d46b..642fa5b 100644
34075--- a/drivers/scsi/bfa/bfa_ioc.h
34076+++ b/drivers/scsi/bfa/bfa_ioc.h
34077@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34078 bfa_ioc_disable_cbfn_t disable_cbfn;
34079 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34080 bfa_ioc_reset_cbfn_t reset_cbfn;
34081-};
34082+} __no_const;
34083
34084 /*
34085 * IOC event notification mechanism.
34086@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34087 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34088 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34089 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34090-};
34091+} __no_const;
34092
34093 /*
34094 * Queue element to wait for room in request queue. FIFO order is
34095diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34096index 351dc0b..951dc32 100644
34097--- a/drivers/scsi/hosts.c
34098+++ b/drivers/scsi/hosts.c
34099@@ -42,7 +42,7 @@
34100 #include "scsi_logging.h"
34101
34102
34103-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34104+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34105
34106
34107 static void scsi_host_cls_release(struct device *dev)
34108@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34109 * subtract one because we increment first then return, but we need to
34110 * know what the next host number was before increment
34111 */
34112- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34113+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34114 shost->dma_channel = 0xff;
34115
34116 /* These three are default values which can be overridden */
34117diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34118index 865d452..e9b7fa7 100644
34119--- a/drivers/scsi/hpsa.c
34120+++ b/drivers/scsi/hpsa.c
34121@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34122 u32 a;
34123
34124 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34125- return h->access.command_completed(h);
34126+ return h->access->command_completed(h);
34127
34128 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34129 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34130@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34131 while (!list_empty(&h->reqQ)) {
34132 c = list_entry(h->reqQ.next, struct CommandList, list);
34133 /* can't do anything if fifo is full */
34134- if ((h->access.fifo_full(h))) {
34135+ if ((h->access->fifo_full(h))) {
34136 dev_warn(&h->pdev->dev, "fifo full\n");
34137 break;
34138 }
34139@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34140 h->Qdepth--;
34141
34142 /* Tell the controller execute command */
34143- h->access.submit_command(h, c);
34144+ h->access->submit_command(h, c);
34145
34146 /* Put job onto the completed Q */
34147 addQ(&h->cmpQ, c);
34148@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34149
34150 static inline unsigned long get_next_completion(struct ctlr_info *h)
34151 {
34152- return h->access.command_completed(h);
34153+ return h->access->command_completed(h);
34154 }
34155
34156 static inline bool interrupt_pending(struct ctlr_info *h)
34157 {
34158- return h->access.intr_pending(h);
34159+ return h->access->intr_pending(h);
34160 }
34161
34162 static inline long interrupt_not_for_us(struct ctlr_info *h)
34163 {
34164- return (h->access.intr_pending(h) == 0) ||
34165+ return (h->access->intr_pending(h) == 0) ||
34166 (h->interrupts_enabled == 0);
34167 }
34168
34169@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34170 if (prod_index < 0)
34171 return -ENODEV;
34172 h->product_name = products[prod_index].product_name;
34173- h->access = *(products[prod_index].access);
34174+ h->access = products[prod_index].access;
34175
34176 if (hpsa_board_disabled(h->pdev)) {
34177 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34178@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34179
34180 assert_spin_locked(&lockup_detector_lock);
34181 remove_ctlr_from_lockup_detector_list(h);
34182- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34183+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34184 spin_lock_irqsave(&h->lock, flags);
34185 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34186 spin_unlock_irqrestore(&h->lock, flags);
34187@@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34188 }
34189
34190 /* make sure the board interrupts are off */
34191- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34192+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34193
34194 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34195 goto clean2;
34196@@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34197 * fake ones to scoop up any residual completions.
34198 */
34199 spin_lock_irqsave(&h->lock, flags);
34200- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34201+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34202 spin_unlock_irqrestore(&h->lock, flags);
34203 free_irq(h->intr[h->intr_mode], h);
34204 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34205@@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34206 dev_info(&h->pdev->dev, "Board READY.\n");
34207 dev_info(&h->pdev->dev,
34208 "Waiting for stale completions to drain.\n");
34209- h->access.set_intr_mask(h, HPSA_INTR_ON);
34210+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34211 msleep(10000);
34212- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34213+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34214
34215 rc = controller_reset_failed(h->cfgtable);
34216 if (rc)
34217@@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34218 }
34219
34220 /* Turn the interrupts on so we can service requests */
34221- h->access.set_intr_mask(h, HPSA_INTR_ON);
34222+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34223
34224 hpsa_hba_inquiry(h);
34225 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34226@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34227 * To write all data in the battery backed cache to disks
34228 */
34229 hpsa_flush_cache(h);
34230- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34231+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34232 free_irq(h->intr[h->intr_mode], h);
34233 #ifdef CONFIG_PCI_MSI
34234 if (h->msix_vector)
34235@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34236 return;
34237 }
34238 /* Change the access methods to the performant access methods */
34239- h->access = SA5_performant_access;
34240+ h->access = &SA5_performant_access;
34241 h->transMethod = CFGTBL_Trans_Performant;
34242 }
34243
34244diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34245index 91edafb..a9b88ec 100644
34246--- a/drivers/scsi/hpsa.h
34247+++ b/drivers/scsi/hpsa.h
34248@@ -73,7 +73,7 @@ struct ctlr_info {
34249 unsigned int msix_vector;
34250 unsigned int msi_vector;
34251 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34252- struct access_method access;
34253+ struct access_method *access;
34254
34255 /* queue and queue Info */
34256 struct list_head reqQ;
34257diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34258index f2df059..a3a9930 100644
34259--- a/drivers/scsi/ips.h
34260+++ b/drivers/scsi/ips.h
34261@@ -1027,7 +1027,7 @@ typedef struct {
34262 int (*intr)(struct ips_ha *);
34263 void (*enableint)(struct ips_ha *);
34264 uint32_t (*statupd)(struct ips_ha *);
34265-} ips_hw_func_t;
34266+} __no_const ips_hw_func_t;
34267
34268 typedef struct ips_ha {
34269 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34270diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34271index 9de9db2..1e09660 100644
34272--- a/drivers/scsi/libfc/fc_exch.c
34273+++ b/drivers/scsi/libfc/fc_exch.c
34274@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34275 * all together if not used XXX
34276 */
34277 struct {
34278- atomic_t no_free_exch;
34279- atomic_t no_free_exch_xid;
34280- atomic_t xid_not_found;
34281- atomic_t xid_busy;
34282- atomic_t seq_not_found;
34283- atomic_t non_bls_resp;
34284+ atomic_unchecked_t no_free_exch;
34285+ atomic_unchecked_t no_free_exch_xid;
34286+ atomic_unchecked_t xid_not_found;
34287+ atomic_unchecked_t xid_busy;
34288+ atomic_unchecked_t seq_not_found;
34289+ atomic_unchecked_t non_bls_resp;
34290 } stats;
34291 };
34292
34293@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34294 /* allocate memory for exchange */
34295 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34296 if (!ep) {
34297- atomic_inc(&mp->stats.no_free_exch);
34298+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34299 goto out;
34300 }
34301 memset(ep, 0, sizeof(*ep));
34302@@ -780,7 +780,7 @@ out:
34303 return ep;
34304 err:
34305 spin_unlock_bh(&pool->lock);
34306- atomic_inc(&mp->stats.no_free_exch_xid);
34307+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34308 mempool_free(ep, mp->ep_pool);
34309 return NULL;
34310 }
34311@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34312 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34313 ep = fc_exch_find(mp, xid);
34314 if (!ep) {
34315- atomic_inc(&mp->stats.xid_not_found);
34316+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34317 reject = FC_RJT_OX_ID;
34318 goto out;
34319 }
34320@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34321 ep = fc_exch_find(mp, xid);
34322 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34323 if (ep) {
34324- atomic_inc(&mp->stats.xid_busy);
34325+ atomic_inc_unchecked(&mp->stats.xid_busy);
34326 reject = FC_RJT_RX_ID;
34327 goto rel;
34328 }
34329@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34330 }
34331 xid = ep->xid; /* get our XID */
34332 } else if (!ep) {
34333- atomic_inc(&mp->stats.xid_not_found);
34334+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34335 reject = FC_RJT_RX_ID; /* XID not found */
34336 goto out;
34337 }
34338@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34339 } else {
34340 sp = &ep->seq;
34341 if (sp->id != fh->fh_seq_id) {
34342- atomic_inc(&mp->stats.seq_not_found);
34343+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34344 if (f_ctl & FC_FC_END_SEQ) {
34345 /*
34346 * Update sequence_id based on incoming last
34347@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34348
34349 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34350 if (!ep) {
34351- atomic_inc(&mp->stats.xid_not_found);
34352+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34353 goto out;
34354 }
34355 if (ep->esb_stat & ESB_ST_COMPLETE) {
34356- atomic_inc(&mp->stats.xid_not_found);
34357+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34358 goto rel;
34359 }
34360 if (ep->rxid == FC_XID_UNKNOWN)
34361 ep->rxid = ntohs(fh->fh_rx_id);
34362 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34363- atomic_inc(&mp->stats.xid_not_found);
34364+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34365 goto rel;
34366 }
34367 if (ep->did != ntoh24(fh->fh_s_id) &&
34368 ep->did != FC_FID_FLOGI) {
34369- atomic_inc(&mp->stats.xid_not_found);
34370+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34371 goto rel;
34372 }
34373 sof = fr_sof(fp);
34374@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34375 sp->ssb_stat |= SSB_ST_RESP;
34376 sp->id = fh->fh_seq_id;
34377 } else if (sp->id != fh->fh_seq_id) {
34378- atomic_inc(&mp->stats.seq_not_found);
34379+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34380 goto rel;
34381 }
34382
34383@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34384 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34385
34386 if (!sp)
34387- atomic_inc(&mp->stats.xid_not_found);
34388+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34389 else
34390- atomic_inc(&mp->stats.non_bls_resp);
34391+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34392
34393 fc_frame_free(fp);
34394 }
34395diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34396index db9238f..4378ed2 100644
34397--- a/drivers/scsi/libsas/sas_ata.c
34398+++ b/drivers/scsi/libsas/sas_ata.c
34399@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34400 .postreset = ata_std_postreset,
34401 .error_handler = ata_std_error_handler,
34402 .post_internal_cmd = sas_ata_post_internal,
34403- .qc_defer = ata_std_qc_defer,
34404+ .qc_defer = ata_std_qc_defer,
34405 .qc_prep = ata_noop_qc_prep,
34406 .qc_issue = sas_ata_qc_issue,
34407 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34408diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34409index bb4c8e0..f33d849 100644
34410--- a/drivers/scsi/lpfc/lpfc.h
34411+++ b/drivers/scsi/lpfc/lpfc.h
34412@@ -425,7 +425,7 @@ struct lpfc_vport {
34413 struct dentry *debug_nodelist;
34414 struct dentry *vport_debugfs_root;
34415 struct lpfc_debugfs_trc *disc_trc;
34416- atomic_t disc_trc_cnt;
34417+ atomic_unchecked_t disc_trc_cnt;
34418 #endif
34419 uint8_t stat_data_enabled;
34420 uint8_t stat_data_blocked;
34421@@ -835,8 +835,8 @@ struct lpfc_hba {
34422 struct timer_list fabric_block_timer;
34423 unsigned long bit_flags;
34424 #define FABRIC_COMANDS_BLOCKED 0
34425- atomic_t num_rsrc_err;
34426- atomic_t num_cmd_success;
34427+ atomic_unchecked_t num_rsrc_err;
34428+ atomic_unchecked_t num_cmd_success;
34429 unsigned long last_rsrc_error_time;
34430 unsigned long last_ramp_down_time;
34431 unsigned long last_ramp_up_time;
34432@@ -866,7 +866,7 @@ struct lpfc_hba {
34433
34434 struct dentry *debug_slow_ring_trc;
34435 struct lpfc_debugfs_trc *slow_ring_trc;
34436- atomic_t slow_ring_trc_cnt;
34437+ atomic_unchecked_t slow_ring_trc_cnt;
34438 /* iDiag debugfs sub-directory */
34439 struct dentry *idiag_root;
34440 struct dentry *idiag_pci_cfg;
34441diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34442index 2838259..a07cfb5 100644
34443--- a/drivers/scsi/lpfc/lpfc_debugfs.c
34444+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34445@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34446
34447 #include <linux/debugfs.h>
34448
34449-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34450+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34451 static unsigned long lpfc_debugfs_start_time = 0L;
34452
34453 /* iDiag */
34454@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34455 lpfc_debugfs_enable = 0;
34456
34457 len = 0;
34458- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34459+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34460 (lpfc_debugfs_max_disc_trc - 1);
34461 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34462 dtp = vport->disc_trc + i;
34463@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34464 lpfc_debugfs_enable = 0;
34465
34466 len = 0;
34467- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34468+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34469 (lpfc_debugfs_max_slow_ring_trc - 1);
34470 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34471 dtp = phba->slow_ring_trc + i;
34472@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34473 !vport || !vport->disc_trc)
34474 return;
34475
34476- index = atomic_inc_return(&vport->disc_trc_cnt) &
34477+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34478 (lpfc_debugfs_max_disc_trc - 1);
34479 dtp = vport->disc_trc + index;
34480 dtp->fmt = fmt;
34481 dtp->data1 = data1;
34482 dtp->data2 = data2;
34483 dtp->data3 = data3;
34484- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34485+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34486 dtp->jif = jiffies;
34487 #endif
34488 return;
34489@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34490 !phba || !phba->slow_ring_trc)
34491 return;
34492
34493- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34494+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34495 (lpfc_debugfs_max_slow_ring_trc - 1);
34496 dtp = phba->slow_ring_trc + index;
34497 dtp->fmt = fmt;
34498 dtp->data1 = data1;
34499 dtp->data2 = data2;
34500 dtp->data3 = data3;
34501- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34502+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34503 dtp->jif = jiffies;
34504 #endif
34505 return;
34506@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34507 "slow_ring buffer\n");
34508 goto debug_failed;
34509 }
34510- atomic_set(&phba->slow_ring_trc_cnt, 0);
34511+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34512 memset(phba->slow_ring_trc, 0,
34513 (sizeof(struct lpfc_debugfs_trc) *
34514 lpfc_debugfs_max_slow_ring_trc));
34515@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34516 "buffer\n");
34517 goto debug_failed;
34518 }
34519- atomic_set(&vport->disc_trc_cnt, 0);
34520+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34521
34522 snprintf(name, sizeof(name), "discovery_trace");
34523 vport->debug_disc_trc =
34524diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34525index 55bc4fc..a2a109c 100644
34526--- a/drivers/scsi/lpfc/lpfc_init.c
34527+++ b/drivers/scsi/lpfc/lpfc_init.c
34528@@ -10027,8 +10027,10 @@ lpfc_init(void)
34529 printk(LPFC_COPYRIGHT "\n");
34530
34531 if (lpfc_enable_npiv) {
34532- lpfc_transport_functions.vport_create = lpfc_vport_create;
34533- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34534+ pax_open_kernel();
34535+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34536+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34537+ pax_close_kernel();
34538 }
34539 lpfc_transport_template =
34540 fc_attach_transport(&lpfc_transport_functions);
34541diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34542index 2e1e54e..1af0a0d 100644
34543--- a/drivers/scsi/lpfc/lpfc_scsi.c
34544+++ b/drivers/scsi/lpfc/lpfc_scsi.c
34545@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34546 uint32_t evt_posted;
34547
34548 spin_lock_irqsave(&phba->hbalock, flags);
34549- atomic_inc(&phba->num_rsrc_err);
34550+ atomic_inc_unchecked(&phba->num_rsrc_err);
34551 phba->last_rsrc_error_time = jiffies;
34552
34553 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34554@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34555 unsigned long flags;
34556 struct lpfc_hba *phba = vport->phba;
34557 uint32_t evt_posted;
34558- atomic_inc(&phba->num_cmd_success);
34559+ atomic_inc_unchecked(&phba->num_cmd_success);
34560
34561 if (vport->cfg_lun_queue_depth <= queue_depth)
34562 return;
34563@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34564 unsigned long num_rsrc_err, num_cmd_success;
34565 int i;
34566
34567- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34568- num_cmd_success = atomic_read(&phba->num_cmd_success);
34569+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34570+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34571
34572 vports = lpfc_create_vport_work_array(phba);
34573 if (vports != NULL)
34574@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34575 }
34576 }
34577 lpfc_destroy_vport_work_array(phba, vports);
34578- atomic_set(&phba->num_rsrc_err, 0);
34579- atomic_set(&phba->num_cmd_success, 0);
34580+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34581+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34582 }
34583
34584 /**
34585@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34586 }
34587 }
34588 lpfc_destroy_vport_work_array(phba, vports);
34589- atomic_set(&phba->num_rsrc_err, 0);
34590- atomic_set(&phba->num_cmd_success, 0);
34591+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34592+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34593 }
34594
34595 /**
34596diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34597index 5163edb..7b142bc 100644
34598--- a/drivers/scsi/pmcraid.c
34599+++ b/drivers/scsi/pmcraid.c
34600@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34601 res->scsi_dev = scsi_dev;
34602 scsi_dev->hostdata = res;
34603 res->change_detected = 0;
34604- atomic_set(&res->read_failures, 0);
34605- atomic_set(&res->write_failures, 0);
34606+ atomic_set_unchecked(&res->read_failures, 0);
34607+ atomic_set_unchecked(&res->write_failures, 0);
34608 rc = 0;
34609 }
34610 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34611@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34612
34613 /* If this was a SCSI read/write command keep count of errors */
34614 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34615- atomic_inc(&res->read_failures);
34616+ atomic_inc_unchecked(&res->read_failures);
34617 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34618- atomic_inc(&res->write_failures);
34619+ atomic_inc_unchecked(&res->write_failures);
34620
34621 if (!RES_IS_GSCSI(res->cfg_entry) &&
34622 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34623@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34624 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34625 * hrrq_id assigned here in queuecommand
34626 */
34627- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34628+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34629 pinstance->num_hrrq;
34630 cmd->cmd_done = pmcraid_io_done;
34631
34632@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34633 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34634 * hrrq_id assigned here in queuecommand
34635 */
34636- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34637+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34638 pinstance->num_hrrq;
34639
34640 if (request_size) {
34641@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34642
34643 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34644 /* add resources only after host is added into system */
34645- if (!atomic_read(&pinstance->expose_resources))
34646+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34647 return;
34648
34649 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34650@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34651 init_waitqueue_head(&pinstance->reset_wait_q);
34652
34653 atomic_set(&pinstance->outstanding_cmds, 0);
34654- atomic_set(&pinstance->last_message_id, 0);
34655- atomic_set(&pinstance->expose_resources, 0);
34656+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34657+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34658
34659 INIT_LIST_HEAD(&pinstance->free_res_q);
34660 INIT_LIST_HEAD(&pinstance->used_res_q);
34661@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34662 /* Schedule worker thread to handle CCN and take care of adding and
34663 * removing devices to OS
34664 */
34665- atomic_set(&pinstance->expose_resources, 1);
34666+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34667 schedule_work(&pinstance->worker_q);
34668 return rc;
34669
34670diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34671index ca496c7..9c791d5 100644
34672--- a/drivers/scsi/pmcraid.h
34673+++ b/drivers/scsi/pmcraid.h
34674@@ -748,7 +748,7 @@ struct pmcraid_instance {
34675 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34676
34677 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34678- atomic_t last_message_id;
34679+ atomic_unchecked_t last_message_id;
34680
34681 /* configuration table */
34682 struct pmcraid_config_table *cfg_table;
34683@@ -777,7 +777,7 @@ struct pmcraid_instance {
34684 atomic_t outstanding_cmds;
34685
34686 /* should add/delete resources to mid-layer now ?*/
34687- atomic_t expose_resources;
34688+ atomic_unchecked_t expose_resources;
34689
34690
34691
34692@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34693 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34694 };
34695 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34696- atomic_t read_failures; /* count of failed READ commands */
34697- atomic_t write_failures; /* count of failed WRITE commands */
34698+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34699+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34700
34701 /* To indicate add/delete/modify during CCN */
34702 u8 change_detected;
34703diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34704index fcf052c..a8025a4 100644
34705--- a/drivers/scsi/qla2xxx/qla_def.h
34706+++ b/drivers/scsi/qla2xxx/qla_def.h
34707@@ -2244,7 +2244,7 @@ struct isp_operations {
34708 int (*get_flash_version) (struct scsi_qla_host *, void *);
34709 int (*start_scsi) (srb_t *);
34710 int (*abort_isp) (struct scsi_qla_host *);
34711-};
34712+} __no_const;
34713
34714 /* MSI-X Support *************************************************************/
34715
34716diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34717index fd5edc6..4906148 100644
34718--- a/drivers/scsi/qla4xxx/ql4_def.h
34719+++ b/drivers/scsi/qla4xxx/ql4_def.h
34720@@ -258,7 +258,7 @@ struct ddb_entry {
34721 * (4000 only) */
34722 atomic_t relogin_timer; /* Max Time to wait for
34723 * relogin to complete */
34724- atomic_t relogin_retry_count; /* Num of times relogin has been
34725+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34726 * retried */
34727 uint32_t default_time2wait; /* Default Min time between
34728 * relogins (+aens) */
34729diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34730index 4169c8b..a8b896b 100644
34731--- a/drivers/scsi/qla4xxx/ql4_os.c
34732+++ b/drivers/scsi/qla4xxx/ql4_os.c
34733@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34734 */
34735 if (!iscsi_is_session_online(cls_sess)) {
34736 /* Reset retry relogin timer */
34737- atomic_inc(&ddb_entry->relogin_retry_count);
34738+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34739 DEBUG2(ql4_printk(KERN_INFO, ha,
34740 "%s: index[%d] relogin timed out-retrying"
34741 " relogin (%d), retry (%d)\n", __func__,
34742 ddb_entry->fw_ddb_index,
34743- atomic_read(&ddb_entry->relogin_retry_count),
34744+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34745 ddb_entry->default_time2wait + 4));
34746 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34747 atomic_set(&ddb_entry->retry_relogin_timer,
34748@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34749
34750 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34751 atomic_set(&ddb_entry->relogin_timer, 0);
34752- atomic_set(&ddb_entry->relogin_retry_count, 0);
34753+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34754
34755 ddb_entry->default_relogin_timeout =
34756 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34757diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34758index 2aeb2e9..46e3925 100644
34759--- a/drivers/scsi/scsi.c
34760+++ b/drivers/scsi/scsi.c
34761@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34762 unsigned long timeout;
34763 int rtn = 0;
34764
34765- atomic_inc(&cmd->device->iorequest_cnt);
34766+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34767
34768 /* check if the device is still usable */
34769 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34770diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34771index f85cfa6..a57c9e8 100644
34772--- a/drivers/scsi/scsi_lib.c
34773+++ b/drivers/scsi/scsi_lib.c
34774@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34775 shost = sdev->host;
34776 scsi_init_cmd_errh(cmd);
34777 cmd->result = DID_NO_CONNECT << 16;
34778- atomic_inc(&cmd->device->iorequest_cnt);
34779+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34780
34781 /*
34782 * SCSI request completion path will do scsi_device_unbusy(),
34783@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34784
34785 INIT_LIST_HEAD(&cmd->eh_entry);
34786
34787- atomic_inc(&cmd->device->iodone_cnt);
34788+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34789 if (cmd->result)
34790- atomic_inc(&cmd->device->ioerr_cnt);
34791+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34792
34793 disposition = scsi_decide_disposition(cmd);
34794 if (disposition != SUCCESS &&
34795diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34796index 04c2a27..9d8bd66 100644
34797--- a/drivers/scsi/scsi_sysfs.c
34798+++ b/drivers/scsi/scsi_sysfs.c
34799@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34800 char *buf) \
34801 { \
34802 struct scsi_device *sdev = to_scsi_device(dev); \
34803- unsigned long long count = atomic_read(&sdev->field); \
34804+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34805 return snprintf(buf, 20, "0x%llx\n", count); \
34806 } \
34807 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34808diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34809index 84a1fdf..693b0d6 100644
34810--- a/drivers/scsi/scsi_tgt_lib.c
34811+++ b/drivers/scsi/scsi_tgt_lib.c
34812@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34813 int err;
34814
34815 dprintk("%lx %u\n", uaddr, len);
34816- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34817+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34818 if (err) {
34819 /*
34820 * TODO: need to fixup sg_tablesize, max_segment_size,
34821diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34822index 1b21491..1b7f60e 100644
34823--- a/drivers/scsi/scsi_transport_fc.c
34824+++ b/drivers/scsi/scsi_transport_fc.c
34825@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34826 * Netlink Infrastructure
34827 */
34828
34829-static atomic_t fc_event_seq;
34830+static atomic_unchecked_t fc_event_seq;
34831
34832 /**
34833 * fc_get_event_number - Obtain the next sequential FC event number
34834@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34835 u32
34836 fc_get_event_number(void)
34837 {
34838- return atomic_add_return(1, &fc_event_seq);
34839+ return atomic_add_return_unchecked(1, &fc_event_seq);
34840 }
34841 EXPORT_SYMBOL(fc_get_event_number);
34842
34843@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34844 {
34845 int error;
34846
34847- atomic_set(&fc_event_seq, 0);
34848+ atomic_set_unchecked(&fc_event_seq, 0);
34849
34850 error = transport_class_register(&fc_host_class);
34851 if (error)
34852@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34853 char *cp;
34854
34855 *val = simple_strtoul(buf, &cp, 0);
34856- if ((*cp && (*cp != '\n')) || (*val < 0))
34857+ if (*cp && (*cp != '\n'))
34858 return -EINVAL;
34859 /*
34860 * Check for overflow; dev_loss_tmo is u32
34861diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34862index 96029e6..4d77fa0 100644
34863--- a/drivers/scsi/scsi_transport_iscsi.c
34864+++ b/drivers/scsi/scsi_transport_iscsi.c
34865@@ -79,7 +79,7 @@ struct iscsi_internal {
34866 struct transport_container session_cont;
34867 };
34868
34869-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34870+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34871 static struct workqueue_struct *iscsi_eh_timer_workq;
34872
34873 static DEFINE_IDA(iscsi_sess_ida);
34874@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34875 int err;
34876
34877 ihost = shost->shost_data;
34878- session->sid = atomic_add_return(1, &iscsi_session_nr);
34879+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34880
34881 if (target_id == ISCSI_MAX_TARGET) {
34882 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34883@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34884 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34885 ISCSI_TRANSPORT_VERSION);
34886
34887- atomic_set(&iscsi_session_nr, 0);
34888+ atomic_set_unchecked(&iscsi_session_nr, 0);
34889
34890 err = class_register(&iscsi_transport_class);
34891 if (err)
34892diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34893index 21a045e..ec89e03 100644
34894--- a/drivers/scsi/scsi_transport_srp.c
34895+++ b/drivers/scsi/scsi_transport_srp.c
34896@@ -33,7 +33,7 @@
34897 #include "scsi_transport_srp_internal.h"
34898
34899 struct srp_host_attrs {
34900- atomic_t next_port_id;
34901+ atomic_unchecked_t next_port_id;
34902 };
34903 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34904
34905@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34906 struct Scsi_Host *shost = dev_to_shost(dev);
34907 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34908
34909- atomic_set(&srp_host->next_port_id, 0);
34910+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34911 return 0;
34912 }
34913
34914@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34915 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34916 rport->roles = ids->roles;
34917
34918- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34919+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34920 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34921
34922 transport_setup_device(&rport->dev);
34923diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34924index 441a1c5..07cece7 100644
34925--- a/drivers/scsi/sg.c
34926+++ b/drivers/scsi/sg.c
34927@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34928 sdp->disk->disk_name,
34929 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34930 NULL,
34931- (char *)arg);
34932+ (char __user *)arg);
34933 case BLKTRACESTART:
34934 return blk_trace_startstop(sdp->device->request_queue, 1);
34935 case BLKTRACESTOP:
34936@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34937 const struct file_operations * fops;
34938 };
34939
34940-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34941+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34942 {"allow_dio", &adio_fops},
34943 {"debug", &debug_fops},
34944 {"def_reserved_size", &dressz_fops},
34945@@ -2327,7 +2327,7 @@ sg_proc_init(void)
34946 {
34947 int k, mask;
34948 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34949- struct sg_proc_leaf * leaf;
34950+ const struct sg_proc_leaf * leaf;
34951
34952 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34953 if (!sg_proc_sgp)
34954diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34955index f64250e..1ee3049 100644
34956--- a/drivers/spi/spi-dw-pci.c
34957+++ b/drivers/spi/spi-dw-pci.c
34958@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34959 #define spi_resume NULL
34960 #endif
34961
34962-static const struct pci_device_id pci_ids[] __devinitdata = {
34963+static const struct pci_device_id pci_ids[] __devinitconst = {
34964 /* Intel MID platform SPI controller 0 */
34965 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34966 {},
34967diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34968index 77eae99..b7cdcc9 100644
34969--- a/drivers/spi/spi.c
34970+++ b/drivers/spi/spi.c
34971@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34972 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34973
34974 /* portable code must never pass more than 32 bytes */
34975-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34976+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34977
34978 static u8 *buf;
34979
34980diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34981index 436fe97..4082570 100644
34982--- a/drivers/staging/gma500/power.c
34983+++ b/drivers/staging/gma500/power.c
34984@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34985 ret = gma_resume_pci(dev->pdev);
34986 if (ret == 0) {
34987 /* FIXME: we want to defer this for Medfield/Oaktrail */
34988- gma_resume_display(dev);
34989+ gma_resume_display(dev->pdev);
34990 psb_irq_preinstall(dev);
34991 psb_irq_postinstall(dev);
34992 pm_runtime_get(&dev->pdev->dev);
34993diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34994index bafccb3..e3ac78d 100644
34995--- a/drivers/staging/hv/rndis_filter.c
34996+++ b/drivers/staging/hv/rndis_filter.c
34997@@ -42,7 +42,7 @@ struct rndis_device {
34998
34999 enum rndis_device_state state;
35000 bool link_state;
35001- atomic_t new_req_id;
35002+ atomic_unchecked_t new_req_id;
35003
35004 spinlock_t request_lock;
35005 struct list_head req_list;
35006@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35007 * template
35008 */
35009 set = &rndis_msg->msg.set_req;
35010- set->req_id = atomic_inc_return(&dev->new_req_id);
35011+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35012
35013 /* Add to the request list */
35014 spin_lock_irqsave(&dev->request_lock, flags);
35015@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35016
35017 /* Setup the rndis set */
35018 halt = &request->request_msg.msg.halt_req;
35019- halt->req_id = atomic_inc_return(&dev->new_req_id);
35020+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35021
35022 /* Ignore return since this msg is optional. */
35023 rndis_filter_send_request(dev, request);
35024diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35025index 9e8f010..af9efb5 100644
35026--- a/drivers/staging/iio/buffer_generic.h
35027+++ b/drivers/staging/iio/buffer_generic.h
35028@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35029
35030 int (*is_enabled)(struct iio_buffer *buffer);
35031 int (*enable)(struct iio_buffer *buffer);
35032-};
35033+} __no_const;
35034
35035 /**
35036 * struct iio_buffer_setup_ops - buffer setup related callbacks
35037diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35038index 8b307b4..a97ac91 100644
35039--- a/drivers/staging/octeon/ethernet-rx.c
35040+++ b/drivers/staging/octeon/ethernet-rx.c
35041@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35042 /* Increment RX stats for virtual ports */
35043 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35044 #ifdef CONFIG_64BIT
35045- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35046- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35047+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35048+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35049 #else
35050- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35051- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35052+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35053+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35054 #endif
35055 }
35056 netif_receive_skb(skb);
35057@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35058 dev->name);
35059 */
35060 #ifdef CONFIG_64BIT
35061- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35062+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35063 #else
35064- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35065+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35066 #endif
35067 dev_kfree_skb_irq(skb);
35068 }
35069diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35070index 076f866..2308070 100644
35071--- a/drivers/staging/octeon/ethernet.c
35072+++ b/drivers/staging/octeon/ethernet.c
35073@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35074 * since the RX tasklet also increments it.
35075 */
35076 #ifdef CONFIG_64BIT
35077- atomic64_add(rx_status.dropped_packets,
35078- (atomic64_t *)&priv->stats.rx_dropped);
35079+ atomic64_add_unchecked(rx_status.dropped_packets,
35080+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35081 #else
35082- atomic_add(rx_status.dropped_packets,
35083- (atomic_t *)&priv->stats.rx_dropped);
35084+ atomic_add_unchecked(rx_status.dropped_packets,
35085+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35086 #endif
35087 }
35088
35089diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35090index 7a19555..466456d 100644
35091--- a/drivers/staging/pohmelfs/inode.c
35092+++ b/drivers/staging/pohmelfs/inode.c
35093@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35094 mutex_init(&psb->mcache_lock);
35095 psb->mcache_root = RB_ROOT;
35096 psb->mcache_timeout = msecs_to_jiffies(5000);
35097- atomic_long_set(&psb->mcache_gen, 0);
35098+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35099
35100 psb->trans_max_pages = 100;
35101
35102@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35103 INIT_LIST_HEAD(&psb->crypto_ready_list);
35104 INIT_LIST_HEAD(&psb->crypto_active_list);
35105
35106- atomic_set(&psb->trans_gen, 1);
35107+ atomic_set_unchecked(&psb->trans_gen, 1);
35108 atomic_long_set(&psb->total_inodes, 0);
35109
35110 mutex_init(&psb->state_lock);
35111diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35112index e22665c..a2a9390 100644
35113--- a/drivers/staging/pohmelfs/mcache.c
35114+++ b/drivers/staging/pohmelfs/mcache.c
35115@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35116 m->data = data;
35117 m->start = start;
35118 m->size = size;
35119- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35120+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35121
35122 mutex_lock(&psb->mcache_lock);
35123 err = pohmelfs_mcache_insert(psb, m);
35124diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35125index 985b6b7..7699e05 100644
35126--- a/drivers/staging/pohmelfs/netfs.h
35127+++ b/drivers/staging/pohmelfs/netfs.h
35128@@ -571,14 +571,14 @@ struct pohmelfs_config;
35129 struct pohmelfs_sb {
35130 struct rb_root mcache_root;
35131 struct mutex mcache_lock;
35132- atomic_long_t mcache_gen;
35133+ atomic_long_unchecked_t mcache_gen;
35134 unsigned long mcache_timeout;
35135
35136 unsigned int idx;
35137
35138 unsigned int trans_retries;
35139
35140- atomic_t trans_gen;
35141+ atomic_unchecked_t trans_gen;
35142
35143 unsigned int crypto_attached_size;
35144 unsigned int crypto_align_size;
35145diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35146index 06c1a74..866eebc 100644
35147--- a/drivers/staging/pohmelfs/trans.c
35148+++ b/drivers/staging/pohmelfs/trans.c
35149@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35150 int err;
35151 struct netfs_cmd *cmd = t->iovec.iov_base;
35152
35153- t->gen = atomic_inc_return(&psb->trans_gen);
35154+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35155
35156 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35157 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35158diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35159index 86308a0..feaa925 100644
35160--- a/drivers/staging/rtl8712/rtl871x_io.h
35161+++ b/drivers/staging/rtl8712/rtl871x_io.h
35162@@ -108,7 +108,7 @@ struct _io_ops {
35163 u8 *pmem);
35164 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35165 u8 *pmem);
35166-};
35167+} __no_const;
35168
35169 struct io_req {
35170 struct list_head list;
35171diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35172index c7b5e8b..783d6cb 100644
35173--- a/drivers/staging/sbe-2t3e3/netdev.c
35174+++ b/drivers/staging/sbe-2t3e3/netdev.c
35175@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35176 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35177
35178 if (rlen)
35179- if (copy_to_user(data, &resp, rlen))
35180+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35181 return -EFAULT;
35182
35183 return 0;
35184diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35185index be21617..0954e45 100644
35186--- a/drivers/staging/usbip/usbip_common.h
35187+++ b/drivers/staging/usbip/usbip_common.h
35188@@ -289,7 +289,7 @@ struct usbip_device {
35189 void (*shutdown)(struct usbip_device *);
35190 void (*reset)(struct usbip_device *);
35191 void (*unusable)(struct usbip_device *);
35192- } eh_ops;
35193+ } __no_const eh_ops;
35194 };
35195
35196 #if 0
35197diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35198index 88b3298..3783eee 100644
35199--- a/drivers/staging/usbip/vhci.h
35200+++ b/drivers/staging/usbip/vhci.h
35201@@ -88,7 +88,7 @@ struct vhci_hcd {
35202 unsigned resuming:1;
35203 unsigned long re_timeout;
35204
35205- atomic_t seqnum;
35206+ atomic_unchecked_t seqnum;
35207
35208 /*
35209 * NOTE:
35210diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35211index 2ee97e2..0420b86 100644
35212--- a/drivers/staging/usbip/vhci_hcd.c
35213+++ b/drivers/staging/usbip/vhci_hcd.c
35214@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35215 return;
35216 }
35217
35218- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35219+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35220 if (priv->seqnum == 0xffff)
35221 dev_info(&urb->dev->dev, "seqnum max\n");
35222
35223@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35224 return -ENOMEM;
35225 }
35226
35227- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35228+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35229 if (unlink->seqnum == 0xffff)
35230 pr_info("seqnum max\n");
35231
35232@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35233 vdev->rhport = rhport;
35234 }
35235
35236- atomic_set(&vhci->seqnum, 0);
35237+ atomic_set_unchecked(&vhci->seqnum, 0);
35238 spin_lock_init(&vhci->lock);
35239
35240 hcd->power_budget = 0; /* no limit */
35241diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35242index 3872b8c..fe6d2f4 100644
35243--- a/drivers/staging/usbip/vhci_rx.c
35244+++ b/drivers/staging/usbip/vhci_rx.c
35245@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35246 if (!urb) {
35247 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35248 pr_info("max seqnum %d\n",
35249- atomic_read(&the_controller->seqnum));
35250+ atomic_read_unchecked(&the_controller->seqnum));
35251 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35252 return;
35253 }
35254diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35255index 7735027..30eed13 100644
35256--- a/drivers/staging/vt6655/hostap.c
35257+++ b/drivers/staging/vt6655/hostap.c
35258@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35259 *
35260 */
35261
35262+static net_device_ops_no_const apdev_netdev_ops;
35263+
35264 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35265 {
35266 PSDevice apdev_priv;
35267 struct net_device *dev = pDevice->dev;
35268 int ret;
35269- const struct net_device_ops apdev_netdev_ops = {
35270- .ndo_start_xmit = pDevice->tx_80211,
35271- };
35272
35273 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35274
35275@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35276 *apdev_priv = *pDevice;
35277 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35278
35279+ /* only half broken now */
35280+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35281 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35282
35283 pDevice->apdev->type = ARPHRD_IEEE80211;
35284diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35285index 51b5adf..098e320 100644
35286--- a/drivers/staging/vt6656/hostap.c
35287+++ b/drivers/staging/vt6656/hostap.c
35288@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35289 *
35290 */
35291
35292+static net_device_ops_no_const apdev_netdev_ops;
35293+
35294 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35295 {
35296 PSDevice apdev_priv;
35297 struct net_device *dev = pDevice->dev;
35298 int ret;
35299- const struct net_device_ops apdev_netdev_ops = {
35300- .ndo_start_xmit = pDevice->tx_80211,
35301- };
35302
35303 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35304
35305@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35306 *apdev_priv = *pDevice;
35307 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35308
35309+ /* only half broken now */
35310+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35311 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35312
35313 pDevice->apdev->type = ARPHRD_IEEE80211;
35314diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35315index 7843dfd..3db105f 100644
35316--- a/drivers/staging/wlan-ng/hfa384x_usb.c
35317+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35318@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35319
35320 struct usbctlx_completor {
35321 int (*complete) (struct usbctlx_completor *);
35322-};
35323+} __no_const;
35324
35325 static int
35326 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35327diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35328index 1ca66ea..76f1343 100644
35329--- a/drivers/staging/zcache/tmem.c
35330+++ b/drivers/staging/zcache/tmem.c
35331@@ -39,7 +39,7 @@
35332 * A tmem host implementation must use this function to register callbacks
35333 * for memory allocation.
35334 */
35335-static struct tmem_hostops tmem_hostops;
35336+static tmem_hostops_no_const tmem_hostops;
35337
35338 static void tmem_objnode_tree_init(void);
35339
35340@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35341 * A tmem host implementation must use this function to register
35342 * callbacks for a page-accessible memory (PAM) implementation
35343 */
35344-static struct tmem_pamops tmem_pamops;
35345+static tmem_pamops_no_const tmem_pamops;
35346
35347 void tmem_register_pamops(struct tmem_pamops *m)
35348 {
35349diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35350index ed147c4..94fc3c6 100644
35351--- a/drivers/staging/zcache/tmem.h
35352+++ b/drivers/staging/zcache/tmem.h
35353@@ -180,6 +180,7 @@ struct tmem_pamops {
35354 void (*new_obj)(struct tmem_obj *);
35355 int (*replace_in_obj)(void *, struct tmem_obj *);
35356 };
35357+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35358 extern void tmem_register_pamops(struct tmem_pamops *m);
35359
35360 /* memory allocation methods provided by the host implementation */
35361@@ -189,6 +190,7 @@ struct tmem_hostops {
35362 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35363 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35364 };
35365+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35366 extern void tmem_register_hostops(struct tmem_hostops *m);
35367
35368 /* core tmem accessor functions */
35369diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35370index 8599545..7761358 100644
35371--- a/drivers/target/iscsi/iscsi_target.c
35372+++ b/drivers/target/iscsi/iscsi_target.c
35373@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35374 * outstanding_r2ts reaches zero, go ahead and send the delayed
35375 * TASK_ABORTED status.
35376 */
35377- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35378+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35379 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35380 if (--cmd->outstanding_r2ts < 1) {
35381 iscsit_stop_dataout_timer(cmd);
35382diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35383index 6845228..df77141 100644
35384--- a/drivers/target/target_core_tmr.c
35385+++ b/drivers/target/target_core_tmr.c
35386@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35387 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35388 cmd->t_task_list_num,
35389 atomic_read(&cmd->t_task_cdbs_left),
35390- atomic_read(&cmd->t_task_cdbs_sent),
35391+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35392 atomic_read(&cmd->t_transport_active),
35393 atomic_read(&cmd->t_transport_stop),
35394 atomic_read(&cmd->t_transport_sent));
35395@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35396 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35397 " task: %p, t_fe_count: %d dev: %p\n", task,
35398 fe_count, dev);
35399- atomic_set(&cmd->t_transport_aborted, 1);
35400+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35401 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35402
35403 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35404@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35405 }
35406 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35407 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35408- atomic_set(&cmd->t_transport_aborted, 1);
35409+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35410 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35411
35412 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35413diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35414index e87d0eb..856cbcc 100644
35415--- a/drivers/target/target_core_transport.c
35416+++ b/drivers/target/target_core_transport.c
35417@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35418
35419 dev->queue_depth = dev_limits->queue_depth;
35420 atomic_set(&dev->depth_left, dev->queue_depth);
35421- atomic_set(&dev->dev_ordered_id, 0);
35422+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35423
35424 se_dev_set_default_attribs(dev, dev_limits);
35425
35426@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35427 * Used to determine when ORDERED commands should go from
35428 * Dormant to Active status.
35429 */
35430- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35431+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35432 smp_mb__after_atomic_inc();
35433 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35434 cmd->se_ordered_id, cmd->sam_task_attr,
35435@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35436 " t_transport_active: %d t_transport_stop: %d"
35437 " t_transport_sent: %d\n", cmd->t_task_list_num,
35438 atomic_read(&cmd->t_task_cdbs_left),
35439- atomic_read(&cmd->t_task_cdbs_sent),
35440+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35441 atomic_read(&cmd->t_task_cdbs_ex_left),
35442 atomic_read(&cmd->t_transport_active),
35443 atomic_read(&cmd->t_transport_stop),
35444@@ -2089,9 +2089,9 @@ check_depth:
35445
35446 spin_lock_irqsave(&cmd->t_state_lock, flags);
35447 task->task_flags |= (TF_ACTIVE | TF_SENT);
35448- atomic_inc(&cmd->t_task_cdbs_sent);
35449+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35450
35451- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35452+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35453 cmd->t_task_list_num)
35454 atomic_set(&cmd->t_transport_sent, 1);
35455
35456@@ -4260,7 +4260,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35457 atomic_set(&cmd->transport_lun_stop, 0);
35458 }
35459 if (!atomic_read(&cmd->t_transport_active) ||
35460- atomic_read(&cmd->t_transport_aborted)) {
35461+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
35462 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35463 return false;
35464 }
35465@@ -4509,7 +4509,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35466 {
35467 int ret = 0;
35468
35469- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35470+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35471 if (!send_status ||
35472 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35473 return 1;
35474@@ -4546,7 +4546,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35475 */
35476 if (cmd->data_direction == DMA_TO_DEVICE) {
35477 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35478- atomic_inc(&cmd->t_transport_aborted);
35479+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35480 smp_mb__after_atomic_inc();
35481 }
35482 }
35483diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35484index b9040be..e3f5aab 100644
35485--- a/drivers/tty/hvc/hvcs.c
35486+++ b/drivers/tty/hvc/hvcs.c
35487@@ -83,6 +83,7 @@
35488 #include <asm/hvcserver.h>
35489 #include <asm/uaccess.h>
35490 #include <asm/vio.h>
35491+#include <asm/local.h>
35492
35493 /*
35494 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35495@@ -270,7 +271,7 @@ struct hvcs_struct {
35496 unsigned int index;
35497
35498 struct tty_struct *tty;
35499- int open_count;
35500+ local_t open_count;
35501
35502 /*
35503 * Used to tell the driver kernel_thread what operations need to take
35504@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35505
35506 spin_lock_irqsave(&hvcsd->lock, flags);
35507
35508- if (hvcsd->open_count > 0) {
35509+ if (local_read(&hvcsd->open_count) > 0) {
35510 spin_unlock_irqrestore(&hvcsd->lock, flags);
35511 printk(KERN_INFO "HVCS: vterm state unchanged. "
35512 "The hvcs device node is still in use.\n");
35513@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35514 if ((retval = hvcs_partner_connect(hvcsd)))
35515 goto error_release;
35516
35517- hvcsd->open_count = 1;
35518+ local_set(&hvcsd->open_count, 1);
35519 hvcsd->tty = tty;
35520 tty->driver_data = hvcsd;
35521
35522@@ -1179,7 +1180,7 @@ fast_open:
35523
35524 spin_lock_irqsave(&hvcsd->lock, flags);
35525 kref_get(&hvcsd->kref);
35526- hvcsd->open_count++;
35527+ local_inc(&hvcsd->open_count);
35528 hvcsd->todo_mask |= HVCS_SCHED_READ;
35529 spin_unlock_irqrestore(&hvcsd->lock, flags);
35530
35531@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35532 hvcsd = tty->driver_data;
35533
35534 spin_lock_irqsave(&hvcsd->lock, flags);
35535- if (--hvcsd->open_count == 0) {
35536+ if (local_dec_and_test(&hvcsd->open_count)) {
35537
35538 vio_disable_interrupts(hvcsd->vdev);
35539
35540@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35541 free_irq(irq, hvcsd);
35542 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35543 return;
35544- } else if (hvcsd->open_count < 0) {
35545+ } else if (local_read(&hvcsd->open_count) < 0) {
35546 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35547 " is missmanaged.\n",
35548- hvcsd->vdev->unit_address, hvcsd->open_count);
35549+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35550 }
35551
35552 spin_unlock_irqrestore(&hvcsd->lock, flags);
35553@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35554
35555 spin_lock_irqsave(&hvcsd->lock, flags);
35556 /* Preserve this so that we know how many kref refs to put */
35557- temp_open_count = hvcsd->open_count;
35558+ temp_open_count = local_read(&hvcsd->open_count);
35559
35560 /*
35561 * Don't kref put inside the spinlock because the destruction
35562@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35563 hvcsd->tty->driver_data = NULL;
35564 hvcsd->tty = NULL;
35565
35566- hvcsd->open_count = 0;
35567+ local_set(&hvcsd->open_count, 0);
35568
35569 /* This will drop any buffered data on the floor which is OK in a hangup
35570 * scenario. */
35571@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35572 * the middle of a write operation? This is a crummy place to do this
35573 * but we want to keep it all in the spinlock.
35574 */
35575- if (hvcsd->open_count <= 0) {
35576+ if (local_read(&hvcsd->open_count) <= 0) {
35577 spin_unlock_irqrestore(&hvcsd->lock, flags);
35578 return -ENODEV;
35579 }
35580@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35581 {
35582 struct hvcs_struct *hvcsd = tty->driver_data;
35583
35584- if (!hvcsd || hvcsd->open_count <= 0)
35585+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35586 return 0;
35587
35588 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35589diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35590index ef92869..f4ebd88 100644
35591--- a/drivers/tty/ipwireless/tty.c
35592+++ b/drivers/tty/ipwireless/tty.c
35593@@ -29,6 +29,7 @@
35594 #include <linux/tty_driver.h>
35595 #include <linux/tty_flip.h>
35596 #include <linux/uaccess.h>
35597+#include <asm/local.h>
35598
35599 #include "tty.h"
35600 #include "network.h"
35601@@ -51,7 +52,7 @@ struct ipw_tty {
35602 int tty_type;
35603 struct ipw_network *network;
35604 struct tty_struct *linux_tty;
35605- int open_count;
35606+ local_t open_count;
35607 unsigned int control_lines;
35608 struct mutex ipw_tty_mutex;
35609 int tx_bytes_queued;
35610@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35611 mutex_unlock(&tty->ipw_tty_mutex);
35612 return -ENODEV;
35613 }
35614- if (tty->open_count == 0)
35615+ if (local_read(&tty->open_count) == 0)
35616 tty->tx_bytes_queued = 0;
35617
35618- tty->open_count++;
35619+ local_inc(&tty->open_count);
35620
35621 tty->linux_tty = linux_tty;
35622 linux_tty->driver_data = tty;
35623@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35624
35625 static void do_ipw_close(struct ipw_tty *tty)
35626 {
35627- tty->open_count--;
35628-
35629- if (tty->open_count == 0) {
35630+ if (local_dec_return(&tty->open_count) == 0) {
35631 struct tty_struct *linux_tty = tty->linux_tty;
35632
35633 if (linux_tty != NULL) {
35634@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35635 return;
35636
35637 mutex_lock(&tty->ipw_tty_mutex);
35638- if (tty->open_count == 0) {
35639+ if (local_read(&tty->open_count) == 0) {
35640 mutex_unlock(&tty->ipw_tty_mutex);
35641 return;
35642 }
35643@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35644 return;
35645 }
35646
35647- if (!tty->open_count) {
35648+ if (!local_read(&tty->open_count)) {
35649 mutex_unlock(&tty->ipw_tty_mutex);
35650 return;
35651 }
35652@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35653 return -ENODEV;
35654
35655 mutex_lock(&tty->ipw_tty_mutex);
35656- if (!tty->open_count) {
35657+ if (!local_read(&tty->open_count)) {
35658 mutex_unlock(&tty->ipw_tty_mutex);
35659 return -EINVAL;
35660 }
35661@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35662 if (!tty)
35663 return -ENODEV;
35664
35665- if (!tty->open_count)
35666+ if (!local_read(&tty->open_count))
35667 return -EINVAL;
35668
35669 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35670@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35671 if (!tty)
35672 return 0;
35673
35674- if (!tty->open_count)
35675+ if (!local_read(&tty->open_count))
35676 return 0;
35677
35678 return tty->tx_bytes_queued;
35679@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35680 if (!tty)
35681 return -ENODEV;
35682
35683- if (!tty->open_count)
35684+ if (!local_read(&tty->open_count))
35685 return -EINVAL;
35686
35687 return get_control_lines(tty);
35688@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35689 if (!tty)
35690 return -ENODEV;
35691
35692- if (!tty->open_count)
35693+ if (!local_read(&tty->open_count))
35694 return -EINVAL;
35695
35696 return set_control_lines(tty, set, clear);
35697@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35698 if (!tty)
35699 return -ENODEV;
35700
35701- if (!tty->open_count)
35702+ if (!local_read(&tty->open_count))
35703 return -EINVAL;
35704
35705 /* FIXME: Exactly how is the tty object locked here .. */
35706@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35707 against a parallel ioctl etc */
35708 mutex_lock(&ttyj->ipw_tty_mutex);
35709 }
35710- while (ttyj->open_count)
35711+ while (local_read(&ttyj->open_count))
35712 do_ipw_close(ttyj);
35713 ipwireless_disassociate_network_ttys(network,
35714 ttyj->channel_idx);
35715diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35716index fc7bbba..9527e93 100644
35717--- a/drivers/tty/n_gsm.c
35718+++ b/drivers/tty/n_gsm.c
35719@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35720 kref_init(&dlci->ref);
35721 mutex_init(&dlci->mutex);
35722 dlci->fifo = &dlci->_fifo;
35723- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35724+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35725 kfree(dlci);
35726 return NULL;
35727 }
35728diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35729index 39d6ab6..eb97f41 100644
35730--- a/drivers/tty/n_tty.c
35731+++ b/drivers/tty/n_tty.c
35732@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35733 {
35734 *ops = tty_ldisc_N_TTY;
35735 ops->owner = NULL;
35736- ops->refcount = ops->flags = 0;
35737+ atomic_set(&ops->refcount, 0);
35738+ ops->flags = 0;
35739 }
35740 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35741diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35742index e18604b..a7d5a11 100644
35743--- a/drivers/tty/pty.c
35744+++ b/drivers/tty/pty.c
35745@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35746 register_sysctl_table(pty_root_table);
35747
35748 /* Now create the /dev/ptmx special device */
35749+ pax_open_kernel();
35750 tty_default_fops(&ptmx_fops);
35751- ptmx_fops.open = ptmx_open;
35752+ *(void **)&ptmx_fops.open = ptmx_open;
35753+ pax_close_kernel();
35754
35755 cdev_init(&ptmx_cdev, &ptmx_fops);
35756 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35757diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35758index 2b42a01..32a2ed3 100644
35759--- a/drivers/tty/serial/kgdboc.c
35760+++ b/drivers/tty/serial/kgdboc.c
35761@@ -24,8 +24,9 @@
35762 #define MAX_CONFIG_LEN 40
35763
35764 static struct kgdb_io kgdboc_io_ops;
35765+static struct kgdb_io kgdboc_io_ops_console;
35766
35767-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35768+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35769 static int configured = -1;
35770
35771 static char config[MAX_CONFIG_LEN];
35772@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35773 kgdboc_unregister_kbd();
35774 if (configured == 1)
35775 kgdb_unregister_io_module(&kgdboc_io_ops);
35776+ else if (configured == 2)
35777+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35778 }
35779
35780 static int configure_kgdboc(void)
35781@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35782 int err;
35783 char *cptr = config;
35784 struct console *cons;
35785+ int is_console = 0;
35786
35787 err = kgdboc_option_setup(config);
35788 if (err || !strlen(config) || isspace(config[0]))
35789 goto noconfig;
35790
35791 err = -ENODEV;
35792- kgdboc_io_ops.is_console = 0;
35793 kgdb_tty_driver = NULL;
35794
35795 kgdboc_use_kms = 0;
35796@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35797 int idx;
35798 if (cons->device && cons->device(cons, &idx) == p &&
35799 idx == tty_line) {
35800- kgdboc_io_ops.is_console = 1;
35801+ is_console = 1;
35802 break;
35803 }
35804 cons = cons->next;
35805@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35806 kgdb_tty_line = tty_line;
35807
35808 do_register:
35809- err = kgdb_register_io_module(&kgdboc_io_ops);
35810+ if (is_console) {
35811+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35812+ configured = 2;
35813+ } else {
35814+ err = kgdb_register_io_module(&kgdboc_io_ops);
35815+ configured = 1;
35816+ }
35817 if (err)
35818 goto noconfig;
35819
35820- configured = 1;
35821-
35822 return 0;
35823
35824 noconfig:
35825@@ -213,7 +220,7 @@ noconfig:
35826 static int __init init_kgdboc(void)
35827 {
35828 /* Already configured? */
35829- if (configured == 1)
35830+ if (configured >= 1)
35831 return 0;
35832
35833 return configure_kgdboc();
35834@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35835 if (config[len - 1] == '\n')
35836 config[len - 1] = '\0';
35837
35838- if (configured == 1)
35839+ if (configured >= 1)
35840 cleanup_kgdboc();
35841
35842 /* Go and configure with the new params. */
35843@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35844 .post_exception = kgdboc_post_exp_handler,
35845 };
35846
35847+static struct kgdb_io kgdboc_io_ops_console = {
35848+ .name = "kgdboc",
35849+ .read_char = kgdboc_get_char,
35850+ .write_char = kgdboc_put_char,
35851+ .pre_exception = kgdboc_pre_exp_handler,
35852+ .post_exception = kgdboc_post_exp_handler,
35853+ .is_console = 1
35854+};
35855+
35856 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35857 /* This is only available if kgdboc is a built in for early debugging */
35858 static int __init kgdboc_early_init(char *opt)
35859diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35860index 05085be..67eadb0 100644
35861--- a/drivers/tty/tty_io.c
35862+++ b/drivers/tty/tty_io.c
35863@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35864
35865 void tty_default_fops(struct file_operations *fops)
35866 {
35867- *fops = tty_fops;
35868+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35869 }
35870
35871 /*
35872diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35873index 8e0924f..4204eb4 100644
35874--- a/drivers/tty/tty_ldisc.c
35875+++ b/drivers/tty/tty_ldisc.c
35876@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35877 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35878 struct tty_ldisc_ops *ldo = ld->ops;
35879
35880- ldo->refcount--;
35881+ atomic_dec(&ldo->refcount);
35882 module_put(ldo->owner);
35883 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35884
35885@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35886 spin_lock_irqsave(&tty_ldisc_lock, flags);
35887 tty_ldiscs[disc] = new_ldisc;
35888 new_ldisc->num = disc;
35889- new_ldisc->refcount = 0;
35890+ atomic_set(&new_ldisc->refcount, 0);
35891 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35892
35893 return ret;
35894@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35895 return -EINVAL;
35896
35897 spin_lock_irqsave(&tty_ldisc_lock, flags);
35898- if (tty_ldiscs[disc]->refcount)
35899+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35900 ret = -EBUSY;
35901 else
35902 tty_ldiscs[disc] = NULL;
35903@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35904 if (ldops) {
35905 ret = ERR_PTR(-EAGAIN);
35906 if (try_module_get(ldops->owner)) {
35907- ldops->refcount++;
35908+ atomic_inc(&ldops->refcount);
35909 ret = ldops;
35910 }
35911 }
35912@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35913 unsigned long flags;
35914
35915 spin_lock_irqsave(&tty_ldisc_lock, flags);
35916- ldops->refcount--;
35917+ atomic_dec(&ldops->refcount);
35918 module_put(ldops->owner);
35919 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35920 }
35921diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35922index a605549..6bd3c96 100644
35923--- a/drivers/tty/vt/keyboard.c
35924+++ b/drivers/tty/vt/keyboard.c
35925@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35926 kbd->kbdmode == VC_OFF) &&
35927 value != KVAL(K_SAK))
35928 return; /* SAK is allowed even in raw mode */
35929+
35930+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35931+ {
35932+ void *func = fn_handler[value];
35933+ if (func == fn_show_state || func == fn_show_ptregs ||
35934+ func == fn_show_mem)
35935+ return;
35936+ }
35937+#endif
35938+
35939 fn_handler[value](vc);
35940 }
35941
35942diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35943index 5e096f4..0da1363 100644
35944--- a/drivers/tty/vt/vt_ioctl.c
35945+++ b/drivers/tty/vt/vt_ioctl.c
35946@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35947 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35948 return -EFAULT;
35949
35950- if (!capable(CAP_SYS_TTY_CONFIG))
35951- perm = 0;
35952-
35953 switch (cmd) {
35954 case KDGKBENT:
35955 key_map = key_maps[s];
35956@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35957 val = (i ? K_HOLE : K_NOSUCHMAP);
35958 return put_user(val, &user_kbe->kb_value);
35959 case KDSKBENT:
35960+ if (!capable(CAP_SYS_TTY_CONFIG))
35961+ perm = 0;
35962+
35963 if (!perm)
35964 return -EPERM;
35965 if (!i && v == K_NOSUCHMAP) {
35966@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35967 int i, j, k;
35968 int ret;
35969
35970- if (!capable(CAP_SYS_TTY_CONFIG))
35971- perm = 0;
35972-
35973 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35974 if (!kbs) {
35975 ret = -ENOMEM;
35976@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35977 kfree(kbs);
35978 return ((p && *p) ? -EOVERFLOW : 0);
35979 case KDSKBSENT:
35980+ if (!capable(CAP_SYS_TTY_CONFIG))
35981+ perm = 0;
35982+
35983 if (!perm) {
35984 ret = -EPERM;
35985 goto reterr;
35986diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35987index a783d53..cb30d94 100644
35988--- a/drivers/uio/uio.c
35989+++ b/drivers/uio/uio.c
35990@@ -25,6 +25,7 @@
35991 #include <linux/kobject.h>
35992 #include <linux/cdev.h>
35993 #include <linux/uio_driver.h>
35994+#include <asm/local.h>
35995
35996 #define UIO_MAX_DEVICES (1U << MINORBITS)
35997
35998@@ -32,10 +33,10 @@ struct uio_device {
35999 struct module *owner;
36000 struct device *dev;
36001 int minor;
36002- atomic_t event;
36003+ atomic_unchecked_t event;
36004 struct fasync_struct *async_queue;
36005 wait_queue_head_t wait;
36006- int vma_count;
36007+ local_t vma_count;
36008 struct uio_info *info;
36009 struct kobject *map_dir;
36010 struct kobject *portio_dir;
36011@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36012 struct device_attribute *attr, char *buf)
36013 {
36014 struct uio_device *idev = dev_get_drvdata(dev);
36015- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36016+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36017 }
36018
36019 static struct device_attribute uio_class_attributes[] = {
36020@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36021 {
36022 struct uio_device *idev = info->uio_dev;
36023
36024- atomic_inc(&idev->event);
36025+ atomic_inc_unchecked(&idev->event);
36026 wake_up_interruptible(&idev->wait);
36027 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36028 }
36029@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36030 }
36031
36032 listener->dev = idev;
36033- listener->event_count = atomic_read(&idev->event);
36034+ listener->event_count = atomic_read_unchecked(&idev->event);
36035 filep->private_data = listener;
36036
36037 if (idev->info->open) {
36038@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36039 return -EIO;
36040
36041 poll_wait(filep, &idev->wait, wait);
36042- if (listener->event_count != atomic_read(&idev->event))
36043+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36044 return POLLIN | POLLRDNORM;
36045 return 0;
36046 }
36047@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36048 do {
36049 set_current_state(TASK_INTERRUPTIBLE);
36050
36051- event_count = atomic_read(&idev->event);
36052+ event_count = atomic_read_unchecked(&idev->event);
36053 if (event_count != listener->event_count) {
36054 if (copy_to_user(buf, &event_count, count))
36055 retval = -EFAULT;
36056@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36057 static void uio_vma_open(struct vm_area_struct *vma)
36058 {
36059 struct uio_device *idev = vma->vm_private_data;
36060- idev->vma_count++;
36061+ local_inc(&idev->vma_count);
36062 }
36063
36064 static void uio_vma_close(struct vm_area_struct *vma)
36065 {
36066 struct uio_device *idev = vma->vm_private_data;
36067- idev->vma_count--;
36068+ local_dec(&idev->vma_count);
36069 }
36070
36071 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36072@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36073 idev->owner = owner;
36074 idev->info = info;
36075 init_waitqueue_head(&idev->wait);
36076- atomic_set(&idev->event, 0);
36077+ atomic_set_unchecked(&idev->event, 0);
36078
36079 ret = uio_get_minor(idev);
36080 if (ret)
36081diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36082index a845f8b..4f54072 100644
36083--- a/drivers/usb/atm/cxacru.c
36084+++ b/drivers/usb/atm/cxacru.c
36085@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36086 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36087 if (ret < 2)
36088 return -EINVAL;
36089- if (index < 0 || index > 0x7f)
36090+ if (index > 0x7f)
36091 return -EINVAL;
36092 pos += tmp;
36093
36094diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36095index d3448ca..d2864ca 100644
36096--- a/drivers/usb/atm/usbatm.c
36097+++ b/drivers/usb/atm/usbatm.c
36098@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36099 if (printk_ratelimit())
36100 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36101 __func__, vpi, vci);
36102- atomic_inc(&vcc->stats->rx_err);
36103+ atomic_inc_unchecked(&vcc->stats->rx_err);
36104 return;
36105 }
36106
36107@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36108 if (length > ATM_MAX_AAL5_PDU) {
36109 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36110 __func__, length, vcc);
36111- atomic_inc(&vcc->stats->rx_err);
36112+ atomic_inc_unchecked(&vcc->stats->rx_err);
36113 goto out;
36114 }
36115
36116@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36117 if (sarb->len < pdu_length) {
36118 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36119 __func__, pdu_length, sarb->len, vcc);
36120- atomic_inc(&vcc->stats->rx_err);
36121+ atomic_inc_unchecked(&vcc->stats->rx_err);
36122 goto out;
36123 }
36124
36125 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36126 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36127 __func__, vcc);
36128- atomic_inc(&vcc->stats->rx_err);
36129+ atomic_inc_unchecked(&vcc->stats->rx_err);
36130 goto out;
36131 }
36132
36133@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36134 if (printk_ratelimit())
36135 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36136 __func__, length);
36137- atomic_inc(&vcc->stats->rx_drop);
36138+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36139 goto out;
36140 }
36141
36142@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36143
36144 vcc->push(vcc, skb);
36145
36146- atomic_inc(&vcc->stats->rx);
36147+ atomic_inc_unchecked(&vcc->stats->rx);
36148 out:
36149 skb_trim(sarb, 0);
36150 }
36151@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36152 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36153
36154 usbatm_pop(vcc, skb);
36155- atomic_inc(&vcc->stats->tx);
36156+ atomic_inc_unchecked(&vcc->stats->tx);
36157
36158 skb = skb_dequeue(&instance->sndqueue);
36159 }
36160@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36161 if (!left--)
36162 return sprintf(page,
36163 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36164- atomic_read(&atm_dev->stats.aal5.tx),
36165- atomic_read(&atm_dev->stats.aal5.tx_err),
36166- atomic_read(&atm_dev->stats.aal5.rx),
36167- atomic_read(&atm_dev->stats.aal5.rx_err),
36168- atomic_read(&atm_dev->stats.aal5.rx_drop));
36169+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36170+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36171+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36172+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36173+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36174
36175 if (!left--) {
36176 if (instance->disconnected)
36177diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36178index d956965..4179a77 100644
36179--- a/drivers/usb/core/devices.c
36180+++ b/drivers/usb/core/devices.c
36181@@ -126,7 +126,7 @@ static const char format_endpt[] =
36182 * time it gets called.
36183 */
36184 static struct device_connect_event {
36185- atomic_t count;
36186+ atomic_unchecked_t count;
36187 wait_queue_head_t wait;
36188 } device_event = {
36189 .count = ATOMIC_INIT(1),
36190@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36191
36192 void usbfs_conn_disc_event(void)
36193 {
36194- atomic_add(2, &device_event.count);
36195+ atomic_add_unchecked(2, &device_event.count);
36196 wake_up(&device_event.wait);
36197 }
36198
36199@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36200
36201 poll_wait(file, &device_event.wait, wait);
36202
36203- event_count = atomic_read(&device_event.count);
36204+ event_count = atomic_read_unchecked(&device_event.count);
36205 if (file->f_version != event_count) {
36206 file->f_version = event_count;
36207 return POLLIN | POLLRDNORM;
36208diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36209index b3bdfed..a9460e0 100644
36210--- a/drivers/usb/core/message.c
36211+++ b/drivers/usb/core/message.c
36212@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36213 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36214 if (buf) {
36215 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36216- if (len > 0) {
36217- smallbuf = kmalloc(++len, GFP_NOIO);
36218+ if (len++ > 0) {
36219+ smallbuf = kmalloc(len, GFP_NOIO);
36220 if (!smallbuf)
36221 return buf;
36222 memcpy(smallbuf, buf, len);
36223diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36224index 1fc8f12..20647c1 100644
36225--- a/drivers/usb/early/ehci-dbgp.c
36226+++ b/drivers/usb/early/ehci-dbgp.c
36227@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36228
36229 #ifdef CONFIG_KGDB
36230 static struct kgdb_io kgdbdbgp_io_ops;
36231-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36232+static struct kgdb_io kgdbdbgp_io_ops_console;
36233+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36234 #else
36235 #define dbgp_kgdb_mode (0)
36236 #endif
36237@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36238 .write_char = kgdbdbgp_write_char,
36239 };
36240
36241+static struct kgdb_io kgdbdbgp_io_ops_console = {
36242+ .name = "kgdbdbgp",
36243+ .read_char = kgdbdbgp_read_char,
36244+ .write_char = kgdbdbgp_write_char,
36245+ .is_console = 1
36246+};
36247+
36248 static int kgdbdbgp_wait_time;
36249
36250 static int __init kgdbdbgp_parse_config(char *str)
36251@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36252 ptr++;
36253 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36254 }
36255- kgdb_register_io_module(&kgdbdbgp_io_ops);
36256- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36257+ if (early_dbgp_console.index != -1)
36258+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36259+ else
36260+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36261
36262 return 0;
36263 }
36264diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36265index d6bea3e..60b250e 100644
36266--- a/drivers/usb/wusbcore/wa-hc.h
36267+++ b/drivers/usb/wusbcore/wa-hc.h
36268@@ -192,7 +192,7 @@ struct wahc {
36269 struct list_head xfer_delayed_list;
36270 spinlock_t xfer_list_lock;
36271 struct work_struct xfer_work;
36272- atomic_t xfer_id_count;
36273+ atomic_unchecked_t xfer_id_count;
36274 };
36275
36276
36277@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36278 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36279 spin_lock_init(&wa->xfer_list_lock);
36280 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36281- atomic_set(&wa->xfer_id_count, 1);
36282+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36283 }
36284
36285 /**
36286diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36287index 57c01ab..8a05959 100644
36288--- a/drivers/usb/wusbcore/wa-xfer.c
36289+++ b/drivers/usb/wusbcore/wa-xfer.c
36290@@ -296,7 +296,7 @@ out:
36291 */
36292 static void wa_xfer_id_init(struct wa_xfer *xfer)
36293 {
36294- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36295+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36296 }
36297
36298 /*
36299diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36300index c14c42b..f955cc2 100644
36301--- a/drivers/vhost/vhost.c
36302+++ b/drivers/vhost/vhost.c
36303@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36304 return 0;
36305 }
36306
36307-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36308+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36309 {
36310 struct file *eventfp, *filep = NULL,
36311 *pollstart = NULL, *pollstop = NULL;
36312diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36313index b0b2ac3..89a4399 100644
36314--- a/drivers/video/aty/aty128fb.c
36315+++ b/drivers/video/aty/aty128fb.c
36316@@ -148,7 +148,7 @@ enum {
36317 };
36318
36319 /* Must match above enum */
36320-static const char *r128_family[] __devinitdata = {
36321+static const char *r128_family[] __devinitconst = {
36322 "AGP",
36323 "PCI",
36324 "PRO AGP",
36325diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36326index 5c3960d..15cf8fc 100644
36327--- a/drivers/video/fbcmap.c
36328+++ b/drivers/video/fbcmap.c
36329@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36330 rc = -ENODEV;
36331 goto out;
36332 }
36333- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36334- !info->fbops->fb_setcmap)) {
36335+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36336 rc = -EINVAL;
36337 goto out1;
36338 }
36339diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36340index ad93629..e020fc3 100644
36341--- a/drivers/video/fbmem.c
36342+++ b/drivers/video/fbmem.c
36343@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36344 image->dx += image->width + 8;
36345 }
36346 } else if (rotate == FB_ROTATE_UD) {
36347- for (x = 0; x < num && image->dx >= 0; x++) {
36348+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36349 info->fbops->fb_imageblit(info, image);
36350 image->dx -= image->width + 8;
36351 }
36352@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36353 image->dy += image->height + 8;
36354 }
36355 } else if (rotate == FB_ROTATE_CCW) {
36356- for (x = 0; x < num && image->dy >= 0; x++) {
36357+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36358 info->fbops->fb_imageblit(info, image);
36359 image->dy -= image->height + 8;
36360 }
36361@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36362 return -EFAULT;
36363 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36364 return -EINVAL;
36365- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36366+ if (con2fb.framebuffer >= FB_MAX)
36367 return -EINVAL;
36368 if (!registered_fb[con2fb.framebuffer])
36369 request_module("fb%d", con2fb.framebuffer);
36370diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36371index 5a5d092..265c5ed 100644
36372--- a/drivers/video/geode/gx1fb_core.c
36373+++ b/drivers/video/geode/gx1fb_core.c
36374@@ -29,7 +29,7 @@ static int crt_option = 1;
36375 static char panel_option[32] = "";
36376
36377 /* Modes relevant to the GX1 (taken from modedb.c) */
36378-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36379+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36380 /* 640x480-60 VESA */
36381 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36382 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36383diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36384index 0fad23f..0e9afa4 100644
36385--- a/drivers/video/gxt4500.c
36386+++ b/drivers/video/gxt4500.c
36387@@ -156,7 +156,7 @@ struct gxt4500_par {
36388 static char *mode_option;
36389
36390 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36391-static const struct fb_videomode defaultmode __devinitdata = {
36392+static const struct fb_videomode defaultmode __devinitconst = {
36393 .refresh = 60,
36394 .xres = 1280,
36395 .yres = 1024,
36396@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36397 return 0;
36398 }
36399
36400-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36401+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36402 .id = "IBM GXT4500P",
36403 .type = FB_TYPE_PACKED_PIXELS,
36404 .visual = FB_VISUAL_PSEUDOCOLOR,
36405diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36406index 7672d2e..b56437f 100644
36407--- a/drivers/video/i810/i810_accel.c
36408+++ b/drivers/video/i810/i810_accel.c
36409@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36410 }
36411 }
36412 printk("ringbuffer lockup!!!\n");
36413+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36414 i810_report_error(mmio);
36415 par->dev_flags |= LOCKUP;
36416 info->pixmap.scan_align = 1;
36417diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36418index 318f6fb..9a389c1 100644
36419--- a/drivers/video/i810/i810_main.c
36420+++ b/drivers/video/i810/i810_main.c
36421@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36422 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36423
36424 /* PCI */
36425-static const char *i810_pci_list[] __devinitdata = {
36426+static const char *i810_pci_list[] __devinitconst = {
36427 "Intel(R) 810 Framebuffer Device" ,
36428 "Intel(R) 810-DC100 Framebuffer Device" ,
36429 "Intel(R) 810E Framebuffer Device" ,
36430diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36431index de36693..3c63fc2 100644
36432--- a/drivers/video/jz4740_fb.c
36433+++ b/drivers/video/jz4740_fb.c
36434@@ -136,7 +136,7 @@ struct jzfb {
36435 uint32_t pseudo_palette[16];
36436 };
36437
36438-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36439+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36440 .id = "JZ4740 FB",
36441 .type = FB_TYPE_PACKED_PIXELS,
36442 .visual = FB_VISUAL_TRUECOLOR,
36443diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36444index 3c14e43..eafa544 100644
36445--- a/drivers/video/logo/logo_linux_clut224.ppm
36446+++ b/drivers/video/logo/logo_linux_clut224.ppm
36447@@ -1,1604 +1,1123 @@
36448 P3
36449-# Standard 224-color Linux logo
36450 80 80
36451 255
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 0 0 0
36458- 0 0 0 0 0 0 0 0 0 0 0 0
36459- 0 0 0 0 0 0 0 0 0 0 0 0
36460- 0 0 0 0 0 0 0 0 0 0 0 0
36461- 6 6 6 6 6 6 10 10 10 10 10 10
36462- 10 10 10 6 6 6 6 6 6 6 6 6
36463- 0 0 0 0 0 0 0 0 0 0 0 0
36464- 0 0 0 0 0 0 0 0 0 0 0 0
36465- 0 0 0 0 0 0 0 0 0 0 0 0
36466- 0 0 0 0 0 0 0 0 0 0 0 0
36467- 0 0 0 0 0 0 0 0 0 0 0 0
36468- 0 0 0 0 0 0 0 0 0 0 0 0
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 0 0 0
36478- 0 0 0 0 0 0 0 0 0 0 0 0
36479- 0 0 0 0 0 0 0 0 0 0 0 0
36480- 0 0 0 6 6 6 10 10 10 14 14 14
36481- 22 22 22 26 26 26 30 30 30 34 34 34
36482- 30 30 30 30 30 30 26 26 26 18 18 18
36483- 14 14 14 10 10 10 6 6 6 0 0 0
36484- 0 0 0 0 0 0 0 0 0 0 0 0
36485- 0 0 0 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 0 0 0
36487- 0 0 0 0 0 0 0 0 0 0 0 0
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 1 0 0 1 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 0 0 0
36498- 0 0 0 0 0 0 0 0 0 0 0 0
36499- 0 0 0 0 0 0 0 0 0 0 0 0
36500- 6 6 6 14 14 14 26 26 26 42 42 42
36501- 54 54 54 66 66 66 78 78 78 78 78 78
36502- 78 78 78 74 74 74 66 66 66 54 54 54
36503- 42 42 42 26 26 26 18 18 18 10 10 10
36504- 6 6 6 0 0 0 0 0 0 0 0 0
36505- 0 0 0 0 0 0 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 1 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 0 0 0 0 0 0 0 0 0
36517- 0 0 0 0 0 0 0 0 0 0 0 0
36518- 0 0 0 0 0 0 0 0 0 0 0 0
36519- 0 0 0 0 0 0 0 0 0 10 10 10
36520- 22 22 22 42 42 42 66 66 66 86 86 86
36521- 66 66 66 38 38 38 38 38 38 22 22 22
36522- 26 26 26 34 34 34 54 54 54 66 66 66
36523- 86 86 86 70 70 70 46 46 46 26 26 26
36524- 14 14 14 6 6 6 0 0 0 0 0 0
36525- 0 0 0 0 0 0 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 1 0 0 1 0 0 1 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 0 0 0 0 0 0 0 0 0 0 0 0
36537- 0 0 0 0 0 0 0 0 0 0 0 0
36538- 0 0 0 0 0 0 0 0 0 0 0 0
36539- 0 0 0 0 0 0 10 10 10 26 26 26
36540- 50 50 50 82 82 82 58 58 58 6 6 6
36541- 2 2 6 2 2 6 2 2 6 2 2 6
36542- 2 2 6 2 2 6 2 2 6 2 2 6
36543- 6 6 6 54 54 54 86 86 86 66 66 66
36544- 38 38 38 18 18 18 6 6 6 0 0 0
36545- 0 0 0 0 0 0 0 0 0 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 0 0 0 0 0 0 0 0 0 0 0 0
36557- 0 0 0 0 0 0 0 0 0 0 0 0
36558- 0 0 0 0 0 0 0 0 0 0 0 0
36559- 0 0 0 6 6 6 22 22 22 50 50 50
36560- 78 78 78 34 34 34 2 2 6 2 2 6
36561- 2 2 6 2 2 6 2 2 6 2 2 6
36562- 2 2 6 2 2 6 2 2 6 2 2 6
36563- 2 2 6 2 2 6 6 6 6 70 70 70
36564- 78 78 78 46 46 46 22 22 22 6 6 6
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 1 0 0 1 0 0 1 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 0 0 0 0 0 0 0 0 0
36578- 0 0 0 0 0 0 0 0 0 0 0 0
36579- 6 6 6 18 18 18 42 42 42 82 82 82
36580- 26 26 26 2 2 6 2 2 6 2 2 6
36581- 2 2 6 2 2 6 2 2 6 2 2 6
36582- 2 2 6 2 2 6 2 2 6 14 14 14
36583- 46 46 46 34 34 34 6 6 6 2 2 6
36584- 42 42 42 78 78 78 42 42 42 18 18 18
36585- 6 6 6 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 1 0 0 0 0 0 1 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 0 0 0 0 0 0 0 0 0 0 0 0
36598- 0 0 0 0 0 0 0 0 0 0 0 0
36599- 10 10 10 30 30 30 66 66 66 58 58 58
36600- 2 2 6 2 2 6 2 2 6 2 2 6
36601- 2 2 6 2 2 6 2 2 6 2 2 6
36602- 2 2 6 2 2 6 2 2 6 26 26 26
36603- 86 86 86 101 101 101 46 46 46 10 10 10
36604- 2 2 6 58 58 58 70 70 70 34 34 34
36605- 10 10 10 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 1 0 0 1 0 0 1 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 0 0 0
36617- 0 0 0 0 0 0 0 0 0 0 0 0
36618- 0 0 0 0 0 0 0 0 0 0 0 0
36619- 14 14 14 42 42 42 86 86 86 10 10 10
36620- 2 2 6 2 2 6 2 2 6 2 2 6
36621- 2 2 6 2 2 6 2 2 6 2 2 6
36622- 2 2 6 2 2 6 2 2 6 30 30 30
36623- 94 94 94 94 94 94 58 58 58 26 26 26
36624- 2 2 6 6 6 6 78 78 78 54 54 54
36625- 22 22 22 6 6 6 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 0 0 0
36637- 0 0 0 0 0 0 0 0 0 0 0 0
36638- 0 0 0 0 0 0 0 0 0 6 6 6
36639- 22 22 22 62 62 62 62 62 62 2 2 6
36640- 2 2 6 2 2 6 2 2 6 2 2 6
36641- 2 2 6 2 2 6 2 2 6 2 2 6
36642- 2 2 6 2 2 6 2 2 6 26 26 26
36643- 54 54 54 38 38 38 18 18 18 10 10 10
36644- 2 2 6 2 2 6 34 34 34 82 82 82
36645- 38 38 38 14 14 14 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 1 0 0 1 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 0 0 0 0 0 0
36657- 0 0 0 0 0 0 0 0 0 0 0 0
36658- 0 0 0 0 0 0 0 0 0 6 6 6
36659- 30 30 30 78 78 78 30 30 30 2 2 6
36660- 2 2 6 2 2 6 2 2 6 2 2 6
36661- 2 2 6 2 2 6 2 2 6 2 2 6
36662- 2 2 6 2 2 6 2 2 6 10 10 10
36663- 10 10 10 2 2 6 2 2 6 2 2 6
36664- 2 2 6 2 2 6 2 2 6 78 78 78
36665- 50 50 50 18 18 18 6 6 6 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 1 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 0 0 0 0 0 0 0 0 0
36677- 0 0 0 0 0 0 0 0 0 0 0 0
36678- 0 0 0 0 0 0 0 0 0 10 10 10
36679- 38 38 38 86 86 86 14 14 14 2 2 6
36680- 2 2 6 2 2 6 2 2 6 2 2 6
36681- 2 2 6 2 2 6 2 2 6 2 2 6
36682- 2 2 6 2 2 6 2 2 6 2 2 6
36683- 2 2 6 2 2 6 2 2 6 2 2 6
36684- 2 2 6 2 2 6 2 2 6 54 54 54
36685- 66 66 66 26 26 26 6 6 6 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 1 0 0 1 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 0 0 0 0 0 0 0 0 0 0 0 0
36697- 0 0 0 0 0 0 0 0 0 0 0 0
36698- 0 0 0 0 0 0 0 0 0 14 14 14
36699- 42 42 42 82 82 82 2 2 6 2 2 6
36700- 2 2 6 6 6 6 10 10 10 2 2 6
36701- 2 2 6 2 2 6 2 2 6 2 2 6
36702- 2 2 6 2 2 6 2 2 6 6 6 6
36703- 14 14 14 10 10 10 2 2 6 2 2 6
36704- 2 2 6 2 2 6 2 2 6 18 18 18
36705- 82 82 82 34 34 34 10 10 10 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 1 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 0 0 0 0 0 0 0 0 0 0 0 0
36717- 0 0 0 0 0 0 0 0 0 0 0 0
36718- 0 0 0 0 0 0 0 0 0 14 14 14
36719- 46 46 46 86 86 86 2 2 6 2 2 6
36720- 6 6 6 6 6 6 22 22 22 34 34 34
36721- 6 6 6 2 2 6 2 2 6 2 2 6
36722- 2 2 6 2 2 6 18 18 18 34 34 34
36723- 10 10 10 50 50 50 22 22 22 2 2 6
36724- 2 2 6 2 2 6 2 2 6 10 10 10
36725- 86 86 86 42 42 42 14 14 14 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 0 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 1 0 0 1 0 0 1 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 0 0 0 0 0 0 0 0 0 0 0 0
36737- 0 0 0 0 0 0 0 0 0 0 0 0
36738- 0 0 0 0 0 0 0 0 0 14 14 14
36739- 46 46 46 86 86 86 2 2 6 2 2 6
36740- 38 38 38 116 116 116 94 94 94 22 22 22
36741- 22 22 22 2 2 6 2 2 6 2 2 6
36742- 14 14 14 86 86 86 138 138 138 162 162 162
36743-154 154 154 38 38 38 26 26 26 6 6 6
36744- 2 2 6 2 2 6 2 2 6 2 2 6
36745- 86 86 86 46 46 46 14 14 14 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 0 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 0 0 0
36756- 0 0 0 0 0 0 0 0 0 0 0 0
36757- 0 0 0 0 0 0 0 0 0 0 0 0
36758- 0 0 0 0 0 0 0 0 0 14 14 14
36759- 46 46 46 86 86 86 2 2 6 14 14 14
36760-134 134 134 198 198 198 195 195 195 116 116 116
36761- 10 10 10 2 2 6 2 2 6 6 6 6
36762-101 98 89 187 187 187 210 210 210 218 218 218
36763-214 214 214 134 134 134 14 14 14 6 6 6
36764- 2 2 6 2 2 6 2 2 6 2 2 6
36765- 86 86 86 50 50 50 18 18 18 6 6 6
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 0 0 0 0 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 1 0 0 0
36773- 0 0 1 0 0 1 0 0 1 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 0 0 0
36776- 0 0 0 0 0 0 0 0 0 0 0 0
36777- 0 0 0 0 0 0 0 0 0 0 0 0
36778- 0 0 0 0 0 0 0 0 0 14 14 14
36779- 46 46 46 86 86 86 2 2 6 54 54 54
36780-218 218 218 195 195 195 226 226 226 246 246 246
36781- 58 58 58 2 2 6 2 2 6 30 30 30
36782-210 210 210 253 253 253 174 174 174 123 123 123
36783-221 221 221 234 234 234 74 74 74 2 2 6
36784- 2 2 6 2 2 6 2 2 6 2 2 6
36785- 70 70 70 58 58 58 22 22 22 6 6 6
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 0 0 0 0 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 0 0 0
36796- 0 0 0 0 0 0 0 0 0 0 0 0
36797- 0 0 0 0 0 0 0 0 0 0 0 0
36798- 0 0 0 0 0 0 0 0 0 14 14 14
36799- 46 46 46 82 82 82 2 2 6 106 106 106
36800-170 170 170 26 26 26 86 86 86 226 226 226
36801-123 123 123 10 10 10 14 14 14 46 46 46
36802-231 231 231 190 190 190 6 6 6 70 70 70
36803- 90 90 90 238 238 238 158 158 158 2 2 6
36804- 2 2 6 2 2 6 2 2 6 2 2 6
36805- 70 70 70 58 58 58 22 22 22 6 6 6
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 1 0 0 0
36813- 0 0 1 0 0 1 0 0 1 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 0 0 0
36816- 0 0 0 0 0 0 0 0 0 0 0 0
36817- 0 0 0 0 0 0 0 0 0 0 0 0
36818- 0 0 0 0 0 0 0 0 0 14 14 14
36819- 42 42 42 86 86 86 6 6 6 116 116 116
36820-106 106 106 6 6 6 70 70 70 149 149 149
36821-128 128 128 18 18 18 38 38 38 54 54 54
36822-221 221 221 106 106 106 2 2 6 14 14 14
36823- 46 46 46 190 190 190 198 198 198 2 2 6
36824- 2 2 6 2 2 6 2 2 6 2 2 6
36825- 74 74 74 62 62 62 22 22 22 6 6 6
36826- 0 0 0 0 0 0 0 0 0 0 0 0
36827- 0 0 0 0 0 0 0 0 0 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 1 0 0 0
36833- 0 0 1 0 0 0 0 0 1 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 0 0 0
36836- 0 0 0 0 0 0 0 0 0 0 0 0
36837- 0 0 0 0 0 0 0 0 0 0 0 0
36838- 0 0 0 0 0 0 0 0 0 14 14 14
36839- 42 42 42 94 94 94 14 14 14 101 101 101
36840-128 128 128 2 2 6 18 18 18 116 116 116
36841-118 98 46 121 92 8 121 92 8 98 78 10
36842-162 162 162 106 106 106 2 2 6 2 2 6
36843- 2 2 6 195 195 195 195 195 195 6 6 6
36844- 2 2 6 2 2 6 2 2 6 2 2 6
36845- 74 74 74 62 62 62 22 22 22 6 6 6
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 1 0 0 1
36853- 0 0 1 0 0 0 0 0 1 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 0 0 0
36856- 0 0 0 0 0 0 0 0 0 0 0 0
36857- 0 0 0 0 0 0 0 0 0 0 0 0
36858- 0 0 0 0 0 0 0 0 0 10 10 10
36859- 38 38 38 90 90 90 14 14 14 58 58 58
36860-210 210 210 26 26 26 54 38 6 154 114 10
36861-226 170 11 236 186 11 225 175 15 184 144 12
36862-215 174 15 175 146 61 37 26 9 2 2 6
36863- 70 70 70 246 246 246 138 138 138 2 2 6
36864- 2 2 6 2 2 6 2 2 6 2 2 6
36865- 70 70 70 66 66 66 26 26 26 6 6 6
36866- 0 0 0 0 0 0 0 0 0 0 0 0
36867- 0 0 0 0 0 0 0 0 0 0 0 0
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 0 0 0
36876- 0 0 0 0 0 0 0 0 0 0 0 0
36877- 0 0 0 0 0 0 0 0 0 0 0 0
36878- 0 0 0 0 0 0 0 0 0 10 10 10
36879- 38 38 38 86 86 86 14 14 14 10 10 10
36880-195 195 195 188 164 115 192 133 9 225 175 15
36881-239 182 13 234 190 10 232 195 16 232 200 30
36882-245 207 45 241 208 19 232 195 16 184 144 12
36883-218 194 134 211 206 186 42 42 42 2 2 6
36884- 2 2 6 2 2 6 2 2 6 2 2 6
36885- 50 50 50 74 74 74 30 30 30 6 6 6
36886- 0 0 0 0 0 0 0 0 0 0 0 0
36887- 0 0 0 0 0 0 0 0 0 0 0 0
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 0 0 0
36896- 0 0 0 0 0 0 0 0 0 0 0 0
36897- 0 0 0 0 0 0 0 0 0 0 0 0
36898- 0 0 0 0 0 0 0 0 0 10 10 10
36899- 34 34 34 86 86 86 14 14 14 2 2 6
36900-121 87 25 192 133 9 219 162 10 239 182 13
36901-236 186 11 232 195 16 241 208 19 244 214 54
36902-246 218 60 246 218 38 246 215 20 241 208 19
36903-241 208 19 226 184 13 121 87 25 2 2 6
36904- 2 2 6 2 2 6 2 2 6 2 2 6
36905- 50 50 50 82 82 82 34 34 34 10 10 10
36906- 0 0 0 0 0 0 0 0 0 0 0 0
36907- 0 0 0 0 0 0 0 0 0 0 0 0
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 0 0 0
36916- 0 0 0 0 0 0 0 0 0 0 0 0
36917- 0 0 0 0 0 0 0 0 0 0 0 0
36918- 0 0 0 0 0 0 0 0 0 10 10 10
36919- 34 34 34 82 82 82 30 30 30 61 42 6
36920-180 123 7 206 145 10 230 174 11 239 182 13
36921-234 190 10 238 202 15 241 208 19 246 218 74
36922-246 218 38 246 215 20 246 215 20 246 215 20
36923-226 184 13 215 174 15 184 144 12 6 6 6
36924- 2 2 6 2 2 6 2 2 6 2 2 6
36925- 26 26 26 94 94 94 42 42 42 14 14 14
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 0 0 0 0
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 0 0 0
36936- 0 0 0 0 0 0 0 0 0 0 0 0
36937- 0 0 0 0 0 0 0 0 0 0 0 0
36938- 0 0 0 0 0 0 0 0 0 10 10 10
36939- 30 30 30 78 78 78 50 50 50 104 69 6
36940-192 133 9 216 158 10 236 178 12 236 186 11
36941-232 195 16 241 208 19 244 214 54 245 215 43
36942-246 215 20 246 215 20 241 208 19 198 155 10
36943-200 144 11 216 158 10 156 118 10 2 2 6
36944- 2 2 6 2 2 6 2 2 6 2 2 6
36945- 6 6 6 90 90 90 54 54 54 18 18 18
36946- 6 6 6 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 0 0 0 0
36948- 0 0 0 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 0 0 0
36956- 0 0 0 0 0 0 0 0 0 0 0 0
36957- 0 0 0 0 0 0 0 0 0 0 0 0
36958- 0 0 0 0 0 0 0 0 0 10 10 10
36959- 30 30 30 78 78 78 46 46 46 22 22 22
36960-137 92 6 210 162 10 239 182 13 238 190 10
36961-238 202 15 241 208 19 246 215 20 246 215 20
36962-241 208 19 203 166 17 185 133 11 210 150 10
36963-216 158 10 210 150 10 102 78 10 2 2 6
36964- 6 6 6 54 54 54 14 14 14 2 2 6
36965- 2 2 6 62 62 62 74 74 74 30 30 30
36966- 10 10 10 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 0 0 0 0 0 0
36976- 0 0 0 0 0 0 0 0 0 0 0 0
36977- 0 0 0 0 0 0 0 0 0 0 0 0
36978- 0 0 0 0 0 0 0 0 0 10 10 10
36979- 34 34 34 78 78 78 50 50 50 6 6 6
36980- 94 70 30 139 102 15 190 146 13 226 184 13
36981-232 200 30 232 195 16 215 174 15 190 146 13
36982-168 122 10 192 133 9 210 150 10 213 154 11
36983-202 150 34 182 157 106 101 98 89 2 2 6
36984- 2 2 6 78 78 78 116 116 116 58 58 58
36985- 2 2 6 22 22 22 90 90 90 46 46 46
36986- 18 18 18 6 6 6 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 0 0 0 0 0 0
36996- 0 0 0 0 0 0 0 0 0 0 0 0
36997- 0 0 0 0 0 0 0 0 0 0 0 0
36998- 0 0 0 0 0 0 0 0 0 10 10 10
36999- 38 38 38 86 86 86 50 50 50 6 6 6
37000-128 128 128 174 154 114 156 107 11 168 122 10
37001-198 155 10 184 144 12 197 138 11 200 144 11
37002-206 145 10 206 145 10 197 138 11 188 164 115
37003-195 195 195 198 198 198 174 174 174 14 14 14
37004- 2 2 6 22 22 22 116 116 116 116 116 116
37005- 22 22 22 2 2 6 74 74 74 70 70 70
37006- 30 30 30 10 10 10 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 0 0 0 0 0 0 0 0 0
37016- 0 0 0 0 0 0 0 0 0 0 0 0
37017- 0 0 0 0 0 0 0 0 0 0 0 0
37018- 0 0 0 0 0 0 6 6 6 18 18 18
37019- 50 50 50 101 101 101 26 26 26 10 10 10
37020-138 138 138 190 190 190 174 154 114 156 107 11
37021-197 138 11 200 144 11 197 138 11 192 133 9
37022-180 123 7 190 142 34 190 178 144 187 187 187
37023-202 202 202 221 221 221 214 214 214 66 66 66
37024- 2 2 6 2 2 6 50 50 50 62 62 62
37025- 6 6 6 2 2 6 10 10 10 90 90 90
37026- 50 50 50 18 18 18 6 6 6 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 0 0 0 0 0 0 0 0 0 0 0 0
37036- 0 0 0 0 0 0 0 0 0 0 0 0
37037- 0 0 0 0 0 0 0 0 0 0 0 0
37038- 0 0 0 0 0 0 10 10 10 34 34 34
37039- 74 74 74 74 74 74 2 2 6 6 6 6
37040-144 144 144 198 198 198 190 190 190 178 166 146
37041-154 121 60 156 107 11 156 107 11 168 124 44
37042-174 154 114 187 187 187 190 190 190 210 210 210
37043-246 246 246 253 253 253 253 253 253 182 182 182
37044- 6 6 6 2 2 6 2 2 6 2 2 6
37045- 2 2 6 2 2 6 2 2 6 62 62 62
37046- 74 74 74 34 34 34 14 14 14 0 0 0
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 0 0 0 0 0 0
37055- 0 0 0 0 0 0 0 0 0 0 0 0
37056- 0 0 0 0 0 0 0 0 0 0 0 0
37057- 0 0 0 0 0 0 0 0 0 0 0 0
37058- 0 0 0 10 10 10 22 22 22 54 54 54
37059- 94 94 94 18 18 18 2 2 6 46 46 46
37060-234 234 234 221 221 221 190 190 190 190 190 190
37061-190 190 190 187 187 187 187 187 187 190 190 190
37062-190 190 190 195 195 195 214 214 214 242 242 242
37063-253 253 253 253 253 253 253 253 253 253 253 253
37064- 82 82 82 2 2 6 2 2 6 2 2 6
37065- 2 2 6 2 2 6 2 2 6 14 14 14
37066- 86 86 86 54 54 54 22 22 22 6 6 6
37067- 0 0 0 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 0 0 0 0 0 0
37075- 0 0 0 0 0 0 0 0 0 0 0 0
37076- 0 0 0 0 0 0 0 0 0 0 0 0
37077- 0 0 0 0 0 0 0 0 0 0 0 0
37078- 6 6 6 18 18 18 46 46 46 90 90 90
37079- 46 46 46 18 18 18 6 6 6 182 182 182
37080-253 253 253 246 246 246 206 206 206 190 190 190
37081-190 190 190 190 190 190 190 190 190 190 190 190
37082-206 206 206 231 231 231 250 250 250 253 253 253
37083-253 253 253 253 253 253 253 253 253 253 253 253
37084-202 202 202 14 14 14 2 2 6 2 2 6
37085- 2 2 6 2 2 6 2 2 6 2 2 6
37086- 42 42 42 86 86 86 42 42 42 18 18 18
37087- 6 6 6 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 0 0 0 0 0 0 0 0 0
37095- 0 0 0 0 0 0 0 0 0 0 0 0
37096- 0 0 0 0 0 0 0 0 0 0 0 0
37097- 0 0 0 0 0 0 0 0 0 6 6 6
37098- 14 14 14 38 38 38 74 74 74 66 66 66
37099- 2 2 6 6 6 6 90 90 90 250 250 250
37100-253 253 253 253 253 253 238 238 238 198 198 198
37101-190 190 190 190 190 190 195 195 195 221 221 221
37102-246 246 246 253 253 253 253 253 253 253 253 253
37103-253 253 253 253 253 253 253 253 253 253 253 253
37104-253 253 253 82 82 82 2 2 6 2 2 6
37105- 2 2 6 2 2 6 2 2 6 2 2 6
37106- 2 2 6 78 78 78 70 70 70 34 34 34
37107- 14 14 14 6 6 6 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 0 0 0 0 0 0 0 0 0 0 0 0
37115- 0 0 0 0 0 0 0 0 0 0 0 0
37116- 0 0 0 0 0 0 0 0 0 0 0 0
37117- 0 0 0 0 0 0 0 0 0 14 14 14
37118- 34 34 34 66 66 66 78 78 78 6 6 6
37119- 2 2 6 18 18 18 218 218 218 253 253 253
37120-253 253 253 253 253 253 253 253 253 246 246 246
37121-226 226 226 231 231 231 246 246 246 253 253 253
37122-253 253 253 253 253 253 253 253 253 253 253 253
37123-253 253 253 253 253 253 253 253 253 253 253 253
37124-253 253 253 178 178 178 2 2 6 2 2 6
37125- 2 2 6 2 2 6 2 2 6 2 2 6
37126- 2 2 6 18 18 18 90 90 90 62 62 62
37127- 30 30 30 10 10 10 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 0 0 0
37134- 0 0 0 0 0 0 0 0 0 0 0 0
37135- 0 0 0 0 0 0 0 0 0 0 0 0
37136- 0 0 0 0 0 0 0 0 0 0 0 0
37137- 0 0 0 0 0 0 10 10 10 26 26 26
37138- 58 58 58 90 90 90 18 18 18 2 2 6
37139- 2 2 6 110 110 110 253 253 253 253 253 253
37140-253 253 253 253 253 253 253 253 253 253 253 253
37141-250 250 250 253 253 253 253 253 253 253 253 253
37142-253 253 253 253 253 253 253 253 253 253 253 253
37143-253 253 253 253 253 253 253 253 253 253 253 253
37144-253 253 253 231 231 231 18 18 18 2 2 6
37145- 2 2 6 2 2 6 2 2 6 2 2 6
37146- 2 2 6 2 2 6 18 18 18 94 94 94
37147- 54 54 54 26 26 26 10 10 10 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 0 0 0
37154- 0 0 0 0 0 0 0 0 0 0 0 0
37155- 0 0 0 0 0 0 0 0 0 0 0 0
37156- 0 0 0 0 0 0 0 0 0 0 0 0
37157- 0 0 0 6 6 6 22 22 22 50 50 50
37158- 90 90 90 26 26 26 2 2 6 2 2 6
37159- 14 14 14 195 195 195 250 250 250 253 253 253
37160-253 253 253 253 253 253 253 253 253 253 253 253
37161-253 253 253 253 253 253 253 253 253 253 253 253
37162-253 253 253 253 253 253 253 253 253 253 253 253
37163-253 253 253 253 253 253 253 253 253 253 253 253
37164-250 250 250 242 242 242 54 54 54 2 2 6
37165- 2 2 6 2 2 6 2 2 6 2 2 6
37166- 2 2 6 2 2 6 2 2 6 38 38 38
37167- 86 86 86 50 50 50 22 22 22 6 6 6
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 0 0 0 0 0 0
37174- 0 0 0 0 0 0 0 0 0 0 0 0
37175- 0 0 0 0 0 0 0 0 0 0 0 0
37176- 0 0 0 0 0 0 0 0 0 0 0 0
37177- 6 6 6 14 14 14 38 38 38 82 82 82
37178- 34 34 34 2 2 6 2 2 6 2 2 6
37179- 42 42 42 195 195 195 246 246 246 253 253 253
37180-253 253 253 253 253 253 253 253 253 250 250 250
37181-242 242 242 242 242 242 250 250 250 253 253 253
37182-253 253 253 253 253 253 253 253 253 253 253 253
37183-253 253 253 250 250 250 246 246 246 238 238 238
37184-226 226 226 231 231 231 101 101 101 6 6 6
37185- 2 2 6 2 2 6 2 2 6 2 2 6
37186- 2 2 6 2 2 6 2 2 6 2 2 6
37187- 38 38 38 82 82 82 42 42 42 14 14 14
37188- 6 6 6 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 0 0 0
37193- 0 0 0 0 0 0 0 0 0 0 0 0
37194- 0 0 0 0 0 0 0 0 0 0 0 0
37195- 0 0 0 0 0 0 0 0 0 0 0 0
37196- 0 0 0 0 0 0 0 0 0 0 0 0
37197- 10 10 10 26 26 26 62 62 62 66 66 66
37198- 2 2 6 2 2 6 2 2 6 6 6 6
37199- 70 70 70 170 170 170 206 206 206 234 234 234
37200-246 246 246 250 250 250 250 250 250 238 238 238
37201-226 226 226 231 231 231 238 238 238 250 250 250
37202-250 250 250 250 250 250 246 246 246 231 231 231
37203-214 214 214 206 206 206 202 202 202 202 202 202
37204-198 198 198 202 202 202 182 182 182 18 18 18
37205- 2 2 6 2 2 6 2 2 6 2 2 6
37206- 2 2 6 2 2 6 2 2 6 2 2 6
37207- 2 2 6 62 62 62 66 66 66 30 30 30
37208- 10 10 10 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 0 0 0
37213- 0 0 0 0 0 0 0 0 0 0 0 0
37214- 0 0 0 0 0 0 0 0 0 0 0 0
37215- 0 0 0 0 0 0 0 0 0 0 0 0
37216- 0 0 0 0 0 0 0 0 0 0 0 0
37217- 14 14 14 42 42 42 82 82 82 18 18 18
37218- 2 2 6 2 2 6 2 2 6 10 10 10
37219- 94 94 94 182 182 182 218 218 218 242 242 242
37220-250 250 250 253 253 253 253 253 253 250 250 250
37221-234 234 234 253 253 253 253 253 253 253 253 253
37222-253 253 253 253 253 253 253 253 253 246 246 246
37223-238 238 238 226 226 226 210 210 210 202 202 202
37224-195 195 195 195 195 195 210 210 210 158 158 158
37225- 6 6 6 14 14 14 50 50 50 14 14 14
37226- 2 2 6 2 2 6 2 2 6 2 2 6
37227- 2 2 6 6 6 6 86 86 86 46 46 46
37228- 18 18 18 6 6 6 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 0 0 0
37233- 0 0 0 0 0 0 0 0 0 0 0 0
37234- 0 0 0 0 0 0 0 0 0 0 0 0
37235- 0 0 0 0 0 0 0 0 0 0 0 0
37236- 0 0 0 0 0 0 0 0 0 6 6 6
37237- 22 22 22 54 54 54 70 70 70 2 2 6
37238- 2 2 6 10 10 10 2 2 6 22 22 22
37239-166 166 166 231 231 231 250 250 250 253 253 253
37240-253 253 253 253 253 253 253 253 253 250 250 250
37241-242 242 242 253 253 253 253 253 253 253 253 253
37242-253 253 253 253 253 253 253 253 253 253 253 253
37243-253 253 253 253 253 253 253 253 253 246 246 246
37244-231 231 231 206 206 206 198 198 198 226 226 226
37245- 94 94 94 2 2 6 6 6 6 38 38 38
37246- 30 30 30 2 2 6 2 2 6 2 2 6
37247- 2 2 6 2 2 6 62 62 62 66 66 66
37248- 26 26 26 10 10 10 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 0 0 0
37253- 0 0 0 0 0 0 0 0 0 0 0 0
37254- 0 0 0 0 0 0 0 0 0 0 0 0
37255- 0 0 0 0 0 0 0 0 0 0 0 0
37256- 0 0 0 0 0 0 0 0 0 10 10 10
37257- 30 30 30 74 74 74 50 50 50 2 2 6
37258- 26 26 26 26 26 26 2 2 6 106 106 106
37259-238 238 238 253 253 253 253 253 253 253 253 253
37260-253 253 253 253 253 253 253 253 253 253 253 253
37261-253 253 253 253 253 253 253 253 253 253 253 253
37262-253 253 253 253 253 253 253 253 253 253 253 253
37263-253 253 253 253 253 253 253 253 253 253 253 253
37264-253 253 253 246 246 246 218 218 218 202 202 202
37265-210 210 210 14 14 14 2 2 6 2 2 6
37266- 30 30 30 22 22 22 2 2 6 2 2 6
37267- 2 2 6 2 2 6 18 18 18 86 86 86
37268- 42 42 42 14 14 14 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 0 0 0
37273- 0 0 0 0 0 0 0 0 0 0 0 0
37274- 0 0 0 0 0 0 0 0 0 0 0 0
37275- 0 0 0 0 0 0 0 0 0 0 0 0
37276- 0 0 0 0 0 0 0 0 0 14 14 14
37277- 42 42 42 90 90 90 22 22 22 2 2 6
37278- 42 42 42 2 2 6 18 18 18 218 218 218
37279-253 253 253 253 253 253 253 253 253 253 253 253
37280-253 253 253 253 253 253 253 253 253 253 253 253
37281-253 253 253 253 253 253 253 253 253 253 253 253
37282-253 253 253 253 253 253 253 253 253 253 253 253
37283-253 253 253 253 253 253 253 253 253 253 253 253
37284-253 253 253 253 253 253 250 250 250 221 221 221
37285-218 218 218 101 101 101 2 2 6 14 14 14
37286- 18 18 18 38 38 38 10 10 10 2 2 6
37287- 2 2 6 2 2 6 2 2 6 78 78 78
37288- 58 58 58 22 22 22 6 6 6 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 0 0 0
37293- 0 0 0 0 0 0 0 0 0 0 0 0
37294- 0 0 0 0 0 0 0 0 0 0 0 0
37295- 0 0 0 0 0 0 0 0 0 0 0 0
37296- 0 0 0 0 0 0 6 6 6 18 18 18
37297- 54 54 54 82 82 82 2 2 6 26 26 26
37298- 22 22 22 2 2 6 123 123 123 253 253 253
37299-253 253 253 253 253 253 253 253 253 253 253 253
37300-253 253 253 253 253 253 253 253 253 253 253 253
37301-253 253 253 253 253 253 253 253 253 253 253 253
37302-253 253 253 253 253 253 253 253 253 253 253 253
37303-253 253 253 253 253 253 253 253 253 253 253 253
37304-253 253 253 253 253 253 253 253 253 250 250 250
37305-238 238 238 198 198 198 6 6 6 38 38 38
37306- 58 58 58 26 26 26 38 38 38 2 2 6
37307- 2 2 6 2 2 6 2 2 6 46 46 46
37308- 78 78 78 30 30 30 10 10 10 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 0 0 0
37313- 0 0 0 0 0 0 0 0 0 0 0 0
37314- 0 0 0 0 0 0 0 0 0 0 0 0
37315- 0 0 0 0 0 0 0 0 0 0 0 0
37316- 0 0 0 0 0 0 10 10 10 30 30 30
37317- 74 74 74 58 58 58 2 2 6 42 42 42
37318- 2 2 6 22 22 22 231 231 231 253 253 253
37319-253 253 253 253 253 253 253 253 253 253 253 253
37320-253 253 253 253 253 253 253 253 253 250 250 250
37321-253 253 253 253 253 253 253 253 253 253 253 253
37322-253 253 253 253 253 253 253 253 253 253 253 253
37323-253 253 253 253 253 253 253 253 253 253 253 253
37324-253 253 253 253 253 253 253 253 253 253 253 253
37325-253 253 253 246 246 246 46 46 46 38 38 38
37326- 42 42 42 14 14 14 38 38 38 14 14 14
37327- 2 2 6 2 2 6 2 2 6 6 6 6
37328- 86 86 86 46 46 46 14 14 14 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 0 0 0 0 0 0
37333- 0 0 0 0 0 0 0 0 0 0 0 0
37334- 0 0 0 0 0 0 0 0 0 0 0 0
37335- 0 0 0 0 0 0 0 0 0 0 0 0
37336- 0 0 0 6 6 6 14 14 14 42 42 42
37337- 90 90 90 18 18 18 18 18 18 26 26 26
37338- 2 2 6 116 116 116 253 253 253 253 253 253
37339-253 253 253 253 253 253 253 253 253 253 253 253
37340-253 253 253 253 253 253 250 250 250 238 238 238
37341-253 253 253 253 253 253 253 253 253 253 253 253
37342-253 253 253 253 253 253 253 253 253 253 253 253
37343-253 253 253 253 253 253 253 253 253 253 253 253
37344-253 253 253 253 253 253 253 253 253 253 253 253
37345-253 253 253 253 253 253 94 94 94 6 6 6
37346- 2 2 6 2 2 6 10 10 10 34 34 34
37347- 2 2 6 2 2 6 2 2 6 2 2 6
37348- 74 74 74 58 58 58 22 22 22 6 6 6
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 0 0 0 0 0 0
37353- 0 0 0 0 0 0 0 0 0 0 0 0
37354- 0 0 0 0 0 0 0 0 0 0 0 0
37355- 0 0 0 0 0 0 0 0 0 0 0 0
37356- 0 0 0 10 10 10 26 26 26 66 66 66
37357- 82 82 82 2 2 6 38 38 38 6 6 6
37358- 14 14 14 210 210 210 253 253 253 253 253 253
37359-253 253 253 253 253 253 253 253 253 253 253 253
37360-253 253 253 253 253 253 246 246 246 242 242 242
37361-253 253 253 253 253 253 253 253 253 253 253 253
37362-253 253 253 253 253 253 253 253 253 253 253 253
37363-253 253 253 253 253 253 253 253 253 253 253 253
37364-253 253 253 253 253 253 253 253 253 253 253 253
37365-253 253 253 253 253 253 144 144 144 2 2 6
37366- 2 2 6 2 2 6 2 2 6 46 46 46
37367- 2 2 6 2 2 6 2 2 6 2 2 6
37368- 42 42 42 74 74 74 30 30 30 10 10 10
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 0 0 0 0 0 0 0 0 0
37373- 0 0 0 0 0 0 0 0 0 0 0 0
37374- 0 0 0 0 0 0 0 0 0 0 0 0
37375- 0 0 0 0 0 0 0 0 0 0 0 0
37376- 6 6 6 14 14 14 42 42 42 90 90 90
37377- 26 26 26 6 6 6 42 42 42 2 2 6
37378- 74 74 74 250 250 250 253 253 253 253 253 253
37379-253 253 253 253 253 253 253 253 253 253 253 253
37380-253 253 253 253 253 253 242 242 242 242 242 242
37381-253 253 253 253 253 253 253 253 253 253 253 253
37382-253 253 253 253 253 253 253 253 253 253 253 253
37383-253 253 253 253 253 253 253 253 253 253 253 253
37384-253 253 253 253 253 253 253 253 253 253 253 253
37385-253 253 253 253 253 253 182 182 182 2 2 6
37386- 2 2 6 2 2 6 2 2 6 46 46 46
37387- 2 2 6 2 2 6 2 2 6 2 2 6
37388- 10 10 10 86 86 86 38 38 38 10 10 10
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 0 0 0 0 0 0 0 0 0
37393- 0 0 0 0 0 0 0 0 0 0 0 0
37394- 0 0 0 0 0 0 0 0 0 0 0 0
37395- 0 0 0 0 0 0 0 0 0 0 0 0
37396- 10 10 10 26 26 26 66 66 66 82 82 82
37397- 2 2 6 22 22 22 18 18 18 2 2 6
37398-149 149 149 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 253 253 253 253 253 253
37400-253 253 253 253 253 253 234 234 234 242 242 242
37401-253 253 253 253 253 253 253 253 253 253 253 253
37402-253 253 253 253 253 253 253 253 253 253 253 253
37403-253 253 253 253 253 253 253 253 253 253 253 253
37404-253 253 253 253 253 253 253 253 253 253 253 253
37405-253 253 253 253 253 253 206 206 206 2 2 6
37406- 2 2 6 2 2 6 2 2 6 38 38 38
37407- 2 2 6 2 2 6 2 2 6 2 2 6
37408- 6 6 6 86 86 86 46 46 46 14 14 14
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 0 0 0 0 0 0 0 0 0 0 0 0
37413- 0 0 0 0 0 0 0 0 0 0 0 0
37414- 0 0 0 0 0 0 0 0 0 0 0 0
37415- 0 0 0 0 0 0 0 0 0 6 6 6
37416- 18 18 18 46 46 46 86 86 86 18 18 18
37417- 2 2 6 34 34 34 10 10 10 6 6 6
37418-210 210 210 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 253 253 253 253 253 253
37420-253 253 253 253 253 253 234 234 234 242 242 242
37421-253 253 253 253 253 253 253 253 253 253 253 253
37422-253 253 253 253 253 253 253 253 253 253 253 253
37423-253 253 253 253 253 253 253 253 253 253 253 253
37424-253 253 253 253 253 253 253 253 253 253 253 253
37425-253 253 253 253 253 253 221 221 221 6 6 6
37426- 2 2 6 2 2 6 6 6 6 30 30 30
37427- 2 2 6 2 2 6 2 2 6 2 2 6
37428- 2 2 6 82 82 82 54 54 54 18 18 18
37429- 6 6 6 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 0 0 0 0 0 0 0 0 0 0 0 0
37433- 0 0 0 0 0 0 0 0 0 0 0 0
37434- 0 0 0 0 0 0 0 0 0 0 0 0
37435- 0 0 0 0 0 0 0 0 0 10 10 10
37436- 26 26 26 66 66 66 62 62 62 2 2 6
37437- 2 2 6 38 38 38 10 10 10 26 26 26
37438-238 238 238 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 253 253 253 253 253 253
37440-253 253 253 253 253 253 231 231 231 238 238 238
37441-253 253 253 253 253 253 253 253 253 253 253 253
37442-253 253 253 253 253 253 253 253 253 253 253 253
37443-253 253 253 253 253 253 253 253 253 253 253 253
37444-253 253 253 253 253 253 253 253 253 253 253 253
37445-253 253 253 253 253 253 231 231 231 6 6 6
37446- 2 2 6 2 2 6 10 10 10 30 30 30
37447- 2 2 6 2 2 6 2 2 6 2 2 6
37448- 2 2 6 66 66 66 58 58 58 22 22 22
37449- 6 6 6 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 0 0 0
37452- 0 0 0 0 0 0 0 0 0 0 0 0
37453- 0 0 0 0 0 0 0 0 0 0 0 0
37454- 0 0 0 0 0 0 0 0 0 0 0 0
37455- 0 0 0 0 0 0 0 0 0 10 10 10
37456- 38 38 38 78 78 78 6 6 6 2 2 6
37457- 2 2 6 46 46 46 14 14 14 42 42 42
37458-246 246 246 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 253 253 253 253 253 253
37460-253 253 253 253 253 253 231 231 231 242 242 242
37461-253 253 253 253 253 253 253 253 253 253 253 253
37462-253 253 253 253 253 253 253 253 253 253 253 253
37463-253 253 253 253 253 253 253 253 253 253 253 253
37464-253 253 253 253 253 253 253 253 253 253 253 253
37465-253 253 253 253 253 253 234 234 234 10 10 10
37466- 2 2 6 2 2 6 22 22 22 14 14 14
37467- 2 2 6 2 2 6 2 2 6 2 2 6
37468- 2 2 6 66 66 66 62 62 62 22 22 22
37469- 6 6 6 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 0 0 0
37472- 0 0 0 0 0 0 0 0 0 0 0 0
37473- 0 0 0 0 0 0 0 0 0 0 0 0
37474- 0 0 0 0 0 0 0 0 0 0 0 0
37475- 0 0 0 0 0 0 6 6 6 18 18 18
37476- 50 50 50 74 74 74 2 2 6 2 2 6
37477- 14 14 14 70 70 70 34 34 34 62 62 62
37478-250 250 250 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 253 253 253 253 253 253
37480-253 253 253 253 253 253 231 231 231 246 246 246
37481-253 253 253 253 253 253 253 253 253 253 253 253
37482-253 253 253 253 253 253 253 253 253 253 253 253
37483-253 253 253 253 253 253 253 253 253 253 253 253
37484-253 253 253 253 253 253 253 253 253 253 253 253
37485-253 253 253 253 253 253 234 234 234 14 14 14
37486- 2 2 6 2 2 6 30 30 30 2 2 6
37487- 2 2 6 2 2 6 2 2 6 2 2 6
37488- 2 2 6 66 66 66 62 62 62 22 22 22
37489- 6 6 6 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 0 0 0
37492- 0 0 0 0 0 0 0 0 0 0 0 0
37493- 0 0 0 0 0 0 0 0 0 0 0 0
37494- 0 0 0 0 0 0 0 0 0 0 0 0
37495- 0 0 0 0 0 0 6 6 6 18 18 18
37496- 54 54 54 62 62 62 2 2 6 2 2 6
37497- 2 2 6 30 30 30 46 46 46 70 70 70
37498-250 250 250 253 253 253 253 253 253 253 253 253
37499-253 253 253 253 253 253 253 253 253 253 253 253
37500-253 253 253 253 253 253 231 231 231 246 246 246
37501-253 253 253 253 253 253 253 253 253 253 253 253
37502-253 253 253 253 253 253 253 253 253 253 253 253
37503-253 253 253 253 253 253 253 253 253 253 253 253
37504-253 253 253 253 253 253 253 253 253 253 253 253
37505-253 253 253 253 253 253 226 226 226 10 10 10
37506- 2 2 6 6 6 6 30 30 30 2 2 6
37507- 2 2 6 2 2 6 2 2 6 2 2 6
37508- 2 2 6 66 66 66 58 58 58 22 22 22
37509- 6 6 6 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 0 0 0 0 0 0
37512- 0 0 0 0 0 0 0 0 0 0 0 0
37513- 0 0 0 0 0 0 0 0 0 0 0 0
37514- 0 0 0 0 0 0 0 0 0 0 0 0
37515- 0 0 0 0 0 0 6 6 6 22 22 22
37516- 58 58 58 62 62 62 2 2 6 2 2 6
37517- 2 2 6 2 2 6 30 30 30 78 78 78
37518-250 250 250 253 253 253 253 253 253 253 253 253
37519-253 253 253 253 253 253 253 253 253 253 253 253
37520-253 253 253 253 253 253 231 231 231 246 246 246
37521-253 253 253 253 253 253 253 253 253 253 253 253
37522-253 253 253 253 253 253 253 253 253 253 253 253
37523-253 253 253 253 253 253 253 253 253 253 253 253
37524-253 253 253 253 253 253 253 253 253 253 253 253
37525-253 253 253 253 253 253 206 206 206 2 2 6
37526- 22 22 22 34 34 34 18 14 6 22 22 22
37527- 26 26 26 18 18 18 6 6 6 2 2 6
37528- 2 2 6 82 82 82 54 54 54 18 18 18
37529- 6 6 6 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 0 0 0 0 0 0 0 0 0
37532- 0 0 0 0 0 0 0 0 0 0 0 0
37533- 0 0 0 0 0 0 0 0 0 0 0 0
37534- 0 0 0 0 0 0 0 0 0 0 0 0
37535- 0 0 0 0 0 0 6 6 6 26 26 26
37536- 62 62 62 106 106 106 74 54 14 185 133 11
37537-210 162 10 121 92 8 6 6 6 62 62 62
37538-238 238 238 253 253 253 253 253 253 253 253 253
37539-253 253 253 253 253 253 253 253 253 253 253 253
37540-253 253 253 253 253 253 231 231 231 246 246 246
37541-253 253 253 253 253 253 253 253 253 253 253 253
37542-253 253 253 253 253 253 253 253 253 253 253 253
37543-253 253 253 253 253 253 253 253 253 253 253 253
37544-253 253 253 253 253 253 253 253 253 253 253 253
37545-253 253 253 253 253 253 158 158 158 18 18 18
37546- 14 14 14 2 2 6 2 2 6 2 2 6
37547- 6 6 6 18 18 18 66 66 66 38 38 38
37548- 6 6 6 94 94 94 50 50 50 18 18 18
37549- 6 6 6 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 0 0 0 0 0 0 0 0 0 0 0 0
37552- 0 0 0 0 0 0 0 0 0 0 0 0
37553- 0 0 0 0 0 0 0 0 0 0 0 0
37554- 0 0 0 0 0 0 0 0 0 6 6 6
37555- 10 10 10 10 10 10 18 18 18 38 38 38
37556- 78 78 78 142 134 106 216 158 10 242 186 14
37557-246 190 14 246 190 14 156 118 10 10 10 10
37558- 90 90 90 238 238 238 253 253 253 253 253 253
37559-253 253 253 253 253 253 253 253 253 253 253 253
37560-253 253 253 253 253 253 231 231 231 250 250 250
37561-253 253 253 253 253 253 253 253 253 253 253 253
37562-253 253 253 253 253 253 253 253 253 253 253 253
37563-253 253 253 253 253 253 253 253 253 253 253 253
37564-253 253 253 253 253 253 253 253 253 246 230 190
37565-238 204 91 238 204 91 181 142 44 37 26 9
37566- 2 2 6 2 2 6 2 2 6 2 2 6
37567- 2 2 6 2 2 6 38 38 38 46 46 46
37568- 26 26 26 106 106 106 54 54 54 18 18 18
37569- 6 6 6 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571- 0 0 0 0 0 0 0 0 0 0 0 0
37572- 0 0 0 0 0 0 0 0 0 0 0 0
37573- 0 0 0 0 0 0 0 0 0 0 0 0
37574- 0 0 0 6 6 6 14 14 14 22 22 22
37575- 30 30 30 38 38 38 50 50 50 70 70 70
37576-106 106 106 190 142 34 226 170 11 242 186 14
37577-246 190 14 246 190 14 246 190 14 154 114 10
37578- 6 6 6 74 74 74 226 226 226 253 253 253
37579-253 253 253 253 253 253 253 253 253 253 253 253
37580-253 253 253 253 253 253 231 231 231 250 250 250
37581-253 253 253 253 253 253 253 253 253 253 253 253
37582-253 253 253 253 253 253 253 253 253 253 253 253
37583-253 253 253 253 253 253 253 253 253 253 253 253
37584-253 253 253 253 253 253 253 253 253 228 184 62
37585-241 196 14 241 208 19 232 195 16 38 30 10
37586- 2 2 6 2 2 6 2 2 6 2 2 6
37587- 2 2 6 6 6 6 30 30 30 26 26 26
37588-203 166 17 154 142 90 66 66 66 26 26 26
37589- 6 6 6 0 0 0 0 0 0 0 0 0
37590- 0 0 0 0 0 0 0 0 0 0 0 0
37591- 0 0 0 0 0 0 0 0 0 0 0 0
37592- 0 0 0 0 0 0 0 0 0 0 0 0
37593- 0 0 0 0 0 0 0 0 0 0 0 0
37594- 6 6 6 18 18 18 38 38 38 58 58 58
37595- 78 78 78 86 86 86 101 101 101 123 123 123
37596-175 146 61 210 150 10 234 174 13 246 186 14
37597-246 190 14 246 190 14 246 190 14 238 190 10
37598-102 78 10 2 2 6 46 46 46 198 198 198
37599-253 253 253 253 253 253 253 253 253 253 253 253
37600-253 253 253 253 253 253 234 234 234 242 242 242
37601-253 253 253 253 253 253 253 253 253 253 253 253
37602-253 253 253 253 253 253 253 253 253 253 253 253
37603-253 253 253 253 253 253 253 253 253 253 253 253
37604-253 253 253 253 253 253 253 253 253 224 178 62
37605-242 186 14 241 196 14 210 166 10 22 18 6
37606- 2 2 6 2 2 6 2 2 6 2 2 6
37607- 2 2 6 2 2 6 6 6 6 121 92 8
37608-238 202 15 232 195 16 82 82 82 34 34 34
37609- 10 10 10 0 0 0 0 0 0 0 0 0
37610- 0 0 0 0 0 0 0 0 0 0 0 0
37611- 0 0 0 0 0 0 0 0 0 0 0 0
37612- 0 0 0 0 0 0 0 0 0 0 0 0
37613- 0 0 0 0 0 0 0 0 0 0 0 0
37614- 14 14 14 38 38 38 70 70 70 154 122 46
37615-190 142 34 200 144 11 197 138 11 197 138 11
37616-213 154 11 226 170 11 242 186 14 246 190 14
37617-246 190 14 246 190 14 246 190 14 246 190 14
37618-225 175 15 46 32 6 2 2 6 22 22 22
37619-158 158 158 250 250 250 253 253 253 253 253 253
37620-253 253 253 253 253 253 253 253 253 253 253 253
37621-253 253 253 253 253 253 253 253 253 253 253 253
37622-253 253 253 253 253 253 253 253 253 253 253 253
37623-253 253 253 253 253 253 253 253 253 253 253 253
37624-253 253 253 250 250 250 242 242 242 224 178 62
37625-239 182 13 236 186 11 213 154 11 46 32 6
37626- 2 2 6 2 2 6 2 2 6 2 2 6
37627- 2 2 6 2 2 6 61 42 6 225 175 15
37628-238 190 10 236 186 11 112 100 78 42 42 42
37629- 14 14 14 0 0 0 0 0 0 0 0 0
37630- 0 0 0 0 0 0 0 0 0 0 0 0
37631- 0 0 0 0 0 0 0 0 0 0 0 0
37632- 0 0 0 0 0 0 0 0 0 0 0 0
37633- 0 0 0 0 0 0 0 0 0 6 6 6
37634- 22 22 22 54 54 54 154 122 46 213 154 11
37635-226 170 11 230 174 11 226 170 11 226 170 11
37636-236 178 12 242 186 14 246 190 14 246 190 14
37637-246 190 14 246 190 14 246 190 14 246 190 14
37638-241 196 14 184 144 12 10 10 10 2 2 6
37639- 6 6 6 116 116 116 242 242 242 253 253 253
37640-253 253 253 253 253 253 253 253 253 253 253 253
37641-253 253 253 253 253 253 253 253 253 253 253 253
37642-253 253 253 253 253 253 253 253 253 253 253 253
37643-253 253 253 253 253 253 253 253 253 253 253 253
37644-253 253 253 231 231 231 198 198 198 214 170 54
37645-236 178 12 236 178 12 210 150 10 137 92 6
37646- 18 14 6 2 2 6 2 2 6 2 2 6
37647- 6 6 6 70 47 6 200 144 11 236 178 12
37648-239 182 13 239 182 13 124 112 88 58 58 58
37649- 22 22 22 6 6 6 0 0 0 0 0 0
37650- 0 0 0 0 0 0 0 0 0 0 0 0
37651- 0 0 0 0 0 0 0 0 0 0 0 0
37652- 0 0 0 0 0 0 0 0 0 0 0 0
37653- 0 0 0 0 0 0 0 0 0 10 10 10
37654- 30 30 30 70 70 70 180 133 36 226 170 11
37655-239 182 13 242 186 14 242 186 14 246 186 14
37656-246 190 14 246 190 14 246 190 14 246 190 14
37657-246 190 14 246 190 14 246 190 14 246 190 14
37658-246 190 14 232 195 16 98 70 6 2 2 6
37659- 2 2 6 2 2 6 66 66 66 221 221 221
37660-253 253 253 253 253 253 253 253 253 253 253 253
37661-253 253 253 253 253 253 253 253 253 253 253 253
37662-253 253 253 253 253 253 253 253 253 253 253 253
37663-253 253 253 253 253 253 253 253 253 253 253 253
37664-253 253 253 206 206 206 198 198 198 214 166 58
37665-230 174 11 230 174 11 216 158 10 192 133 9
37666-163 110 8 116 81 8 102 78 10 116 81 8
37667-167 114 7 197 138 11 226 170 11 239 182 13
37668-242 186 14 242 186 14 162 146 94 78 78 78
37669- 34 34 34 14 14 14 6 6 6 0 0 0
37670- 0 0 0 0 0 0 0 0 0 0 0 0
37671- 0 0 0 0 0 0 0 0 0 0 0 0
37672- 0 0 0 0 0 0 0 0 0 0 0 0
37673- 0 0 0 0 0 0 0 0 0 6 6 6
37674- 30 30 30 78 78 78 190 142 34 226 170 11
37675-239 182 13 246 190 14 246 190 14 246 190 14
37676-246 190 14 246 190 14 246 190 14 246 190 14
37677-246 190 14 246 190 14 246 190 14 246 190 14
37678-246 190 14 241 196 14 203 166 17 22 18 6
37679- 2 2 6 2 2 6 2 2 6 38 38 38
37680-218 218 218 253 253 253 253 253 253 253 253 253
37681-253 253 253 253 253 253 253 253 253 253 253 253
37682-253 253 253 253 253 253 253 253 253 253 253 253
37683-253 253 253 253 253 253 253 253 253 253 253 253
37684-250 250 250 206 206 206 198 198 198 202 162 69
37685-226 170 11 236 178 12 224 166 10 210 150 10
37686-200 144 11 197 138 11 192 133 9 197 138 11
37687-210 150 10 226 170 11 242 186 14 246 190 14
37688-246 190 14 246 186 14 225 175 15 124 112 88
37689- 62 62 62 30 30 30 14 14 14 6 6 6
37690- 0 0 0 0 0 0 0 0 0 0 0 0
37691- 0 0 0 0 0 0 0 0 0 0 0 0
37692- 0 0 0 0 0 0 0 0 0 0 0 0
37693- 0 0 0 0 0 0 0 0 0 10 10 10
37694- 30 30 30 78 78 78 174 135 50 224 166 10
37695-239 182 13 246 190 14 246 190 14 246 190 14
37696-246 190 14 246 190 14 246 190 14 246 190 14
37697-246 190 14 246 190 14 246 190 14 246 190 14
37698-246 190 14 246 190 14 241 196 14 139 102 15
37699- 2 2 6 2 2 6 2 2 6 2 2 6
37700- 78 78 78 250 250 250 253 253 253 253 253 253
37701-253 253 253 253 253 253 253 253 253 253 253 253
37702-253 253 253 253 253 253 253 253 253 253 253 253
37703-253 253 253 253 253 253 253 253 253 253 253 253
37704-250 250 250 214 214 214 198 198 198 190 150 46
37705-219 162 10 236 178 12 234 174 13 224 166 10
37706-216 158 10 213 154 11 213 154 11 216 158 10
37707-226 170 11 239 182 13 246 190 14 246 190 14
37708-246 190 14 246 190 14 242 186 14 206 162 42
37709-101 101 101 58 58 58 30 30 30 14 14 14
37710- 6 6 6 0 0 0 0 0 0 0 0 0
37711- 0 0 0 0 0 0 0 0 0 0 0 0
37712- 0 0 0 0 0 0 0 0 0 0 0 0
37713- 0 0 0 0 0 0 0 0 0 10 10 10
37714- 30 30 30 74 74 74 174 135 50 216 158 10
37715-236 178 12 246 190 14 246 190 14 246 190 14
37716-246 190 14 246 190 14 246 190 14 246 190 14
37717-246 190 14 246 190 14 246 190 14 246 190 14
37718-246 190 14 246 190 14 241 196 14 226 184 13
37719- 61 42 6 2 2 6 2 2 6 2 2 6
37720- 22 22 22 238 238 238 253 253 253 253 253 253
37721-253 253 253 253 253 253 253 253 253 253 253 253
37722-253 253 253 253 253 253 253 253 253 253 253 253
37723-253 253 253 253 253 253 253 253 253 253 253 253
37724-253 253 253 226 226 226 187 187 187 180 133 36
37725-216 158 10 236 178 12 239 182 13 236 178 12
37726-230 174 11 226 170 11 226 170 11 230 174 11
37727-236 178 12 242 186 14 246 190 14 246 190 14
37728-246 190 14 246 190 14 246 186 14 239 182 13
37729-206 162 42 106 106 106 66 66 66 34 34 34
37730- 14 14 14 6 6 6 0 0 0 0 0 0
37731- 0 0 0 0 0 0 0 0 0 0 0 0
37732- 0 0 0 0 0 0 0 0 0 0 0 0
37733- 0 0 0 0 0 0 0 0 0 6 6 6
37734- 26 26 26 70 70 70 163 133 67 213 154 11
37735-236 178 12 246 190 14 246 190 14 246 190 14
37736-246 190 14 246 190 14 246 190 14 246 190 14
37737-246 190 14 246 190 14 246 190 14 246 190 14
37738-246 190 14 246 190 14 246 190 14 241 196 14
37739-190 146 13 18 14 6 2 2 6 2 2 6
37740- 46 46 46 246 246 246 253 253 253 253 253 253
37741-253 253 253 253 253 253 253 253 253 253 253 253
37742-253 253 253 253 253 253 253 253 253 253 253 253
37743-253 253 253 253 253 253 253 253 253 253 253 253
37744-253 253 253 221 221 221 86 86 86 156 107 11
37745-216 158 10 236 178 12 242 186 14 246 186 14
37746-242 186 14 239 182 13 239 182 13 242 186 14
37747-242 186 14 246 186 14 246 190 14 246 190 14
37748-246 190 14 246 190 14 246 190 14 246 190 14
37749-242 186 14 225 175 15 142 122 72 66 66 66
37750- 30 30 30 10 10 10 0 0 0 0 0 0
37751- 0 0 0 0 0 0 0 0 0 0 0 0
37752- 0 0 0 0 0 0 0 0 0 0 0 0
37753- 0 0 0 0 0 0 0 0 0 6 6 6
37754- 26 26 26 70 70 70 163 133 67 210 150 10
37755-236 178 12 246 190 14 246 190 14 246 190 14
37756-246 190 14 246 190 14 246 190 14 246 190 14
37757-246 190 14 246 190 14 246 190 14 246 190 14
37758-246 190 14 246 190 14 246 190 14 246 190 14
37759-232 195 16 121 92 8 34 34 34 106 106 106
37760-221 221 221 253 253 253 253 253 253 253 253 253
37761-253 253 253 253 253 253 253 253 253 253 253 253
37762-253 253 253 253 253 253 253 253 253 253 253 253
37763-253 253 253 253 253 253 253 253 253 253 253 253
37764-242 242 242 82 82 82 18 14 6 163 110 8
37765-216 158 10 236 178 12 242 186 14 246 190 14
37766-246 190 14 246 190 14 246 190 14 246 190 14
37767-246 190 14 246 190 14 246 190 14 246 190 14
37768-246 190 14 246 190 14 246 190 14 246 190 14
37769-246 190 14 246 190 14 242 186 14 163 133 67
37770- 46 46 46 18 18 18 6 6 6 0 0 0
37771- 0 0 0 0 0 0 0 0 0 0 0 0
37772- 0 0 0 0 0 0 0 0 0 0 0 0
37773- 0 0 0 0 0 0 0 0 0 10 10 10
37774- 30 30 30 78 78 78 163 133 67 210 150 10
37775-236 178 12 246 186 14 246 190 14 246 190 14
37776-246 190 14 246 190 14 246 190 14 246 190 14
37777-246 190 14 246 190 14 246 190 14 246 190 14
37778-246 190 14 246 190 14 246 190 14 246 190 14
37779-241 196 14 215 174 15 190 178 144 253 253 253
37780-253 253 253 253 253 253 253 253 253 253 253 253
37781-253 253 253 253 253 253 253 253 253 253 253 253
37782-253 253 253 253 253 253 253 253 253 253 253 253
37783-253 253 253 253 253 253 253 253 253 218 218 218
37784- 58 58 58 2 2 6 22 18 6 167 114 7
37785-216 158 10 236 178 12 246 186 14 246 190 14
37786-246 190 14 246 190 14 246 190 14 246 190 14
37787-246 190 14 246 190 14 246 190 14 246 190 14
37788-246 190 14 246 190 14 246 190 14 246 190 14
37789-246 190 14 246 186 14 242 186 14 190 150 46
37790- 54 54 54 22 22 22 6 6 6 0 0 0
37791- 0 0 0 0 0 0 0 0 0 0 0 0
37792- 0 0 0 0 0 0 0 0 0 0 0 0
37793- 0 0 0 0 0 0 0 0 0 14 14 14
37794- 38 38 38 86 86 86 180 133 36 213 154 11
37795-236 178 12 246 186 14 246 190 14 246 190 14
37796-246 190 14 246 190 14 246 190 14 246 190 14
37797-246 190 14 246 190 14 246 190 14 246 190 14
37798-246 190 14 246 190 14 246 190 14 246 190 14
37799-246 190 14 232 195 16 190 146 13 214 214 214
37800-253 253 253 253 253 253 253 253 253 253 253 253
37801-253 253 253 253 253 253 253 253 253 253 253 253
37802-253 253 253 253 253 253 253 253 253 253 253 253
37803-253 253 253 250 250 250 170 170 170 26 26 26
37804- 2 2 6 2 2 6 37 26 9 163 110 8
37805-219 162 10 239 182 13 246 186 14 246 190 14
37806-246 190 14 246 190 14 246 190 14 246 190 14
37807-246 190 14 246 190 14 246 190 14 246 190 14
37808-246 190 14 246 190 14 246 190 14 246 190 14
37809-246 186 14 236 178 12 224 166 10 142 122 72
37810- 46 46 46 18 18 18 6 6 6 0 0 0
37811- 0 0 0 0 0 0 0 0 0 0 0 0
37812- 0 0 0 0 0 0 0 0 0 0 0 0
37813- 0 0 0 0 0 0 6 6 6 18 18 18
37814- 50 50 50 109 106 95 192 133 9 224 166 10
37815-242 186 14 246 190 14 246 190 14 246 190 14
37816-246 190 14 246 190 14 246 190 14 246 190 14
37817-246 190 14 246 190 14 246 190 14 246 190 14
37818-246 190 14 246 190 14 246 190 14 246 190 14
37819-242 186 14 226 184 13 210 162 10 142 110 46
37820-226 226 226 253 253 253 253 253 253 253 253 253
37821-253 253 253 253 253 253 253 253 253 253 253 253
37822-253 253 253 253 253 253 253 253 253 253 253 253
37823-198 198 198 66 66 66 2 2 6 2 2 6
37824- 2 2 6 2 2 6 50 34 6 156 107 11
37825-219 162 10 239 182 13 246 186 14 246 190 14
37826-246 190 14 246 190 14 246 190 14 246 190 14
37827-246 190 14 246 190 14 246 190 14 246 190 14
37828-246 190 14 246 190 14 246 190 14 242 186 14
37829-234 174 13 213 154 11 154 122 46 66 66 66
37830- 30 30 30 10 10 10 0 0 0 0 0 0
37831- 0 0 0 0 0 0 0 0 0 0 0 0
37832- 0 0 0 0 0 0 0 0 0 0 0 0
37833- 0 0 0 0 0 0 6 6 6 22 22 22
37834- 58 58 58 154 121 60 206 145 10 234 174 13
37835-242 186 14 246 186 14 246 190 14 246 190 14
37836-246 190 14 246 190 14 246 190 14 246 190 14
37837-246 190 14 246 190 14 246 190 14 246 190 14
37838-246 190 14 246 190 14 246 190 14 246 190 14
37839-246 186 14 236 178 12 210 162 10 163 110 8
37840- 61 42 6 138 138 138 218 218 218 250 250 250
37841-253 253 253 253 253 253 253 253 253 250 250 250
37842-242 242 242 210 210 210 144 144 144 66 66 66
37843- 6 6 6 2 2 6 2 2 6 2 2 6
37844- 2 2 6 2 2 6 61 42 6 163 110 8
37845-216 158 10 236 178 12 246 190 14 246 190 14
37846-246 190 14 246 190 14 246 190 14 246 190 14
37847-246 190 14 246 190 14 246 190 14 246 190 14
37848-246 190 14 239 182 13 230 174 11 216 158 10
37849-190 142 34 124 112 88 70 70 70 38 38 38
37850- 18 18 18 6 6 6 0 0 0 0 0 0
37851- 0 0 0 0 0 0 0 0 0 0 0 0
37852- 0 0 0 0 0 0 0 0 0 0 0 0
37853- 0 0 0 0 0 0 6 6 6 22 22 22
37854- 62 62 62 168 124 44 206 145 10 224 166 10
37855-236 178 12 239 182 13 242 186 14 242 186 14
37856-246 186 14 246 190 14 246 190 14 246 190 14
37857-246 190 14 246 190 14 246 190 14 246 190 14
37858-246 190 14 246 190 14 246 190 14 246 190 14
37859-246 190 14 236 178 12 216 158 10 175 118 6
37860- 80 54 7 2 2 6 6 6 6 30 30 30
37861- 54 54 54 62 62 62 50 50 50 38 38 38
37862- 14 14 14 2 2 6 2 2 6 2 2 6
37863- 2 2 6 2 2 6 2 2 6 2 2 6
37864- 2 2 6 6 6 6 80 54 7 167 114 7
37865-213 154 11 236 178 12 246 190 14 246 190 14
37866-246 190 14 246 190 14 246 190 14 246 190 14
37867-246 190 14 242 186 14 239 182 13 239 182 13
37868-230 174 11 210 150 10 174 135 50 124 112 88
37869- 82 82 82 54 54 54 34 34 34 18 18 18
37870- 6 6 6 0 0 0 0 0 0 0 0 0
37871- 0 0 0 0 0 0 0 0 0 0 0 0
37872- 0 0 0 0 0 0 0 0 0 0 0 0
37873- 0 0 0 0 0 0 6 6 6 18 18 18
37874- 50 50 50 158 118 36 192 133 9 200 144 11
37875-216 158 10 219 162 10 224 166 10 226 170 11
37876-230 174 11 236 178 12 239 182 13 239 182 13
37877-242 186 14 246 186 14 246 190 14 246 190 14
37878-246 190 14 246 190 14 246 190 14 246 190 14
37879-246 186 14 230 174 11 210 150 10 163 110 8
37880-104 69 6 10 10 10 2 2 6 2 2 6
37881- 2 2 6 2 2 6 2 2 6 2 2 6
37882- 2 2 6 2 2 6 2 2 6 2 2 6
37883- 2 2 6 2 2 6 2 2 6 2 2 6
37884- 2 2 6 6 6 6 91 60 6 167 114 7
37885-206 145 10 230 174 11 242 186 14 246 190 14
37886-246 190 14 246 190 14 246 186 14 242 186 14
37887-239 182 13 230 174 11 224 166 10 213 154 11
37888-180 133 36 124 112 88 86 86 86 58 58 58
37889- 38 38 38 22 22 22 10 10 10 6 6 6
37890- 0 0 0 0 0 0 0 0 0 0 0 0
37891- 0 0 0 0 0 0 0 0 0 0 0 0
37892- 0 0 0 0 0 0 0 0 0 0 0 0
37893- 0 0 0 0 0 0 0 0 0 14 14 14
37894- 34 34 34 70 70 70 138 110 50 158 118 36
37895-167 114 7 180 123 7 192 133 9 197 138 11
37896-200 144 11 206 145 10 213 154 11 219 162 10
37897-224 166 10 230 174 11 239 182 13 242 186 14
37898-246 186 14 246 186 14 246 186 14 246 186 14
37899-239 182 13 216 158 10 185 133 11 152 99 6
37900-104 69 6 18 14 6 2 2 6 2 2 6
37901- 2 2 6 2 2 6 2 2 6 2 2 6
37902- 2 2 6 2 2 6 2 2 6 2 2 6
37903- 2 2 6 2 2 6 2 2 6 2 2 6
37904- 2 2 6 6 6 6 80 54 7 152 99 6
37905-192 133 9 219 162 10 236 178 12 239 182 13
37906-246 186 14 242 186 14 239 182 13 236 178 12
37907-224 166 10 206 145 10 192 133 9 154 121 60
37908- 94 94 94 62 62 62 42 42 42 22 22 22
37909- 14 14 14 6 6 6 0 0 0 0 0 0
37910- 0 0 0 0 0 0 0 0 0 0 0 0
37911- 0 0 0 0 0 0 0 0 0 0 0 0
37912- 0 0 0 0 0 0 0 0 0 0 0 0
37913- 0 0 0 0 0 0 0 0 0 6 6 6
37914- 18 18 18 34 34 34 58 58 58 78 78 78
37915-101 98 89 124 112 88 142 110 46 156 107 11
37916-163 110 8 167 114 7 175 118 6 180 123 7
37917-185 133 11 197 138 11 210 150 10 219 162 10
37918-226 170 11 236 178 12 236 178 12 234 174 13
37919-219 162 10 197 138 11 163 110 8 130 83 6
37920- 91 60 6 10 10 10 2 2 6 2 2 6
37921- 18 18 18 38 38 38 38 38 38 38 38 38
37922- 38 38 38 38 38 38 38 38 38 38 38 38
37923- 38 38 38 38 38 38 26 26 26 2 2 6
37924- 2 2 6 6 6 6 70 47 6 137 92 6
37925-175 118 6 200 144 11 219 162 10 230 174 11
37926-234 174 13 230 174 11 219 162 10 210 150 10
37927-192 133 9 163 110 8 124 112 88 82 82 82
37928- 50 50 50 30 30 30 14 14 14 6 6 6
37929- 0 0 0 0 0 0 0 0 0 0 0 0
37930- 0 0 0 0 0 0 0 0 0 0 0 0
37931- 0 0 0 0 0 0 0 0 0 0 0 0
37932- 0 0 0 0 0 0 0 0 0 0 0 0
37933- 0 0 0 0 0 0 0 0 0 0 0 0
37934- 6 6 6 14 14 14 22 22 22 34 34 34
37935- 42 42 42 58 58 58 74 74 74 86 86 86
37936-101 98 89 122 102 70 130 98 46 121 87 25
37937-137 92 6 152 99 6 163 110 8 180 123 7
37938-185 133 11 197 138 11 206 145 10 200 144 11
37939-180 123 7 156 107 11 130 83 6 104 69 6
37940- 50 34 6 54 54 54 110 110 110 101 98 89
37941- 86 86 86 82 82 82 78 78 78 78 78 78
37942- 78 78 78 78 78 78 78 78 78 78 78 78
37943- 78 78 78 82 82 82 86 86 86 94 94 94
37944-106 106 106 101 101 101 86 66 34 124 80 6
37945-156 107 11 180 123 7 192 133 9 200 144 11
37946-206 145 10 200 144 11 192 133 9 175 118 6
37947-139 102 15 109 106 95 70 70 70 42 42 42
37948- 22 22 22 10 10 10 0 0 0 0 0 0
37949- 0 0 0 0 0 0 0 0 0 0 0 0
37950- 0 0 0 0 0 0 0 0 0 0 0 0
37951- 0 0 0 0 0 0 0 0 0 0 0 0
37952- 0 0 0 0 0 0 0 0 0 0 0 0
37953- 0 0 0 0 0 0 0 0 0 0 0 0
37954- 0 0 0 0 0 0 6 6 6 10 10 10
37955- 14 14 14 22 22 22 30 30 30 38 38 38
37956- 50 50 50 62 62 62 74 74 74 90 90 90
37957-101 98 89 112 100 78 121 87 25 124 80 6
37958-137 92 6 152 99 6 152 99 6 152 99 6
37959-138 86 6 124 80 6 98 70 6 86 66 30
37960-101 98 89 82 82 82 58 58 58 46 46 46
37961- 38 38 38 34 34 34 34 34 34 34 34 34
37962- 34 34 34 34 34 34 34 34 34 34 34 34
37963- 34 34 34 34 34 34 38 38 38 42 42 42
37964- 54 54 54 82 82 82 94 86 76 91 60 6
37965-134 86 6 156 107 11 167 114 7 175 118 6
37966-175 118 6 167 114 7 152 99 6 121 87 25
37967-101 98 89 62 62 62 34 34 34 18 18 18
37968- 6 6 6 0 0 0 0 0 0 0 0 0
37969- 0 0 0 0 0 0 0 0 0 0 0 0
37970- 0 0 0 0 0 0 0 0 0 0 0 0
37971- 0 0 0 0 0 0 0 0 0 0 0 0
37972- 0 0 0 0 0 0 0 0 0 0 0 0
37973- 0 0 0 0 0 0 0 0 0 0 0 0
37974- 0 0 0 0 0 0 0 0 0 0 0 0
37975- 0 0 0 6 6 6 6 6 6 10 10 10
37976- 18 18 18 22 22 22 30 30 30 42 42 42
37977- 50 50 50 66 66 66 86 86 86 101 98 89
37978-106 86 58 98 70 6 104 69 6 104 69 6
37979-104 69 6 91 60 6 82 62 34 90 90 90
37980- 62 62 62 38 38 38 22 22 22 14 14 14
37981- 10 10 10 10 10 10 10 10 10 10 10 10
37982- 10 10 10 10 10 10 6 6 6 10 10 10
37983- 10 10 10 10 10 10 10 10 10 14 14 14
37984- 22 22 22 42 42 42 70 70 70 89 81 66
37985- 80 54 7 104 69 6 124 80 6 137 92 6
37986-134 86 6 116 81 8 100 82 52 86 86 86
37987- 58 58 58 30 30 30 14 14 14 6 6 6
37988- 0 0 0 0 0 0 0 0 0 0 0 0
37989- 0 0 0 0 0 0 0 0 0 0 0 0
37990- 0 0 0 0 0 0 0 0 0 0 0 0
37991- 0 0 0 0 0 0 0 0 0 0 0 0
37992- 0 0 0 0 0 0 0 0 0 0 0 0
37993- 0 0 0 0 0 0 0 0 0 0 0 0
37994- 0 0 0 0 0 0 0 0 0 0 0 0
37995- 0 0 0 0 0 0 0 0 0 0 0 0
37996- 0 0 0 6 6 6 10 10 10 14 14 14
37997- 18 18 18 26 26 26 38 38 38 54 54 54
37998- 70 70 70 86 86 86 94 86 76 89 81 66
37999- 89 81 66 86 86 86 74 74 74 50 50 50
38000- 30 30 30 14 14 14 6 6 6 0 0 0
38001- 0 0 0 0 0 0 0 0 0 0 0 0
38002- 0 0 0 0 0 0 0 0 0 0 0 0
38003- 0 0 0 0 0 0 0 0 0 0 0 0
38004- 6 6 6 18 18 18 34 34 34 58 58 58
38005- 82 82 82 89 81 66 89 81 66 89 81 66
38006- 94 86 66 94 86 76 74 74 74 50 50 50
38007- 26 26 26 14 14 14 6 6 6 0 0 0
38008- 0 0 0 0 0 0 0 0 0 0 0 0
38009- 0 0 0 0 0 0 0 0 0 0 0 0
38010- 0 0 0 0 0 0 0 0 0 0 0 0
38011- 0 0 0 0 0 0 0 0 0 0 0 0
38012- 0 0 0 0 0 0 0 0 0 0 0 0
38013- 0 0 0 0 0 0 0 0 0 0 0 0
38014- 0 0 0 0 0 0 0 0 0 0 0 0
38015- 0 0 0 0 0 0 0 0 0 0 0 0
38016- 0 0 0 0 0 0 0 0 0 0 0 0
38017- 6 6 6 6 6 6 14 14 14 18 18 18
38018- 30 30 30 38 38 38 46 46 46 54 54 54
38019- 50 50 50 42 42 42 30 30 30 18 18 18
38020- 10 10 10 0 0 0 0 0 0 0 0 0
38021- 0 0 0 0 0 0 0 0 0 0 0 0
38022- 0 0 0 0 0 0 0 0 0 0 0 0
38023- 0 0 0 0 0 0 0 0 0 0 0 0
38024- 0 0 0 6 6 6 14 14 14 26 26 26
38025- 38 38 38 50 50 50 58 58 58 58 58 58
38026- 54 54 54 42 42 42 30 30 30 18 18 18
38027- 10 10 10 0 0 0 0 0 0 0 0 0
38028- 0 0 0 0 0 0 0 0 0 0 0 0
38029- 0 0 0 0 0 0 0 0 0 0 0 0
38030- 0 0 0 0 0 0 0 0 0 0 0 0
38031- 0 0 0 0 0 0 0 0 0 0 0 0
38032- 0 0 0 0 0 0 0 0 0 0 0 0
38033- 0 0 0 0 0 0 0 0 0 0 0 0
38034- 0 0 0 0 0 0 0 0 0 0 0 0
38035- 0 0 0 0 0 0 0 0 0 0 0 0
38036- 0 0 0 0 0 0 0 0 0 0 0 0
38037- 0 0 0 0 0 0 0 0 0 6 6 6
38038- 6 6 6 10 10 10 14 14 14 18 18 18
38039- 18 18 18 14 14 14 10 10 10 6 6 6
38040- 0 0 0 0 0 0 0 0 0 0 0 0
38041- 0 0 0 0 0 0 0 0 0 0 0 0
38042- 0 0 0 0 0 0 0 0 0 0 0 0
38043- 0 0 0 0 0 0 0 0 0 0 0 0
38044- 0 0 0 0 0 0 0 0 0 6 6 6
38045- 14 14 14 18 18 18 22 22 22 22 22 22
38046- 18 18 18 14 14 14 10 10 10 6 6 6
38047- 0 0 0 0 0 0 0 0 0 0 0 0
38048- 0 0 0 0 0 0 0 0 0 0 0 0
38049- 0 0 0 0 0 0 0 0 0 0 0 0
38050- 0 0 0 0 0 0 0 0 0 0 0 0
38051- 0 0 0 0 0 0 0 0 0 0 0 0
38052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38065+4 4 4 4 4 4
38066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38079+4 4 4 4 4 4
38080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38093+4 4 4 4 4 4
38094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38107+4 4 4 4 4 4
38108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38121+4 4 4 4 4 4
38122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38135+4 4 4 4 4 4
38136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38140+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38141+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38145+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38146+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38147+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149+4 4 4 4 4 4
38150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38155+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38156+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38160+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38161+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38162+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163+4 4 4 4 4 4
38164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38169+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38170+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38174+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38175+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38176+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38177+4 4 4 4 4 4
38178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38182+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38183+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38184+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38187+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38188+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38189+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38190+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38191+4 4 4 4 4 4
38192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38196+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38197+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38198+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38199+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38200+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38201+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38202+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38203+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38204+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38205+4 4 4 4 4 4
38206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38209+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38210+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38211+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38212+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38213+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38214+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38215+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38216+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38217+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38218+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38219+4 4 4 4 4 4
38220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38223+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38224+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38225+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38226+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38227+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38228+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38229+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38230+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38231+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38232+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38233+4 4 4 4 4 4
38234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38236+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38237+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38238+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38239+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38240+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38241+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38242+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38243+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38244+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38245+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38246+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38247+4 4 4 4 4 4
38248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38251+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38252+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38253+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38254+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38255+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38256+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38257+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38258+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38259+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38260+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38261+4 4 4 4 4 4
38262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38265+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38266+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38267+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38268+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38269+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38270+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38271+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38272+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38273+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38274+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38275+4 4 4 4 4 4
38276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38278+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38279+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38280+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38281+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38282+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38283+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38284+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38285+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38286+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38287+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38288+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38289+4 4 4 4 4 4
38290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38292+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38293+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38294+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38295+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38296+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38297+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38298+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38299+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38300+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38301+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38302+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38303+0 0 0 4 4 4
38304+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38305+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38306+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38307+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38308+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38309+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38310+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38311+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38312+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38313+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38314+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38315+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38316+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38317+2 0 0 0 0 0
38318+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38319+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38320+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38321+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38322+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38323+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38324+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38325+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38326+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38327+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38328+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38329+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38330+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38331+37 38 37 0 0 0
38332+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38333+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38334+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38335+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38336+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38337+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38338+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38339+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38340+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38341+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38342+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38343+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38344+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38345+85 115 134 4 0 0
38346+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38347+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38348+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38349+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38350+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38351+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38352+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38353+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38354+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38355+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38356+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38357+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38358+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38359+60 73 81 4 0 0
38360+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38361+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38362+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38363+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38364+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38365+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38366+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38367+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38368+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38369+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38370+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38371+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38372+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38373+16 19 21 4 0 0
38374+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38375+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38376+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38377+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38378+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38379+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38380+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38381+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38382+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38383+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38384+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38385+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38386+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38387+4 0 0 4 3 3
38388+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38389+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38390+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38392+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38393+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38394+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38395+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38396+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38397+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38398+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38399+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38400+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38401+3 2 2 4 4 4
38402+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38403+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38404+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38405+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38406+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38407+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38408+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38409+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38410+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38411+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38412+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38413+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38414+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38415+4 4 4 4 4 4
38416+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38417+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38418+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38419+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38420+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38421+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38422+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38423+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38424+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38425+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38426+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38427+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38428+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38429+4 4 4 4 4 4
38430+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38431+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38432+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38433+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38434+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38435+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38436+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38437+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38438+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38439+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38440+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38441+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38442+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38443+5 5 5 5 5 5
38444+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38445+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38446+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38447+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38448+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38449+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38450+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38451+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38452+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38453+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38454+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38455+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38456+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38457+5 5 5 4 4 4
38458+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38459+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38460+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38461+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38462+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38463+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38464+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38465+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38466+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38467+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38468+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38469+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38471+4 4 4 4 4 4
38472+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38473+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38474+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38475+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38476+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38477+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38478+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38479+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38480+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38481+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38482+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38483+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38485+4 4 4 4 4 4
38486+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38487+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38488+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38489+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38490+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38491+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38492+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38493+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38494+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38495+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38496+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38499+4 4 4 4 4 4
38500+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38501+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38502+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38503+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38504+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38505+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38506+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38507+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38508+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38509+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38510+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38513+4 4 4 4 4 4
38514+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38515+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38516+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38517+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38518+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38519+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38520+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38521+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38522+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38523+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38524+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38527+4 4 4 4 4 4
38528+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38529+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38530+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38531+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38532+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38533+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38534+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38535+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38536+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38537+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38538+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38541+4 4 4 4 4 4
38542+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38543+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38544+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38545+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38546+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38547+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38548+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38549+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38550+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38551+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38552+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38555+4 4 4 4 4 4
38556+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38557+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38558+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38559+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38560+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38561+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38562+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38563+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38564+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38565+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38566+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38569+4 4 4 4 4 4
38570+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38571+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38572+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38573+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38574+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38575+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38576+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38577+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38578+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38579+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38580+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38583+4 4 4 4 4 4
38584+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38585+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38586+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38587+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38588+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38589+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38590+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38591+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38592+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38593+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38594+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38597+4 4 4 4 4 4
38598+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38599+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38600+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38601+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38602+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38603+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38604+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38605+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38606+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38607+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38608+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38611+4 4 4 4 4 4
38612+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38613+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38614+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38615+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38616+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38617+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38618+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38619+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38620+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38621+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38622+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38625+4 4 4 4 4 4
38626+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38627+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38628+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38629+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38630+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38631+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38632+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38633+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38634+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38635+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38636+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38639+4 4 4 4 4 4
38640+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38641+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38642+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38643+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38644+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38645+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38646+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38647+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38648+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38649+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38650+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38653+4 4 4 4 4 4
38654+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38655+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38656+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38657+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38658+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38659+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38660+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38661+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38662+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38663+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38664+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38667+4 4 4 4 4 4
38668+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38669+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38670+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38671+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38672+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38673+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38674+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38675+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38676+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38677+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38678+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38681+4 4 4 4 4 4
38682+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38683+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38684+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38685+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38686+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38687+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38688+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38689+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38690+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38691+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38692+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38695+4 4 4 4 4 4
38696+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38697+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38698+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38699+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38700+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38701+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38702+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38703+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38704+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38705+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38706+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38709+4 4 4 4 4 4
38710+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38711+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38712+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38713+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38714+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38715+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38716+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38717+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38718+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38719+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38720+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38723+4 4 4 4 4 4
38724+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38725+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38726+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38727+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38728+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38729+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38730+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38731+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38732+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38733+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38734+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38737+4 4 4 4 4 4
38738+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38739+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38740+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38741+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38742+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38743+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38744+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38745+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38746+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38747+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38748+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38751+4 4 4 4 4 4
38752+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38753+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38754+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38755+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38756+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38757+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38758+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38759+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38760+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38761+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38762+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38765+4 4 4 4 4 4
38766+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38767+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38768+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38769+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38770+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38771+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38772+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38773+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38774+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38775+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38776+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38779+4 4 4 4 4 4
38780+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38781+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38782+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38783+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38784+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38785+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38786+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38787+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38788+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38789+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38790+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38793+4 4 4 4 4 4
38794+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38795+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38796+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38797+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38798+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38799+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38800+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38801+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38802+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38803+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38804+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38807+4 4 4 4 4 4
38808+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38809+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38810+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38811+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38812+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38813+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38814+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38815+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38816+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38817+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38818+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38821+4 4 4 4 4 4
38822+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38823+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38824+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38825+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38826+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38827+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38828+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38829+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38830+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38831+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38832+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38835+4 4 4 4 4 4
38836+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38837+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38838+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38839+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38840+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38841+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38842+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38843+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38844+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38845+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38846+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38849+4 4 4 4 4 4
38850+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38851+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38852+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38853+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38854+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38855+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38856+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38857+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38858+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38859+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38860+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863+4 4 4 4 4 4
38864+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38865+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38866+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38867+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38868+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38869+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38870+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38871+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38872+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38873+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877+4 4 4 4 4 4
38878+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38879+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38880+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38881+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38882+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38883+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38884+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38885+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38886+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38887+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891+4 4 4 4 4 4
38892+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38893+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38894+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38895+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38896+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38897+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38898+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38899+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38900+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38901+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905+4 4 4 4 4 4
38906+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38907+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38908+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38909+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38910+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38911+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38912+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38913+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38914+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38915+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919+4 4 4 4 4 4
38920+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38921+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38922+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38923+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38924+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38925+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38926+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38927+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38928+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933+4 4 4 4 4 4
38934+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38935+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38936+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38937+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38938+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38939+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38940+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38941+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38942+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947+4 4 4 4 4 4
38948+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38949+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38950+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38951+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38952+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38953+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38954+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38955+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38956+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961+4 4 4 4 4 4
38962+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38963+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38964+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38965+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38966+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38967+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38968+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38969+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975+4 4 4 4 4 4
38976+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38977+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38978+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38979+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38980+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38981+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38982+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38983+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989+4 4 4 4 4 4
38990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38991+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38992+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38993+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38994+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38995+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38996+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38997+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003+4 4 4 4 4 4
39004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39006+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39007+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39008+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39009+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39010+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39011+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017+4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39020+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39021+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39022+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39023+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39024+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39025+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031+4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39035+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39036+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39037+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39038+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39039+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045+4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39049+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39050+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39051+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39052+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059+4 4 4 4 4 4
39060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39064+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39065+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39066+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073+4 4 4 4 4 4
39074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39078+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39079+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39080+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087+4 4 4 4 4 4
39088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39092+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39093+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39094+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101+4 4 4 4 4 4
39102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39106+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39107+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39108+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115+4 4 4 4 4 4
39116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39120+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39121+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39122+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129+4 4 4 4 4 4
39130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39134+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39135+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39136+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143+4 4 4 4 4 4
39144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39148+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39149+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157+4 4 4 4 4 4
39158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39162+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39163+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171+4 4 4 4 4 4
39172diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39173index 3473e75..c930142 100644
39174--- a/drivers/video/udlfb.c
39175+++ b/drivers/video/udlfb.c
39176@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39177 dlfb_urb_completion(urb);
39178
39179 error:
39180- atomic_add(bytes_sent, &dev->bytes_sent);
39181- atomic_add(bytes_identical, &dev->bytes_identical);
39182- atomic_add(width*height*2, &dev->bytes_rendered);
39183+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39184+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39185+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39186 end_cycles = get_cycles();
39187- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39188+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39189 >> 10)), /* Kcycles */
39190 &dev->cpu_kcycles_used);
39191
39192@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39193 dlfb_urb_completion(urb);
39194
39195 error:
39196- atomic_add(bytes_sent, &dev->bytes_sent);
39197- atomic_add(bytes_identical, &dev->bytes_identical);
39198- atomic_add(bytes_rendered, &dev->bytes_rendered);
39199+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39200+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39201+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39202 end_cycles = get_cycles();
39203- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39204+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39205 >> 10)), /* Kcycles */
39206 &dev->cpu_kcycles_used);
39207 }
39208@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39209 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39210 struct dlfb_data *dev = fb_info->par;
39211 return snprintf(buf, PAGE_SIZE, "%u\n",
39212- atomic_read(&dev->bytes_rendered));
39213+ atomic_read_unchecked(&dev->bytes_rendered));
39214 }
39215
39216 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39217@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39218 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39219 struct dlfb_data *dev = fb_info->par;
39220 return snprintf(buf, PAGE_SIZE, "%u\n",
39221- atomic_read(&dev->bytes_identical));
39222+ atomic_read_unchecked(&dev->bytes_identical));
39223 }
39224
39225 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39226@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39227 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39228 struct dlfb_data *dev = fb_info->par;
39229 return snprintf(buf, PAGE_SIZE, "%u\n",
39230- atomic_read(&dev->bytes_sent));
39231+ atomic_read_unchecked(&dev->bytes_sent));
39232 }
39233
39234 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39235@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39236 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39237 struct dlfb_data *dev = fb_info->par;
39238 return snprintf(buf, PAGE_SIZE, "%u\n",
39239- atomic_read(&dev->cpu_kcycles_used));
39240+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39241 }
39242
39243 static ssize_t edid_show(
39244@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39245 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39246 struct dlfb_data *dev = fb_info->par;
39247
39248- atomic_set(&dev->bytes_rendered, 0);
39249- atomic_set(&dev->bytes_identical, 0);
39250- atomic_set(&dev->bytes_sent, 0);
39251- atomic_set(&dev->cpu_kcycles_used, 0);
39252+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39253+ atomic_set_unchecked(&dev->bytes_identical, 0);
39254+ atomic_set_unchecked(&dev->bytes_sent, 0);
39255+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39256
39257 return count;
39258 }
39259diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39260index 7f8472c..9842e87 100644
39261--- a/drivers/video/uvesafb.c
39262+++ b/drivers/video/uvesafb.c
39263@@ -19,6 +19,7 @@
39264 #include <linux/io.h>
39265 #include <linux/mutex.h>
39266 #include <linux/slab.h>
39267+#include <linux/moduleloader.h>
39268 #include <video/edid.h>
39269 #include <video/uvesafb.h>
39270 #ifdef CONFIG_X86
39271@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39272 NULL,
39273 };
39274
39275- return call_usermodehelper(v86d_path, argv, envp, 1);
39276+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39277 }
39278
39279 /*
39280@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39281 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39282 par->pmi_setpal = par->ypan = 0;
39283 } else {
39284+
39285+#ifdef CONFIG_PAX_KERNEXEC
39286+#ifdef CONFIG_MODULES
39287+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39288+#endif
39289+ if (!par->pmi_code) {
39290+ par->pmi_setpal = par->ypan = 0;
39291+ return 0;
39292+ }
39293+#endif
39294+
39295 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39296 + task->t.regs.edi);
39297+
39298+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39299+ pax_open_kernel();
39300+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39301+ pax_close_kernel();
39302+
39303+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39304+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39305+#else
39306 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39307 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39308+#endif
39309+
39310 printk(KERN_INFO "uvesafb: protected mode interface info at "
39311 "%04x:%04x\n",
39312 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39313@@ -1821,6 +1844,11 @@ out:
39314 if (par->vbe_modes)
39315 kfree(par->vbe_modes);
39316
39317+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39318+ if (par->pmi_code)
39319+ module_free_exec(NULL, par->pmi_code);
39320+#endif
39321+
39322 framebuffer_release(info);
39323 return err;
39324 }
39325@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39326 kfree(par->vbe_state_orig);
39327 if (par->vbe_state_saved)
39328 kfree(par->vbe_state_saved);
39329+
39330+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39331+ if (par->pmi_code)
39332+ module_free_exec(NULL, par->pmi_code);
39333+#endif
39334+
39335 }
39336
39337 framebuffer_release(info);
39338diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39339index 501b340..86bd4cf 100644
39340--- a/drivers/video/vesafb.c
39341+++ b/drivers/video/vesafb.c
39342@@ -9,6 +9,7 @@
39343 */
39344
39345 #include <linux/module.h>
39346+#include <linux/moduleloader.h>
39347 #include <linux/kernel.h>
39348 #include <linux/errno.h>
39349 #include <linux/string.h>
39350@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39351 static int vram_total __initdata; /* Set total amount of memory */
39352 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39353 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39354-static void (*pmi_start)(void) __read_mostly;
39355-static void (*pmi_pal) (void) __read_mostly;
39356+static void (*pmi_start)(void) __read_only;
39357+static void (*pmi_pal) (void) __read_only;
39358 static int depth __read_mostly;
39359 static int vga_compat __read_mostly;
39360 /* --------------------------------------------------------------------- */
39361@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39362 unsigned int size_vmode;
39363 unsigned int size_remap;
39364 unsigned int size_total;
39365+ void *pmi_code = NULL;
39366
39367 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39368 return -ENODEV;
39369@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39370 size_remap = size_total;
39371 vesafb_fix.smem_len = size_remap;
39372
39373-#ifndef __i386__
39374- screen_info.vesapm_seg = 0;
39375-#endif
39376-
39377 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39378 printk(KERN_WARNING
39379 "vesafb: cannot reserve video memory at 0x%lx\n",
39380@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39381 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39382 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39383
39384+#ifdef __i386__
39385+
39386+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39387+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39388+ if (!pmi_code)
39389+#elif !defined(CONFIG_PAX_KERNEXEC)
39390+ if (0)
39391+#endif
39392+
39393+#endif
39394+ screen_info.vesapm_seg = 0;
39395+
39396 if (screen_info.vesapm_seg) {
39397- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39398- screen_info.vesapm_seg,screen_info.vesapm_off);
39399+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39400+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39401 }
39402
39403 if (screen_info.vesapm_seg < 0xc000)
39404@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39405
39406 if (ypan || pmi_setpal) {
39407 unsigned short *pmi_base;
39408+
39409 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39410- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39411- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39412+
39413+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39414+ pax_open_kernel();
39415+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39416+#else
39417+ pmi_code = pmi_base;
39418+#endif
39419+
39420+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39421+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39422+
39423+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39424+ pmi_start = ktva_ktla(pmi_start);
39425+ pmi_pal = ktva_ktla(pmi_pal);
39426+ pax_close_kernel();
39427+#endif
39428+
39429 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39430 if (pmi_base[3]) {
39431 printk(KERN_INFO "vesafb: pmi: ports = ");
39432@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39433 info->node, info->fix.id);
39434 return 0;
39435 err:
39436+
39437+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39438+ module_free_exec(NULL, pmi_code);
39439+#endif
39440+
39441 if (info->screen_base)
39442 iounmap(info->screen_base);
39443 framebuffer_release(info);
39444diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39445index 88714ae..16c2e11 100644
39446--- a/drivers/video/via/via_clock.h
39447+++ b/drivers/video/via/via_clock.h
39448@@ -56,7 +56,7 @@ struct via_clock {
39449
39450 void (*set_engine_pll_state)(u8 state);
39451 void (*set_engine_pll)(struct via_pll_config config);
39452-};
39453+} __no_const;
39454
39455
39456 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39457diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39458index e56c934..fc22f4b 100644
39459--- a/drivers/xen/xen-pciback/conf_space.h
39460+++ b/drivers/xen/xen-pciback/conf_space.h
39461@@ -44,15 +44,15 @@ struct config_field {
39462 struct {
39463 conf_dword_write write;
39464 conf_dword_read read;
39465- } dw;
39466+ } __no_const dw;
39467 struct {
39468 conf_word_write write;
39469 conf_word_read read;
39470- } w;
39471+ } __no_const w;
39472 struct {
39473 conf_byte_write write;
39474 conf_byte_read read;
39475- } b;
39476+ } __no_const b;
39477 } u;
39478 struct list_head list;
39479 };
39480diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39481index 879ed88..bc03a01 100644
39482--- a/fs/9p/vfs_inode.c
39483+++ b/fs/9p/vfs_inode.c
39484@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39485 void
39486 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39487 {
39488- char *s = nd_get_link(nd);
39489+ const char *s = nd_get_link(nd);
39490
39491 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39492 IS_ERR(s) ? "<error>" : s);
39493diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39494index 79e2ca7..5828ad1 100644
39495--- a/fs/Kconfig.binfmt
39496+++ b/fs/Kconfig.binfmt
39497@@ -86,7 +86,7 @@ config HAVE_AOUT
39498
39499 config BINFMT_AOUT
39500 tristate "Kernel support for a.out and ECOFF binaries"
39501- depends on HAVE_AOUT
39502+ depends on HAVE_AOUT && BROKEN
39503 ---help---
39504 A.out (Assembler.OUTput) is a set of formats for libraries and
39505 executables used in the earliest versions of UNIX. Linux used
39506diff --git a/fs/aio.c b/fs/aio.c
39507index 969beb0..09fab51 100644
39508--- a/fs/aio.c
39509+++ b/fs/aio.c
39510@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39511 size += sizeof(struct io_event) * nr_events;
39512 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39513
39514- if (nr_pages < 0)
39515+ if (nr_pages <= 0)
39516 return -EINVAL;
39517
39518 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39519@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39520 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39521 {
39522 ssize_t ret;
39523+ struct iovec iovstack;
39524
39525 #ifdef CONFIG_COMPAT
39526 if (compat)
39527 ret = compat_rw_copy_check_uvector(type,
39528 (struct compat_iovec __user *)kiocb->ki_buf,
39529- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39530+ kiocb->ki_nbytes, 1, &iovstack,
39531 &kiocb->ki_iovec, 1);
39532 else
39533 #endif
39534 ret = rw_copy_check_uvector(type,
39535 (struct iovec __user *)kiocb->ki_buf,
39536- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39537+ kiocb->ki_nbytes, 1, &iovstack,
39538 &kiocb->ki_iovec, 1);
39539 if (ret < 0)
39540 goto out;
39541
39542+ if (kiocb->ki_iovec == &iovstack) {
39543+ kiocb->ki_inline_vec = iovstack;
39544+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39545+ }
39546 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39547 kiocb->ki_cur_seg = 0;
39548 /* ki_nbytes/left now reflect bytes instead of segs */
39549diff --git a/fs/attr.c b/fs/attr.c
39550index 7ee7ba4..0c61a60 100644
39551--- a/fs/attr.c
39552+++ b/fs/attr.c
39553@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39554 unsigned long limit;
39555
39556 limit = rlimit(RLIMIT_FSIZE);
39557+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39558 if (limit != RLIM_INFINITY && offset > limit)
39559 goto out_sig;
39560 if (offset > inode->i_sb->s_maxbytes)
39561diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39562index e1fbdee..cd5ea56 100644
39563--- a/fs/autofs4/waitq.c
39564+++ b/fs/autofs4/waitq.c
39565@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39566 {
39567 unsigned long sigpipe, flags;
39568 mm_segment_t fs;
39569- const char *data = (const char *)addr;
39570+ const char __user *data = (const char __force_user *)addr;
39571 ssize_t wr = 0;
39572
39573 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39574diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39575index 8342ca6..82fd192 100644
39576--- a/fs/befs/linuxvfs.c
39577+++ b/fs/befs/linuxvfs.c
39578@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39579 {
39580 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39581 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39582- char *link = nd_get_link(nd);
39583+ const char *link = nd_get_link(nd);
39584 if (!IS_ERR(link))
39585 kfree(link);
39586 }
39587diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39588index a6395bd..a5b24c4 100644
39589--- a/fs/binfmt_aout.c
39590+++ b/fs/binfmt_aout.c
39591@@ -16,6 +16,7 @@
39592 #include <linux/string.h>
39593 #include <linux/fs.h>
39594 #include <linux/file.h>
39595+#include <linux/security.h>
39596 #include <linux/stat.h>
39597 #include <linux/fcntl.h>
39598 #include <linux/ptrace.h>
39599@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39600 #endif
39601 # define START_STACK(u) ((void __user *)u.start_stack)
39602
39603+ memset(&dump, 0, sizeof(dump));
39604+
39605 fs = get_fs();
39606 set_fs(KERNEL_DS);
39607 has_dumped = 1;
39608@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39609
39610 /* If the size of the dump file exceeds the rlimit, then see what would happen
39611 if we wrote the stack, but not the data area. */
39612+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39613 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39614 dump.u_dsize = 0;
39615
39616 /* Make sure we have enough room to write the stack and data areas. */
39617+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39618 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39619 dump.u_ssize = 0;
39620
39621@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39622 rlim = rlimit(RLIMIT_DATA);
39623 if (rlim >= RLIM_INFINITY)
39624 rlim = ~0;
39625+
39626+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39627 if (ex.a_data + ex.a_bss > rlim)
39628 return -ENOMEM;
39629
39630@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39631 install_exec_creds(bprm);
39632 current->flags &= ~PF_FORKNOEXEC;
39633
39634+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39635+ current->mm->pax_flags = 0UL;
39636+#endif
39637+
39638+#ifdef CONFIG_PAX_PAGEEXEC
39639+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39640+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39641+
39642+#ifdef CONFIG_PAX_EMUTRAMP
39643+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39644+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39645+#endif
39646+
39647+#ifdef CONFIG_PAX_MPROTECT
39648+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39649+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39650+#endif
39651+
39652+ }
39653+#endif
39654+
39655 if (N_MAGIC(ex) == OMAGIC) {
39656 unsigned long text_addr, map_size;
39657 loff_t pos;
39658@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39659
39660 down_write(&current->mm->mmap_sem);
39661 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39662- PROT_READ | PROT_WRITE | PROT_EXEC,
39663+ PROT_READ | PROT_WRITE,
39664 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39665 fd_offset + ex.a_text);
39666 up_write(&current->mm->mmap_sem);
39667diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39668index 21ac5ee..c1090ea 100644
39669--- a/fs/binfmt_elf.c
39670+++ b/fs/binfmt_elf.c
39671@@ -32,6 +32,7 @@
39672 #include <linux/elf.h>
39673 #include <linux/utsname.h>
39674 #include <linux/coredump.h>
39675+#include <linux/xattr.h>
39676 #include <asm/uaccess.h>
39677 #include <asm/param.h>
39678 #include <asm/page.h>
39679@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39680 #define elf_core_dump NULL
39681 #endif
39682
39683+#ifdef CONFIG_PAX_MPROTECT
39684+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39685+#endif
39686+
39687 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39688 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39689 #else
39690@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39691 .load_binary = load_elf_binary,
39692 .load_shlib = load_elf_library,
39693 .core_dump = elf_core_dump,
39694+
39695+#ifdef CONFIG_PAX_MPROTECT
39696+ .handle_mprotect= elf_handle_mprotect,
39697+#endif
39698+
39699 .min_coredump = ELF_EXEC_PAGESIZE,
39700 };
39701
39702@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39703
39704 static int set_brk(unsigned long start, unsigned long end)
39705 {
39706+ unsigned long e = end;
39707+
39708 start = ELF_PAGEALIGN(start);
39709 end = ELF_PAGEALIGN(end);
39710 if (end > start) {
39711@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39712 if (BAD_ADDR(addr))
39713 return addr;
39714 }
39715- current->mm->start_brk = current->mm->brk = end;
39716+ current->mm->start_brk = current->mm->brk = e;
39717 return 0;
39718 }
39719
39720@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39721 elf_addr_t __user *u_rand_bytes;
39722 const char *k_platform = ELF_PLATFORM;
39723 const char *k_base_platform = ELF_BASE_PLATFORM;
39724- unsigned char k_rand_bytes[16];
39725+ u32 k_rand_bytes[4];
39726 int items;
39727 elf_addr_t *elf_info;
39728 int ei_index = 0;
39729 const struct cred *cred = current_cred();
39730 struct vm_area_struct *vma;
39731+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39732
39733 /*
39734 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39735@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39736 * Generate 16 random bytes for userspace PRNG seeding.
39737 */
39738 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39739- u_rand_bytes = (elf_addr_t __user *)
39740- STACK_ALLOC(p, sizeof(k_rand_bytes));
39741+ srandom32(k_rand_bytes[0] ^ random32());
39742+ srandom32(k_rand_bytes[1] ^ random32());
39743+ srandom32(k_rand_bytes[2] ^ random32());
39744+ srandom32(k_rand_bytes[3] ^ random32());
39745+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39746+ u_rand_bytes = (elf_addr_t __user *) p;
39747 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39748 return -EFAULT;
39749
39750@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39751 return -EFAULT;
39752 current->mm->env_end = p;
39753
39754+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39755+
39756 /* Put the elf_info on the stack in the right place. */
39757 sp = (elf_addr_t __user *)envp + 1;
39758- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39759+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39760 return -EFAULT;
39761 return 0;
39762 }
39763@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39764 {
39765 struct elf_phdr *elf_phdata;
39766 struct elf_phdr *eppnt;
39767- unsigned long load_addr = 0;
39768+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39769 int load_addr_set = 0;
39770 unsigned long last_bss = 0, elf_bss = 0;
39771- unsigned long error = ~0UL;
39772+ unsigned long error = -EINVAL;
39773 unsigned long total_size;
39774 int retval, i, size;
39775
39776@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39777 goto out_close;
39778 }
39779
39780+#ifdef CONFIG_PAX_SEGMEXEC
39781+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39782+ pax_task_size = SEGMEXEC_TASK_SIZE;
39783+#endif
39784+
39785 eppnt = elf_phdata;
39786 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39787 if (eppnt->p_type == PT_LOAD) {
39788@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39789 k = load_addr + eppnt->p_vaddr;
39790 if (BAD_ADDR(k) ||
39791 eppnt->p_filesz > eppnt->p_memsz ||
39792- eppnt->p_memsz > TASK_SIZE ||
39793- TASK_SIZE - eppnt->p_memsz < k) {
39794+ eppnt->p_memsz > pax_task_size ||
39795+ pax_task_size - eppnt->p_memsz < k) {
39796 error = -ENOMEM;
39797 goto out_close;
39798 }
39799@@ -528,6 +552,348 @@ out:
39800 return error;
39801 }
39802
39803+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39804+{
39805+ unsigned long pax_flags = 0UL;
39806+
39807+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39808+
39809+#ifdef CONFIG_PAX_PAGEEXEC
39810+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39811+ pax_flags |= MF_PAX_PAGEEXEC;
39812+#endif
39813+
39814+#ifdef CONFIG_PAX_SEGMEXEC
39815+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39816+ pax_flags |= MF_PAX_SEGMEXEC;
39817+#endif
39818+
39819+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39820+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39821+ if ((__supported_pte_mask & _PAGE_NX))
39822+ pax_flags &= ~MF_PAX_SEGMEXEC;
39823+ else
39824+ pax_flags &= ~MF_PAX_PAGEEXEC;
39825+ }
39826+#endif
39827+
39828+#ifdef CONFIG_PAX_EMUTRAMP
39829+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39830+ pax_flags |= MF_PAX_EMUTRAMP;
39831+#endif
39832+
39833+#ifdef CONFIG_PAX_MPROTECT
39834+ if (elf_phdata->p_flags & PF_MPROTECT)
39835+ pax_flags |= MF_PAX_MPROTECT;
39836+#endif
39837+
39838+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39839+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39840+ pax_flags |= MF_PAX_RANDMMAP;
39841+#endif
39842+
39843+#endif
39844+
39845+ return pax_flags;
39846+}
39847+
39848+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39849+{
39850+ unsigned long pax_flags = 0UL;
39851+
39852+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39853+
39854+#ifdef CONFIG_PAX_PAGEEXEC
39855+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39856+ pax_flags |= MF_PAX_PAGEEXEC;
39857+#endif
39858+
39859+#ifdef CONFIG_PAX_SEGMEXEC
39860+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39861+ pax_flags |= MF_PAX_SEGMEXEC;
39862+#endif
39863+
39864+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39865+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39866+ if ((__supported_pte_mask & _PAGE_NX))
39867+ pax_flags &= ~MF_PAX_SEGMEXEC;
39868+ else
39869+ pax_flags &= ~MF_PAX_PAGEEXEC;
39870+ }
39871+#endif
39872+
39873+#ifdef CONFIG_PAX_EMUTRAMP
39874+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39875+ pax_flags |= MF_PAX_EMUTRAMP;
39876+#endif
39877+
39878+#ifdef CONFIG_PAX_MPROTECT
39879+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39880+ pax_flags |= MF_PAX_MPROTECT;
39881+#endif
39882+
39883+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39884+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39885+ pax_flags |= MF_PAX_RANDMMAP;
39886+#endif
39887+
39888+#endif
39889+
39890+ return pax_flags;
39891+}
39892+
39893+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39894+{
39895+ unsigned long pax_flags = 0UL;
39896+
39897+#ifdef CONFIG_PAX_EI_PAX
39898+
39899+#ifdef CONFIG_PAX_PAGEEXEC
39900+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39901+ pax_flags |= MF_PAX_PAGEEXEC;
39902+#endif
39903+
39904+#ifdef CONFIG_PAX_SEGMEXEC
39905+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39906+ pax_flags |= MF_PAX_SEGMEXEC;
39907+#endif
39908+
39909+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39910+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39911+ if ((__supported_pte_mask & _PAGE_NX))
39912+ pax_flags &= ~MF_PAX_SEGMEXEC;
39913+ else
39914+ pax_flags &= ~MF_PAX_PAGEEXEC;
39915+ }
39916+#endif
39917+
39918+#ifdef CONFIG_PAX_EMUTRAMP
39919+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39920+ pax_flags |= MF_PAX_EMUTRAMP;
39921+#endif
39922+
39923+#ifdef CONFIG_PAX_MPROTECT
39924+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39925+ pax_flags |= MF_PAX_MPROTECT;
39926+#endif
39927+
39928+#ifdef CONFIG_PAX_ASLR
39929+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39930+ pax_flags |= MF_PAX_RANDMMAP;
39931+#endif
39932+
39933+#else
39934+
39935+#ifdef CONFIG_PAX_PAGEEXEC
39936+ pax_flags |= MF_PAX_PAGEEXEC;
39937+#endif
39938+
39939+#ifdef CONFIG_PAX_MPROTECT
39940+ pax_flags |= MF_PAX_MPROTECT;
39941+#endif
39942+
39943+#ifdef CONFIG_PAX_RANDMMAP
39944+ pax_flags |= MF_PAX_RANDMMAP;
39945+#endif
39946+
39947+#ifdef CONFIG_PAX_SEGMEXEC
39948+ if (!(__supported_pte_mask & _PAGE_NX)) {
39949+ pax_flags &= ~MF_PAX_PAGEEXEC;
39950+ pax_flags |= MF_PAX_SEGMEXEC;
39951+ }
39952+#endif
39953+
39954+#endif
39955+
39956+ return pax_flags;
39957+}
39958+
39959+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39960+{
39961+
39962+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39963+ unsigned long i;
39964+
39965+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39966+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39967+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39968+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39969+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39970+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39971+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39972+ return ~0UL;
39973+
39974+#ifdef CONFIG_PAX_SOFTMODE
39975+ if (pax_softmode)
39976+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39977+ else
39978+#endif
39979+
39980+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39981+ break;
39982+ }
39983+#endif
39984+
39985+ return ~0UL;
39986+}
39987+
39988+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39989+{
39990+ unsigned long pax_flags = 0UL;
39991+
39992+#ifdef CONFIG_PAX_PAGEEXEC
39993+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39994+ pax_flags |= MF_PAX_PAGEEXEC;
39995+#endif
39996+
39997+#ifdef CONFIG_PAX_SEGMEXEC
39998+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39999+ pax_flags |= MF_PAX_SEGMEXEC;
40000+#endif
40001+
40002+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40003+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40004+ if ((__supported_pte_mask & _PAGE_NX))
40005+ pax_flags &= ~MF_PAX_SEGMEXEC;
40006+ else
40007+ pax_flags &= ~MF_PAX_PAGEEXEC;
40008+ }
40009+#endif
40010+
40011+#ifdef CONFIG_PAX_EMUTRAMP
40012+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40013+ pax_flags |= MF_PAX_EMUTRAMP;
40014+#endif
40015+
40016+#ifdef CONFIG_PAX_MPROTECT
40017+ if (pax_flags_softmode & MF_PAX_MPROTECT)
40018+ pax_flags |= MF_PAX_MPROTECT;
40019+#endif
40020+
40021+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40022+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40023+ pax_flags |= MF_PAX_RANDMMAP;
40024+#endif
40025+
40026+ return pax_flags;
40027+}
40028+
40029+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40030+{
40031+ unsigned long pax_flags = 0UL;
40032+
40033+#ifdef CONFIG_PAX_PAGEEXEC
40034+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40035+ pax_flags |= MF_PAX_PAGEEXEC;
40036+#endif
40037+
40038+#ifdef CONFIG_PAX_SEGMEXEC
40039+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40040+ pax_flags |= MF_PAX_SEGMEXEC;
40041+#endif
40042+
40043+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40044+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40045+ if ((__supported_pte_mask & _PAGE_NX))
40046+ pax_flags &= ~MF_PAX_SEGMEXEC;
40047+ else
40048+ pax_flags &= ~MF_PAX_PAGEEXEC;
40049+ }
40050+#endif
40051+
40052+#ifdef CONFIG_PAX_EMUTRAMP
40053+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40054+ pax_flags |= MF_PAX_EMUTRAMP;
40055+#endif
40056+
40057+#ifdef CONFIG_PAX_MPROTECT
40058+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40059+ pax_flags |= MF_PAX_MPROTECT;
40060+#endif
40061+
40062+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40063+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40064+ pax_flags |= MF_PAX_RANDMMAP;
40065+#endif
40066+
40067+ return pax_flags;
40068+}
40069+
40070+static unsigned long pax_parse_xattr_pax(struct file * const file)
40071+{
40072+
40073+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40074+ ssize_t xattr_size, i;
40075+ unsigned char xattr_value[5];
40076+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40077+
40078+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40079+ if (xattr_size <= 0)
40080+ return ~0UL;
40081+
40082+ for (i = 0; i < xattr_size; i++)
40083+ switch (xattr_value[i]) {
40084+ default:
40085+ return ~0UL;
40086+
40087+#define parse_flag(option1, option2, flag) \
40088+ case option1: \
40089+ pax_flags_hardmode |= MF_PAX_##flag; \
40090+ break; \
40091+ case option2: \
40092+ pax_flags_softmode |= MF_PAX_##flag; \
40093+ break;
40094+
40095+ parse_flag('p', 'P', PAGEEXEC);
40096+ parse_flag('e', 'E', EMUTRAMP);
40097+ parse_flag('m', 'M', MPROTECT);
40098+ parse_flag('r', 'R', RANDMMAP);
40099+ parse_flag('s', 'S', SEGMEXEC);
40100+
40101+#undef parse_flag
40102+ }
40103+
40104+ if (pax_flags_hardmode & pax_flags_softmode)
40105+ return ~0UL;
40106+
40107+#ifdef CONFIG_PAX_SOFTMODE
40108+ if (pax_softmode)
40109+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40110+ else
40111+#endif
40112+
40113+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40114+#else
40115+ return ~0UL;
40116+#endif
40117+}
40118+
40119+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40120+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40121+{
40122+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40123+
40124+ pax_flags = pax_parse_ei_pax(elf_ex);
40125+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40126+ xattr_pax_flags = pax_parse_xattr_pax(file);
40127+
40128+ if (pt_pax_flags == ~0UL)
40129+ pt_pax_flags = xattr_pax_flags;
40130+ else if (xattr_pax_flags == ~0UL)
40131+ xattr_pax_flags = pt_pax_flags;
40132+ if (pt_pax_flags != xattr_pax_flags)
40133+ return -EINVAL;
40134+ if (pt_pax_flags != ~0UL)
40135+ pax_flags = pt_pax_flags;
40136+
40137+ if (0 > pax_check_flags(&pax_flags))
40138+ return -EINVAL;
40139+
40140+ current->mm->pax_flags = pax_flags;
40141+ return 0;
40142+}
40143+#endif
40144+
40145 /*
40146 * These are the functions used to load ELF style executables and shared
40147 * libraries. There is no binary dependent code anywhere else.
40148@@ -544,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40149 {
40150 unsigned int random_variable = 0;
40151
40152+#ifdef CONFIG_PAX_RANDUSTACK
40153+ if (randomize_va_space)
40154+ return stack_top - current->mm->delta_stack;
40155+#endif
40156+
40157 if ((current->flags & PF_RANDOMIZE) &&
40158 !(current->personality & ADDR_NO_RANDOMIZE)) {
40159 random_variable = get_random_int() & STACK_RND_MASK;
40160@@ -562,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40161 unsigned long load_addr = 0, load_bias = 0;
40162 int load_addr_set = 0;
40163 char * elf_interpreter = NULL;
40164- unsigned long error;
40165+ unsigned long error = 0;
40166 struct elf_phdr *elf_ppnt, *elf_phdata;
40167 unsigned long elf_bss, elf_brk;
40168 int retval, i;
40169@@ -572,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40170 unsigned long start_code, end_code, start_data, end_data;
40171 unsigned long reloc_func_desc __maybe_unused = 0;
40172 int executable_stack = EXSTACK_DEFAULT;
40173- unsigned long def_flags = 0;
40174 struct {
40175 struct elfhdr elf_ex;
40176 struct elfhdr interp_elf_ex;
40177 } *loc;
40178+ unsigned long pax_task_size = TASK_SIZE;
40179
40180 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40181 if (!loc) {
40182@@ -713,11 +1084,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40183
40184 /* OK, This is the point of no return */
40185 current->flags &= ~PF_FORKNOEXEC;
40186- current->mm->def_flags = def_flags;
40187+
40188+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40189+ current->mm->pax_flags = 0UL;
40190+#endif
40191+
40192+#ifdef CONFIG_PAX_DLRESOLVE
40193+ current->mm->call_dl_resolve = 0UL;
40194+#endif
40195+
40196+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40197+ current->mm->call_syscall = 0UL;
40198+#endif
40199+
40200+#ifdef CONFIG_PAX_ASLR
40201+ current->mm->delta_mmap = 0UL;
40202+ current->mm->delta_stack = 0UL;
40203+#endif
40204+
40205+ current->mm->def_flags = 0;
40206+
40207+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40208+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40209+ send_sig(SIGKILL, current, 0);
40210+ goto out_free_dentry;
40211+ }
40212+#endif
40213+
40214+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40215+ pax_set_initial_flags(bprm);
40216+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40217+ if (pax_set_initial_flags_func)
40218+ (pax_set_initial_flags_func)(bprm);
40219+#endif
40220+
40221+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40222+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40223+ current->mm->context.user_cs_limit = PAGE_SIZE;
40224+ current->mm->def_flags |= VM_PAGEEXEC;
40225+ }
40226+#endif
40227+
40228+#ifdef CONFIG_PAX_SEGMEXEC
40229+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40230+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40231+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40232+ pax_task_size = SEGMEXEC_TASK_SIZE;
40233+ current->mm->def_flags |= VM_NOHUGEPAGE;
40234+ }
40235+#endif
40236+
40237+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40238+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40239+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40240+ put_cpu();
40241+ }
40242+#endif
40243
40244 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40245 may depend on the personality. */
40246 SET_PERSONALITY(loc->elf_ex);
40247+
40248+#ifdef CONFIG_PAX_ASLR
40249+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40250+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40251+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40252+ }
40253+#endif
40254+
40255+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40256+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40257+ executable_stack = EXSTACK_DISABLE_X;
40258+ current->personality &= ~READ_IMPLIES_EXEC;
40259+ } else
40260+#endif
40261+
40262 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40263 current->personality |= READ_IMPLIES_EXEC;
40264
40265@@ -808,6 +1249,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40266 #else
40267 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40268 #endif
40269+
40270+#ifdef CONFIG_PAX_RANDMMAP
40271+ /* PaX: randomize base address at the default exe base if requested */
40272+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40273+#ifdef CONFIG_SPARC64
40274+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40275+#else
40276+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40277+#endif
40278+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40279+ elf_flags |= MAP_FIXED;
40280+ }
40281+#endif
40282+
40283 }
40284
40285 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40286@@ -840,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40287 * allowed task size. Note that p_filesz must always be
40288 * <= p_memsz so it is only necessary to check p_memsz.
40289 */
40290- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40291- elf_ppnt->p_memsz > TASK_SIZE ||
40292- TASK_SIZE - elf_ppnt->p_memsz < k) {
40293+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40294+ elf_ppnt->p_memsz > pax_task_size ||
40295+ pax_task_size - elf_ppnt->p_memsz < k) {
40296 /* set_brk can never work. Avoid overflows. */
40297 send_sig(SIGKILL, current, 0);
40298 retval = -EINVAL;
40299@@ -870,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40300 start_data += load_bias;
40301 end_data += load_bias;
40302
40303+#ifdef CONFIG_PAX_RANDMMAP
40304+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40305+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40306+#endif
40307+
40308 /* Calling set_brk effectively mmaps the pages that we need
40309 * for the bss and break sections. We must do this before
40310 * mapping in the interpreter, to make sure it doesn't wind
40311@@ -881,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40312 goto out_free_dentry;
40313 }
40314 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40315- send_sig(SIGSEGV, current, 0);
40316- retval = -EFAULT; /* Nobody gets to see this, but.. */
40317- goto out_free_dentry;
40318+ /*
40319+ * This bss-zeroing can fail if the ELF
40320+ * file specifies odd protections. So
40321+ * we don't check the return value
40322+ */
40323 }
40324
40325 if (elf_interpreter) {
40326@@ -1098,7 +1560,7 @@ out:
40327 * Decide what to dump of a segment, part, all or none.
40328 */
40329 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40330- unsigned long mm_flags)
40331+ unsigned long mm_flags, long signr)
40332 {
40333 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40334
40335@@ -1132,7 +1594,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40336 if (vma->vm_file == NULL)
40337 return 0;
40338
40339- if (FILTER(MAPPED_PRIVATE))
40340+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40341 goto whole;
40342
40343 /*
40344@@ -1354,9 +1816,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40345 {
40346 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40347 int i = 0;
40348- do
40349+ do {
40350 i += 2;
40351- while (auxv[i - 2] != AT_NULL);
40352+ } while (auxv[i - 2] != AT_NULL);
40353 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40354 }
40355
40356@@ -1862,14 +2324,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40357 }
40358
40359 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40360- unsigned long mm_flags)
40361+ struct coredump_params *cprm)
40362 {
40363 struct vm_area_struct *vma;
40364 size_t size = 0;
40365
40366 for (vma = first_vma(current, gate_vma); vma != NULL;
40367 vma = next_vma(vma, gate_vma))
40368- size += vma_dump_size(vma, mm_flags);
40369+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40370 return size;
40371 }
40372
40373@@ -1963,7 +2425,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40374
40375 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40376
40377- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40378+ offset += elf_core_vma_data_size(gate_vma, cprm);
40379 offset += elf_core_extra_data_size();
40380 e_shoff = offset;
40381
40382@@ -1977,10 +2439,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40383 offset = dataoff;
40384
40385 size += sizeof(*elf);
40386+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40387 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40388 goto end_coredump;
40389
40390 size += sizeof(*phdr4note);
40391+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40392 if (size > cprm->limit
40393 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40394 goto end_coredump;
40395@@ -1994,7 +2458,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40396 phdr.p_offset = offset;
40397 phdr.p_vaddr = vma->vm_start;
40398 phdr.p_paddr = 0;
40399- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40400+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40401 phdr.p_memsz = vma->vm_end - vma->vm_start;
40402 offset += phdr.p_filesz;
40403 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40404@@ -2005,6 +2469,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40405 phdr.p_align = ELF_EXEC_PAGESIZE;
40406
40407 size += sizeof(phdr);
40408+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40409 if (size > cprm->limit
40410 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40411 goto end_coredump;
40412@@ -2029,7 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40413 unsigned long addr;
40414 unsigned long end;
40415
40416- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40417+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40418
40419 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40420 struct page *page;
40421@@ -2038,6 +2503,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40422 page = get_dump_page(addr);
40423 if (page) {
40424 void *kaddr = kmap(page);
40425+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40426 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40427 !dump_write(cprm->file, kaddr,
40428 PAGE_SIZE);
40429@@ -2055,6 +2521,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40430
40431 if (e_phnum == PN_XNUM) {
40432 size += sizeof(*shdr4extnum);
40433+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40434 if (size > cprm->limit
40435 || !dump_write(cprm->file, shdr4extnum,
40436 sizeof(*shdr4extnum)))
40437@@ -2075,6 +2542,97 @@ out:
40438
40439 #endif /* CONFIG_ELF_CORE */
40440
40441+#ifdef CONFIG_PAX_MPROTECT
40442+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40443+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40444+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40445+ *
40446+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40447+ * basis because we want to allow the common case and not the special ones.
40448+ */
40449+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40450+{
40451+ struct elfhdr elf_h;
40452+ struct elf_phdr elf_p;
40453+ unsigned long i;
40454+ unsigned long oldflags;
40455+ bool is_textrel_rw, is_textrel_rx, is_relro;
40456+
40457+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40458+ return;
40459+
40460+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40461+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40462+
40463+#ifdef CONFIG_PAX_ELFRELOCS
40464+ /* possible TEXTREL */
40465+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40466+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40467+#else
40468+ is_textrel_rw = false;
40469+ is_textrel_rx = false;
40470+#endif
40471+
40472+ /* possible RELRO */
40473+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40474+
40475+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40476+ return;
40477+
40478+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40479+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40480+
40481+#ifdef CONFIG_PAX_ETEXECRELOCS
40482+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40483+#else
40484+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40485+#endif
40486+
40487+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40488+ !elf_check_arch(&elf_h) ||
40489+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40490+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40491+ return;
40492+
40493+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40494+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40495+ return;
40496+ switch (elf_p.p_type) {
40497+ case PT_DYNAMIC:
40498+ if (!is_textrel_rw && !is_textrel_rx)
40499+ continue;
40500+ i = 0UL;
40501+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40502+ elf_dyn dyn;
40503+
40504+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40505+ return;
40506+ if (dyn.d_tag == DT_NULL)
40507+ return;
40508+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40509+ gr_log_textrel(vma);
40510+ if (is_textrel_rw)
40511+ vma->vm_flags |= VM_MAYWRITE;
40512+ else
40513+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40514+ vma->vm_flags &= ~VM_MAYWRITE;
40515+ return;
40516+ }
40517+ i++;
40518+ }
40519+ return;
40520+
40521+ case PT_GNU_RELRO:
40522+ if (!is_relro)
40523+ continue;
40524+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40525+ vma->vm_flags &= ~VM_MAYWRITE;
40526+ return;
40527+ }
40528+ }
40529+}
40530+#endif
40531+
40532 static int __init init_elf_binfmt(void)
40533 {
40534 return register_binfmt(&elf_format);
40535diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40536index 1bffbe0..c8c283e 100644
40537--- a/fs/binfmt_flat.c
40538+++ b/fs/binfmt_flat.c
40539@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40540 realdatastart = (unsigned long) -ENOMEM;
40541 printk("Unable to allocate RAM for process data, errno %d\n",
40542 (int)-realdatastart);
40543+ down_write(&current->mm->mmap_sem);
40544 do_munmap(current->mm, textpos, text_len);
40545+ up_write(&current->mm->mmap_sem);
40546 ret = realdatastart;
40547 goto err;
40548 }
40549@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40550 }
40551 if (IS_ERR_VALUE(result)) {
40552 printk("Unable to read data+bss, errno %d\n", (int)-result);
40553+ down_write(&current->mm->mmap_sem);
40554 do_munmap(current->mm, textpos, text_len);
40555 do_munmap(current->mm, realdatastart, len);
40556+ up_write(&current->mm->mmap_sem);
40557 ret = result;
40558 goto err;
40559 }
40560@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40561 }
40562 if (IS_ERR_VALUE(result)) {
40563 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40564+ down_write(&current->mm->mmap_sem);
40565 do_munmap(current->mm, textpos, text_len + data_len + extra +
40566 MAX_SHARED_LIBS * sizeof(unsigned long));
40567+ up_write(&current->mm->mmap_sem);
40568 ret = result;
40569 goto err;
40570 }
40571diff --git a/fs/bio.c b/fs/bio.c
40572index b1fe82c..84da0a9 100644
40573--- a/fs/bio.c
40574+++ b/fs/bio.c
40575@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40576 const int read = bio_data_dir(bio) == READ;
40577 struct bio_map_data *bmd = bio->bi_private;
40578 int i;
40579- char *p = bmd->sgvecs[0].iov_base;
40580+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40581
40582 __bio_for_each_segment(bvec, bio, i, 0) {
40583 char *addr = page_address(bvec->bv_page);
40584diff --git a/fs/block_dev.c b/fs/block_dev.c
40585index b07f1da..9efcb92 100644
40586--- a/fs/block_dev.c
40587+++ b/fs/block_dev.c
40588@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40589 else if (bdev->bd_contains == bdev)
40590 return true; /* is a whole device which isn't held */
40591
40592- else if (whole->bd_holder == bd_may_claim)
40593+ else if (whole->bd_holder == (void *)bd_may_claim)
40594 return true; /* is a partition of a device that is being partitioned */
40595 else if (whole->bd_holder != NULL)
40596 return false; /* is a partition of a held device */
40597diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40598index dede441..f2a2507 100644
40599--- a/fs/btrfs/ctree.c
40600+++ b/fs/btrfs/ctree.c
40601@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40602 free_extent_buffer(buf);
40603 add_root_to_dirty_list(root);
40604 } else {
40605- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40606- parent_start = parent->start;
40607- else
40608+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40609+ if (parent)
40610+ parent_start = parent->start;
40611+ else
40612+ parent_start = 0;
40613+ } else
40614 parent_start = 0;
40615
40616 WARN_ON(trans->transid != btrfs_header_generation(parent));
40617diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40618index fd1a06d..6e9033d 100644
40619--- a/fs/btrfs/inode.c
40620+++ b/fs/btrfs/inode.c
40621@@ -6895,7 +6895,7 @@ fail:
40622 return -ENOMEM;
40623 }
40624
40625-static int btrfs_getattr(struct vfsmount *mnt,
40626+int btrfs_getattr(struct vfsmount *mnt,
40627 struct dentry *dentry, struct kstat *stat)
40628 {
40629 struct inode *inode = dentry->d_inode;
40630@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40631 return 0;
40632 }
40633
40634+EXPORT_SYMBOL(btrfs_getattr);
40635+
40636+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40637+{
40638+ return BTRFS_I(inode)->root->anon_dev;
40639+}
40640+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40641+
40642 /*
40643 * If a file is moved, it will inherit the cow and compression flags of the new
40644 * directory.
40645diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40646index c04f02c..f5c9e2e 100644
40647--- a/fs/btrfs/ioctl.c
40648+++ b/fs/btrfs/ioctl.c
40649@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40650 for (i = 0; i < num_types; i++) {
40651 struct btrfs_space_info *tmp;
40652
40653+ /* Don't copy in more than we allocated */
40654 if (!slot_count)
40655 break;
40656
40657+ slot_count--;
40658+
40659 info = NULL;
40660 rcu_read_lock();
40661 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40662@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40663 memcpy(dest, &space, sizeof(space));
40664 dest++;
40665 space_args.total_spaces++;
40666- slot_count--;
40667 }
40668- if (!slot_count)
40669- break;
40670 }
40671 up_read(&info->groups_sem);
40672 }
40673
40674- user_dest = (struct btrfs_ioctl_space_info *)
40675+ user_dest = (struct btrfs_ioctl_space_info __user *)
40676 (arg + sizeof(struct btrfs_ioctl_space_args));
40677
40678 if (copy_to_user(user_dest, dest_orig, alloc_size))
40679diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40680index cfb5543..1ae7347 100644
40681--- a/fs/btrfs/relocation.c
40682+++ b/fs/btrfs/relocation.c
40683@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40684 }
40685 spin_unlock(&rc->reloc_root_tree.lock);
40686
40687- BUG_ON((struct btrfs_root *)node->data != root);
40688+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40689
40690 if (!del) {
40691 spin_lock(&rc->reloc_root_tree.lock);
40692diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40693index 622f469..e8d2d55 100644
40694--- a/fs/cachefiles/bind.c
40695+++ b/fs/cachefiles/bind.c
40696@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40697 args);
40698
40699 /* start by checking things over */
40700- ASSERT(cache->fstop_percent >= 0 &&
40701- cache->fstop_percent < cache->fcull_percent &&
40702+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40703 cache->fcull_percent < cache->frun_percent &&
40704 cache->frun_percent < 100);
40705
40706- ASSERT(cache->bstop_percent >= 0 &&
40707- cache->bstop_percent < cache->bcull_percent &&
40708+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40709 cache->bcull_percent < cache->brun_percent &&
40710 cache->brun_percent < 100);
40711
40712diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40713index 0a1467b..6a53245 100644
40714--- a/fs/cachefiles/daemon.c
40715+++ b/fs/cachefiles/daemon.c
40716@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40717 if (n > buflen)
40718 return -EMSGSIZE;
40719
40720- if (copy_to_user(_buffer, buffer, n) != 0)
40721+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40722 return -EFAULT;
40723
40724 return n;
40725@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40726 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40727 return -EIO;
40728
40729- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40730+ if (datalen > PAGE_SIZE - 1)
40731 return -EOPNOTSUPP;
40732
40733 /* drag the command string into the kernel so we can parse it */
40734@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40735 if (args[0] != '%' || args[1] != '\0')
40736 return -EINVAL;
40737
40738- if (fstop < 0 || fstop >= cache->fcull_percent)
40739+ if (fstop >= cache->fcull_percent)
40740 return cachefiles_daemon_range_error(cache, args);
40741
40742 cache->fstop_percent = fstop;
40743@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40744 if (args[0] != '%' || args[1] != '\0')
40745 return -EINVAL;
40746
40747- if (bstop < 0 || bstop >= cache->bcull_percent)
40748+ if (bstop >= cache->bcull_percent)
40749 return cachefiles_daemon_range_error(cache, args);
40750
40751 cache->bstop_percent = bstop;
40752diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40753index bd6bc1b..b627b53 100644
40754--- a/fs/cachefiles/internal.h
40755+++ b/fs/cachefiles/internal.h
40756@@ -57,7 +57,7 @@ struct cachefiles_cache {
40757 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40758 struct rb_root active_nodes; /* active nodes (can't be culled) */
40759 rwlock_t active_lock; /* lock for active_nodes */
40760- atomic_t gravecounter; /* graveyard uniquifier */
40761+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40762 unsigned frun_percent; /* when to stop culling (% files) */
40763 unsigned fcull_percent; /* when to start culling (% files) */
40764 unsigned fstop_percent; /* when to stop allocating (% files) */
40765@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40766 * proc.c
40767 */
40768 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40769-extern atomic_t cachefiles_lookup_histogram[HZ];
40770-extern atomic_t cachefiles_mkdir_histogram[HZ];
40771-extern atomic_t cachefiles_create_histogram[HZ];
40772+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40773+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40774+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40775
40776 extern int __init cachefiles_proc_init(void);
40777 extern void cachefiles_proc_cleanup(void);
40778 static inline
40779-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40780+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40781 {
40782 unsigned long jif = jiffies - start_jif;
40783 if (jif >= HZ)
40784 jif = HZ - 1;
40785- atomic_inc(&histogram[jif]);
40786+ atomic_inc_unchecked(&histogram[jif]);
40787 }
40788
40789 #else
40790diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40791index a0358c2..d6137f2 100644
40792--- a/fs/cachefiles/namei.c
40793+++ b/fs/cachefiles/namei.c
40794@@ -318,7 +318,7 @@ try_again:
40795 /* first step is to make up a grave dentry in the graveyard */
40796 sprintf(nbuffer, "%08x%08x",
40797 (uint32_t) get_seconds(),
40798- (uint32_t) atomic_inc_return(&cache->gravecounter));
40799+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40800
40801 /* do the multiway lock magic */
40802 trap = lock_rename(cache->graveyard, dir);
40803diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40804index eccd339..4c1d995 100644
40805--- a/fs/cachefiles/proc.c
40806+++ b/fs/cachefiles/proc.c
40807@@ -14,9 +14,9 @@
40808 #include <linux/seq_file.h>
40809 #include "internal.h"
40810
40811-atomic_t cachefiles_lookup_histogram[HZ];
40812-atomic_t cachefiles_mkdir_histogram[HZ];
40813-atomic_t cachefiles_create_histogram[HZ];
40814+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40815+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40816+atomic_unchecked_t cachefiles_create_histogram[HZ];
40817
40818 /*
40819 * display the latency histogram
40820@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40821 return 0;
40822 default:
40823 index = (unsigned long) v - 3;
40824- x = atomic_read(&cachefiles_lookup_histogram[index]);
40825- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40826- z = atomic_read(&cachefiles_create_histogram[index]);
40827+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40828+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40829+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40830 if (x == 0 && y == 0 && z == 0)
40831 return 0;
40832
40833diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40834index 0e3c092..818480e 100644
40835--- a/fs/cachefiles/rdwr.c
40836+++ b/fs/cachefiles/rdwr.c
40837@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40838 old_fs = get_fs();
40839 set_fs(KERNEL_DS);
40840 ret = file->f_op->write(
40841- file, (const void __user *) data, len, &pos);
40842+ file, (const void __force_user *) data, len, &pos);
40843 set_fs(old_fs);
40844 kunmap(page);
40845 if (ret != len)
40846diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40847index 9895400..fa40a7d 100644
40848--- a/fs/ceph/dir.c
40849+++ b/fs/ceph/dir.c
40850@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40851 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40852 struct ceph_mds_client *mdsc = fsc->mdsc;
40853 unsigned frag = fpos_frag(filp->f_pos);
40854- int off = fpos_off(filp->f_pos);
40855+ unsigned int off = fpos_off(filp->f_pos);
40856 int err;
40857 u32 ftype;
40858 struct ceph_mds_reply_info_parsed *rinfo;
40859diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40860index 84e8c07..6170d31 100644
40861--- a/fs/cifs/cifs_debug.c
40862+++ b/fs/cifs/cifs_debug.c
40863@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40864
40865 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40866 #ifdef CONFIG_CIFS_STATS2
40867- atomic_set(&totBufAllocCount, 0);
40868- atomic_set(&totSmBufAllocCount, 0);
40869+ atomic_set_unchecked(&totBufAllocCount, 0);
40870+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40871 #endif /* CONFIG_CIFS_STATS2 */
40872 spin_lock(&cifs_tcp_ses_lock);
40873 list_for_each(tmp1, &cifs_tcp_ses_list) {
40874@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40875 tcon = list_entry(tmp3,
40876 struct cifs_tcon,
40877 tcon_list);
40878- atomic_set(&tcon->num_smbs_sent, 0);
40879- atomic_set(&tcon->num_writes, 0);
40880- atomic_set(&tcon->num_reads, 0);
40881- atomic_set(&tcon->num_oplock_brks, 0);
40882- atomic_set(&tcon->num_opens, 0);
40883- atomic_set(&tcon->num_posixopens, 0);
40884- atomic_set(&tcon->num_posixmkdirs, 0);
40885- atomic_set(&tcon->num_closes, 0);
40886- atomic_set(&tcon->num_deletes, 0);
40887- atomic_set(&tcon->num_mkdirs, 0);
40888- atomic_set(&tcon->num_rmdirs, 0);
40889- atomic_set(&tcon->num_renames, 0);
40890- atomic_set(&tcon->num_t2renames, 0);
40891- atomic_set(&tcon->num_ffirst, 0);
40892- atomic_set(&tcon->num_fnext, 0);
40893- atomic_set(&tcon->num_fclose, 0);
40894- atomic_set(&tcon->num_hardlinks, 0);
40895- atomic_set(&tcon->num_symlinks, 0);
40896- atomic_set(&tcon->num_locks, 0);
40897+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40898+ atomic_set_unchecked(&tcon->num_writes, 0);
40899+ atomic_set_unchecked(&tcon->num_reads, 0);
40900+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40901+ atomic_set_unchecked(&tcon->num_opens, 0);
40902+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40903+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40904+ atomic_set_unchecked(&tcon->num_closes, 0);
40905+ atomic_set_unchecked(&tcon->num_deletes, 0);
40906+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40907+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40908+ atomic_set_unchecked(&tcon->num_renames, 0);
40909+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40910+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40911+ atomic_set_unchecked(&tcon->num_fnext, 0);
40912+ atomic_set_unchecked(&tcon->num_fclose, 0);
40913+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40914+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40915+ atomic_set_unchecked(&tcon->num_locks, 0);
40916 }
40917 }
40918 }
40919@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40920 smBufAllocCount.counter, cifs_min_small);
40921 #ifdef CONFIG_CIFS_STATS2
40922 seq_printf(m, "Total Large %d Small %d Allocations\n",
40923- atomic_read(&totBufAllocCount),
40924- atomic_read(&totSmBufAllocCount));
40925+ atomic_read_unchecked(&totBufAllocCount),
40926+ atomic_read_unchecked(&totSmBufAllocCount));
40927 #endif /* CONFIG_CIFS_STATS2 */
40928
40929 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40930@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40931 if (tcon->need_reconnect)
40932 seq_puts(m, "\tDISCONNECTED ");
40933 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40934- atomic_read(&tcon->num_smbs_sent),
40935- atomic_read(&tcon->num_oplock_brks));
40936+ atomic_read_unchecked(&tcon->num_smbs_sent),
40937+ atomic_read_unchecked(&tcon->num_oplock_brks));
40938 seq_printf(m, "\nReads: %d Bytes: %lld",
40939- atomic_read(&tcon->num_reads),
40940+ atomic_read_unchecked(&tcon->num_reads),
40941 (long long)(tcon->bytes_read));
40942 seq_printf(m, "\nWrites: %d Bytes: %lld",
40943- atomic_read(&tcon->num_writes),
40944+ atomic_read_unchecked(&tcon->num_writes),
40945 (long long)(tcon->bytes_written));
40946 seq_printf(m, "\nFlushes: %d",
40947- atomic_read(&tcon->num_flushes));
40948+ atomic_read_unchecked(&tcon->num_flushes));
40949 seq_printf(m, "\nLocks: %d HardLinks: %d "
40950 "Symlinks: %d",
40951- atomic_read(&tcon->num_locks),
40952- atomic_read(&tcon->num_hardlinks),
40953- atomic_read(&tcon->num_symlinks));
40954+ atomic_read_unchecked(&tcon->num_locks),
40955+ atomic_read_unchecked(&tcon->num_hardlinks),
40956+ atomic_read_unchecked(&tcon->num_symlinks));
40957 seq_printf(m, "\nOpens: %d Closes: %d "
40958 "Deletes: %d",
40959- atomic_read(&tcon->num_opens),
40960- atomic_read(&tcon->num_closes),
40961- atomic_read(&tcon->num_deletes));
40962+ atomic_read_unchecked(&tcon->num_opens),
40963+ atomic_read_unchecked(&tcon->num_closes),
40964+ atomic_read_unchecked(&tcon->num_deletes));
40965 seq_printf(m, "\nPosix Opens: %d "
40966 "Posix Mkdirs: %d",
40967- atomic_read(&tcon->num_posixopens),
40968- atomic_read(&tcon->num_posixmkdirs));
40969+ atomic_read_unchecked(&tcon->num_posixopens),
40970+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40971 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40972- atomic_read(&tcon->num_mkdirs),
40973- atomic_read(&tcon->num_rmdirs));
40974+ atomic_read_unchecked(&tcon->num_mkdirs),
40975+ atomic_read_unchecked(&tcon->num_rmdirs));
40976 seq_printf(m, "\nRenames: %d T2 Renames %d",
40977- atomic_read(&tcon->num_renames),
40978- atomic_read(&tcon->num_t2renames));
40979+ atomic_read_unchecked(&tcon->num_renames),
40980+ atomic_read_unchecked(&tcon->num_t2renames));
40981 seq_printf(m, "\nFindFirst: %d FNext %d "
40982 "FClose %d",
40983- atomic_read(&tcon->num_ffirst),
40984- atomic_read(&tcon->num_fnext),
40985- atomic_read(&tcon->num_fclose));
40986+ atomic_read_unchecked(&tcon->num_ffirst),
40987+ atomic_read_unchecked(&tcon->num_fnext),
40988+ atomic_read_unchecked(&tcon->num_fclose));
40989 }
40990 }
40991 }
40992diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40993index 8f1fe32..38f9e27 100644
40994--- a/fs/cifs/cifsfs.c
40995+++ b/fs/cifs/cifsfs.c
40996@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40997 cifs_req_cachep = kmem_cache_create("cifs_request",
40998 CIFSMaxBufSize +
40999 MAX_CIFS_HDR_SIZE, 0,
41000- SLAB_HWCACHE_ALIGN, NULL);
41001+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41002 if (cifs_req_cachep == NULL)
41003 return -ENOMEM;
41004
41005@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41006 efficient to alloc 1 per page off the slab compared to 17K (5page)
41007 alloc of large cifs buffers even when page debugging is on */
41008 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41009- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41010+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41011 NULL);
41012 if (cifs_sm_req_cachep == NULL) {
41013 mempool_destroy(cifs_req_poolp);
41014@@ -1101,8 +1101,8 @@ init_cifs(void)
41015 atomic_set(&bufAllocCount, 0);
41016 atomic_set(&smBufAllocCount, 0);
41017 #ifdef CONFIG_CIFS_STATS2
41018- atomic_set(&totBufAllocCount, 0);
41019- atomic_set(&totSmBufAllocCount, 0);
41020+ atomic_set_unchecked(&totBufAllocCount, 0);
41021+ atomic_set_unchecked(&totSmBufAllocCount, 0);
41022 #endif /* CONFIG_CIFS_STATS2 */
41023
41024 atomic_set(&midCount, 0);
41025diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41026index 8238aa1..0347196 100644
41027--- a/fs/cifs/cifsglob.h
41028+++ b/fs/cifs/cifsglob.h
41029@@ -392,28 +392,28 @@ struct cifs_tcon {
41030 __u16 Flags; /* optional support bits */
41031 enum statusEnum tidStatus;
41032 #ifdef CONFIG_CIFS_STATS
41033- atomic_t num_smbs_sent;
41034- atomic_t num_writes;
41035- atomic_t num_reads;
41036- atomic_t num_flushes;
41037- atomic_t num_oplock_brks;
41038- atomic_t num_opens;
41039- atomic_t num_closes;
41040- atomic_t num_deletes;
41041- atomic_t num_mkdirs;
41042- atomic_t num_posixopens;
41043- atomic_t num_posixmkdirs;
41044- atomic_t num_rmdirs;
41045- atomic_t num_renames;
41046- atomic_t num_t2renames;
41047- atomic_t num_ffirst;
41048- atomic_t num_fnext;
41049- atomic_t num_fclose;
41050- atomic_t num_hardlinks;
41051- atomic_t num_symlinks;
41052- atomic_t num_locks;
41053- atomic_t num_acl_get;
41054- atomic_t num_acl_set;
41055+ atomic_unchecked_t num_smbs_sent;
41056+ atomic_unchecked_t num_writes;
41057+ atomic_unchecked_t num_reads;
41058+ atomic_unchecked_t num_flushes;
41059+ atomic_unchecked_t num_oplock_brks;
41060+ atomic_unchecked_t num_opens;
41061+ atomic_unchecked_t num_closes;
41062+ atomic_unchecked_t num_deletes;
41063+ atomic_unchecked_t num_mkdirs;
41064+ atomic_unchecked_t num_posixopens;
41065+ atomic_unchecked_t num_posixmkdirs;
41066+ atomic_unchecked_t num_rmdirs;
41067+ atomic_unchecked_t num_renames;
41068+ atomic_unchecked_t num_t2renames;
41069+ atomic_unchecked_t num_ffirst;
41070+ atomic_unchecked_t num_fnext;
41071+ atomic_unchecked_t num_fclose;
41072+ atomic_unchecked_t num_hardlinks;
41073+ atomic_unchecked_t num_symlinks;
41074+ atomic_unchecked_t num_locks;
41075+ atomic_unchecked_t num_acl_get;
41076+ atomic_unchecked_t num_acl_set;
41077 #ifdef CONFIG_CIFS_STATS2
41078 unsigned long long time_writes;
41079 unsigned long long time_reads;
41080@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41081 }
41082
41083 #ifdef CONFIG_CIFS_STATS
41084-#define cifs_stats_inc atomic_inc
41085+#define cifs_stats_inc atomic_inc_unchecked
41086
41087 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41088 unsigned int bytes)
41089@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41090 /* Various Debug counters */
41091 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41092 #ifdef CONFIG_CIFS_STATS2
41093-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41094-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41095+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41096+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41097 #endif
41098 GLOBAL_EXTERN atomic_t smBufAllocCount;
41099 GLOBAL_EXTERN atomic_t midCount;
41100diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41101index 6b0e064..94e6c3c 100644
41102--- a/fs/cifs/link.c
41103+++ b/fs/cifs/link.c
41104@@ -600,7 +600,7 @@ symlink_exit:
41105
41106 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41107 {
41108- char *p = nd_get_link(nd);
41109+ const char *p = nd_get_link(nd);
41110 if (!IS_ERR(p))
41111 kfree(p);
41112 }
41113diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41114index 703ef5c..2a44ed5 100644
41115--- a/fs/cifs/misc.c
41116+++ b/fs/cifs/misc.c
41117@@ -156,7 +156,7 @@ cifs_buf_get(void)
41118 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41119 atomic_inc(&bufAllocCount);
41120 #ifdef CONFIG_CIFS_STATS2
41121- atomic_inc(&totBufAllocCount);
41122+ atomic_inc_unchecked(&totBufAllocCount);
41123 #endif /* CONFIG_CIFS_STATS2 */
41124 }
41125
41126@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41127 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41128 atomic_inc(&smBufAllocCount);
41129 #ifdef CONFIG_CIFS_STATS2
41130- atomic_inc(&totSmBufAllocCount);
41131+ atomic_inc_unchecked(&totSmBufAllocCount);
41132 #endif /* CONFIG_CIFS_STATS2 */
41133
41134 }
41135diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41136index 6901578..d402eb5 100644
41137--- a/fs/coda/cache.c
41138+++ b/fs/coda/cache.c
41139@@ -24,7 +24,7 @@
41140 #include "coda_linux.h"
41141 #include "coda_cache.h"
41142
41143-static atomic_t permission_epoch = ATOMIC_INIT(0);
41144+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41145
41146 /* replace or extend an acl cache hit */
41147 void coda_cache_enter(struct inode *inode, int mask)
41148@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41149 struct coda_inode_info *cii = ITOC(inode);
41150
41151 spin_lock(&cii->c_lock);
41152- cii->c_cached_epoch = atomic_read(&permission_epoch);
41153+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41154 if (cii->c_uid != current_fsuid()) {
41155 cii->c_uid = current_fsuid();
41156 cii->c_cached_perm = mask;
41157@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41158 {
41159 struct coda_inode_info *cii = ITOC(inode);
41160 spin_lock(&cii->c_lock);
41161- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41162+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41163 spin_unlock(&cii->c_lock);
41164 }
41165
41166 /* remove all acl caches */
41167 void coda_cache_clear_all(struct super_block *sb)
41168 {
41169- atomic_inc(&permission_epoch);
41170+ atomic_inc_unchecked(&permission_epoch);
41171 }
41172
41173
41174@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41175 spin_lock(&cii->c_lock);
41176 hit = (mask & cii->c_cached_perm) == mask &&
41177 cii->c_uid == current_fsuid() &&
41178- cii->c_cached_epoch == atomic_read(&permission_epoch);
41179+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41180 spin_unlock(&cii->c_lock);
41181
41182 return hit;
41183diff --git a/fs/compat.c b/fs/compat.c
41184index c987875..08771ca 100644
41185--- a/fs/compat.c
41186+++ b/fs/compat.c
41187@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41188 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41189 {
41190 compat_ino_t ino = stat->ino;
41191- typeof(ubuf->st_uid) uid = 0;
41192- typeof(ubuf->st_gid) gid = 0;
41193+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41194+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41195 int err;
41196
41197 SET_UID(uid, stat->uid);
41198@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41199
41200 set_fs(KERNEL_DS);
41201 /* The __user pointer cast is valid because of the set_fs() */
41202- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41203+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41204 set_fs(oldfs);
41205 /* truncating is ok because it's a user address */
41206 if (!ret)
41207@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41208 goto out;
41209
41210 ret = -EINVAL;
41211- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41212+ if (nr_segs > UIO_MAXIOV)
41213 goto out;
41214 if (nr_segs > fast_segs) {
41215 ret = -ENOMEM;
41216@@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41217
41218 struct compat_readdir_callback {
41219 struct compat_old_linux_dirent __user *dirent;
41220+ struct file * file;
41221 int result;
41222 };
41223
41224@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41225 buf->result = -EOVERFLOW;
41226 return -EOVERFLOW;
41227 }
41228+
41229+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41230+ return 0;
41231+
41232 buf->result++;
41233 dirent = buf->dirent;
41234 if (!access_ok(VERIFY_WRITE, dirent,
41235@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41236
41237 buf.result = 0;
41238 buf.dirent = dirent;
41239+ buf.file = file;
41240
41241 error = vfs_readdir(file, compat_fillonedir, &buf);
41242 if (buf.result)
41243@@ -914,6 +920,7 @@ struct compat_linux_dirent {
41244 struct compat_getdents_callback {
41245 struct compat_linux_dirent __user *current_dir;
41246 struct compat_linux_dirent __user *previous;
41247+ struct file * file;
41248 int count;
41249 int error;
41250 };
41251@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41252 buf->error = -EOVERFLOW;
41253 return -EOVERFLOW;
41254 }
41255+
41256+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41257+ return 0;
41258+
41259 dirent = buf->previous;
41260 if (dirent) {
41261 if (__put_user(offset, &dirent->d_off))
41262@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41263 buf.previous = NULL;
41264 buf.count = count;
41265 buf.error = 0;
41266+ buf.file = file;
41267
41268 error = vfs_readdir(file, compat_filldir, &buf);
41269 if (error >= 0)
41270@@ -1003,6 +1015,7 @@ out:
41271 struct compat_getdents_callback64 {
41272 struct linux_dirent64 __user *current_dir;
41273 struct linux_dirent64 __user *previous;
41274+ struct file * file;
41275 int count;
41276 int error;
41277 };
41278@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41279 buf->error = -EINVAL; /* only used if we fail.. */
41280 if (reclen > buf->count)
41281 return -EINVAL;
41282+
41283+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41284+ return 0;
41285+
41286 dirent = buf->previous;
41287
41288 if (dirent) {
41289@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41290 buf.previous = NULL;
41291 buf.count = count;
41292 buf.error = 0;
41293+ buf.file = file;
41294
41295 error = vfs_readdir(file, compat_filldir64, &buf);
41296 if (error >= 0)
41297 error = buf.error;
41298 lastdirent = buf.previous;
41299 if (lastdirent) {
41300- typeof(lastdirent->d_off) d_off = file->f_pos;
41301+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41302 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41303 error = -EFAULT;
41304 else
41305diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41306index 112e45a..b59845b 100644
41307--- a/fs/compat_binfmt_elf.c
41308+++ b/fs/compat_binfmt_elf.c
41309@@ -30,11 +30,13 @@
41310 #undef elf_phdr
41311 #undef elf_shdr
41312 #undef elf_note
41313+#undef elf_dyn
41314 #undef elf_addr_t
41315 #define elfhdr elf32_hdr
41316 #define elf_phdr elf32_phdr
41317 #define elf_shdr elf32_shdr
41318 #define elf_note elf32_note
41319+#define elf_dyn Elf32_Dyn
41320 #define elf_addr_t Elf32_Addr
41321
41322 /*
41323diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41324index 51352de..93292ff 100644
41325--- a/fs/compat_ioctl.c
41326+++ b/fs/compat_ioctl.c
41327@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41328
41329 err = get_user(palp, &up->palette);
41330 err |= get_user(length, &up->length);
41331+ if (err)
41332+ return -EFAULT;
41333
41334 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41335 err = put_user(compat_ptr(palp), &up_native->palette);
41336@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41337 return -EFAULT;
41338 if (__get_user(udata, &ss32->iomem_base))
41339 return -EFAULT;
41340- ss.iomem_base = compat_ptr(udata);
41341+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41342 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41343 __get_user(ss.port_high, &ss32->port_high))
41344 return -EFAULT;
41345@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41346 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41347 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41348 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41349- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41350+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41351 return -EFAULT;
41352
41353 return ioctl_preallocate(file, p);
41354@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41355 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41356 {
41357 unsigned int a, b;
41358- a = *(unsigned int *)p;
41359- b = *(unsigned int *)q;
41360+ a = *(const unsigned int *)p;
41361+ b = *(const unsigned int *)q;
41362 if (a > b)
41363 return 1;
41364 if (a < b)
41365diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41366index 9a37a9b..35792b6 100644
41367--- a/fs/configfs/dir.c
41368+++ b/fs/configfs/dir.c
41369@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41370 }
41371 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41372 struct configfs_dirent *next;
41373- const char * name;
41374+ const unsigned char * name;
41375+ char d_name[sizeof(next->s_dentry->d_iname)];
41376 int len;
41377 struct inode *inode = NULL;
41378
41379@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41380 continue;
41381
41382 name = configfs_get_name(next);
41383- len = strlen(name);
41384+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41385+ len = next->s_dentry->d_name.len;
41386+ memcpy(d_name, name, len);
41387+ name = d_name;
41388+ } else
41389+ len = strlen(name);
41390
41391 /*
41392 * We'll have a dentry and an inode for
41393diff --git a/fs/dcache.c b/fs/dcache.c
41394index f7908ae..920a680 100644
41395--- a/fs/dcache.c
41396+++ b/fs/dcache.c
41397@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41398 mempages -= reserve;
41399
41400 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41401- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41402+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41403
41404 dcache_init();
41405 inode_init();
41406diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
41407index 2a83425..b082cec 100644
41408--- a/fs/ecryptfs/crypto.c
41409+++ b/fs/ecryptfs/crypto.c
41410@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
41411 (unsigned long long)(extent_base + extent_offset), rc);
41412 goto out;
41413 }
41414- if (unlikely(ecryptfs_verbosity > 0)) {
41415- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
41416- "with iv:\n");
41417- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
41418- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
41419- "encryption:\n");
41420- ecryptfs_dump_hex((char *)
41421- (page_address(page)
41422- + (extent_offset * crypt_stat->extent_size)),
41423- 8);
41424- }
41425 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
41426 page, (extent_offset
41427 * crypt_stat->extent_size),
41428@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
41429 goto out;
41430 }
41431 rc = 0;
41432- if (unlikely(ecryptfs_verbosity > 0)) {
41433- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
41434- "rc = [%d]\n",
41435- (unsigned long long)(extent_base + extent_offset), rc);
41436- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
41437- "encryption:\n");
41438- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
41439- }
41440 out:
41441 return rc;
41442 }
41443@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
41444 (unsigned long long)(extent_base + extent_offset), rc);
41445 goto out;
41446 }
41447- if (unlikely(ecryptfs_verbosity > 0)) {
41448- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
41449- "with iv:\n");
41450- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
41451- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
41452- "decryption:\n");
41453- ecryptfs_dump_hex((char *)
41454- (page_address(enc_extent_page)
41455- + (extent_offset * crypt_stat->extent_size)),
41456- 8);
41457- }
41458 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
41459 (extent_offset
41460 * crypt_stat->extent_size),
41461@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
41462 goto out;
41463 }
41464 rc = 0;
41465- if (unlikely(ecryptfs_verbosity > 0)) {
41466- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
41467- "rc = [%d]\n",
41468- (unsigned long long)(extent_base + extent_offset), rc);
41469- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
41470- "decryption:\n");
41471- ecryptfs_dump_hex((char *)(page_address(page)
41472- + (extent_offset
41473- * crypt_stat->extent_size)), 8);
41474- }
41475 out:
41476 return rc;
41477 }
41478diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41479index 32f90a3..a766407 100644
41480--- a/fs/ecryptfs/inode.c
41481+++ b/fs/ecryptfs/inode.c
41482@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41483 old_fs = get_fs();
41484 set_fs(get_ds());
41485 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41486- (char __user *)lower_buf,
41487+ (char __force_user *)lower_buf,
41488 lower_bufsiz);
41489 set_fs(old_fs);
41490 if (rc < 0)
41491@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41492 }
41493 old_fs = get_fs();
41494 set_fs(get_ds());
41495- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41496+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41497 set_fs(old_fs);
41498 if (rc < 0) {
41499 kfree(buf);
41500@@ -752,7 +752,7 @@ out:
41501 static void
41502 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41503 {
41504- char *buf = nd_get_link(nd);
41505+ const char *buf = nd_get_link(nd);
41506 if (!IS_ERR(buf)) {
41507 /* Free the char* */
41508 kfree(buf);
41509@@ -841,18 +841,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
41510 size_t num_zeros = (PAGE_CACHE_SIZE
41511 - (ia->ia_size & ~PAGE_CACHE_MASK));
41512
41513-
41514- /*
41515- * XXX(truncate) this should really happen at the begginning
41516- * of ->setattr. But the code is too messy to that as part
41517- * of a larger patch. ecryptfs is also totally missing out
41518- * on the inode_change_ok check at the beginning of
41519- * ->setattr while would include this.
41520- */
41521- rc = inode_newsize_ok(inode, ia->ia_size);
41522- if (rc)
41523- goto out;
41524-
41525 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
41526 truncate_setsize(inode, ia->ia_size);
41527 lower_ia->ia_size = ia->ia_size;
41528@@ -902,6 +890,28 @@ out:
41529 return rc;
41530 }
41531
41532+static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
41533+{
41534+ struct ecryptfs_crypt_stat *crypt_stat;
41535+ loff_t lower_oldsize, lower_newsize;
41536+
41537+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
41538+ lower_oldsize = upper_size_to_lower_size(crypt_stat,
41539+ i_size_read(inode));
41540+ lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
41541+ if (lower_newsize > lower_oldsize) {
41542+ /*
41543+ * The eCryptfs inode and the new *lower* size are mixed here
41544+ * because we may not have the lower i_mutex held and/or it may
41545+ * not be appropriate to call inode_newsize_ok() with inodes
41546+ * from other filesystems.
41547+ */
41548+ return inode_newsize_ok(inode, lower_newsize);
41549+ }
41550+
41551+ return 0;
41552+}
41553+
41554 /**
41555 * ecryptfs_truncate
41556 * @dentry: The ecryptfs layer dentry
41557@@ -918,6 +928,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
41558 struct iattr lower_ia = { .ia_valid = 0 };
41559 int rc;
41560
41561+ rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
41562+ if (rc)
41563+ return rc;
41564+
41565 rc = truncate_upper(dentry, &ia, &lower_ia);
41566 if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
41567 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
41568@@ -997,6 +1011,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
41569 }
41570 }
41571 mutex_unlock(&crypt_stat->cs_mutex);
41572+
41573+ rc = inode_change_ok(inode, ia);
41574+ if (rc)
41575+ goto out;
41576+ if (ia->ia_valid & ATTR_SIZE) {
41577+ rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
41578+ if (rc)
41579+ goto out;
41580+ }
41581+
41582 if (S_ISREG(inode->i_mode)) {
41583 rc = filemap_write_and_wait(inode->i_mapping);
41584 if (rc)
41585diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41586index 940a82e..d3cdeea 100644
41587--- a/fs/ecryptfs/miscdev.c
41588+++ b/fs/ecryptfs/miscdev.c
41589@@ -328,7 +328,7 @@ check_list:
41590 goto out_unlock_msg_ctx;
41591 i = 5;
41592 if (msg_ctx->msg) {
41593- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41594+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41595 goto out_unlock_msg_ctx;
41596 i += packet_length_size;
41597 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41598@@ -409,11 +409,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
41599 ssize_t sz = 0;
41600 char *data;
41601 uid_t euid = current_euid();
41602+ unsigned char packet_size_peek[3];
41603 int rc;
41604
41605- if (count == 0)
41606+ if (count == 0) {
41607 goto out;
41608+ } else if (count == (1 + 4)) {
41609+ /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
41610+ goto memdup;
41611+ } else if (count < (1 + 4 + 1)
41612+ || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
41613+ + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
41614+ printk(KERN_WARNING "%s: Acceptable packet size range is "
41615+ "[%d-%lu], but amount of data written is [%zu].",
41616+ __func__, (1 + 4 + 1),
41617+ (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
41618+ + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
41619+ return -EINVAL;
41620+ }
41621
41622+ if (copy_from_user(packet_size_peek, (buf + 1 + 4),
41623+ sizeof(packet_size_peek))) {
41624+ printk(KERN_WARNING "%s: Error while inspecting packet size\n",
41625+ __func__);
41626+ return -EFAULT;
41627+ }
41628+
41629+ rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
41630+ &packet_size_length);
41631+ if (rc) {
41632+ printk(KERN_WARNING "%s: Error parsing packet length; "
41633+ "rc = [%d]\n", __func__, rc);
41634+ return rc;
41635+ }
41636+
41637+ if ((1 + 4 + packet_size_length + packet_size) != count) {
41638+ printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
41639+ packet_size);
41640+ return -EINVAL;
41641+ }
41642+
41643+memdup:
41644 data = memdup_user(buf, count);
41645 if (IS_ERR(data)) {
41646 printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
41647@@ -435,23 +471,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
41648 }
41649 memcpy(&counter_nbo, &data[i], 4);
41650 seq = be32_to_cpu(counter_nbo);
41651- i += 4;
41652- rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
41653- &packet_size_length);
41654- if (rc) {
41655- printk(KERN_WARNING "%s: Error parsing packet length; "
41656- "rc = [%d]\n", __func__, rc);
41657- goto out_free;
41658- }
41659- i += packet_size_length;
41660- if ((1 + 4 + packet_size_length + packet_size) != count) {
41661- printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
41662- " + packet_size([%zd]))([%zd]) != "
41663- "count([%zd]). Invalid packet format.\n",
41664- __func__, packet_size_length, packet_size,
41665- (1 + packet_size_length + packet_size), count);
41666- goto out_free;
41667- }
41668+ i += 4 + packet_size_length;
41669 rc = ecryptfs_miscdev_response(&data[i], packet_size,
41670 euid, current_user_ns(),
41671 task_pid(current), seq);
41672diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41673index 3745f7c..7d040a8 100644
41674--- a/fs/ecryptfs/read_write.c
41675+++ b/fs/ecryptfs/read_write.c
41676@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41677 return -EIO;
41678 fs_save = get_fs();
41679 set_fs(get_ds());
41680- rc = vfs_write(lower_file, data, size, &offset);
41681+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41682 set_fs(fs_save);
41683 mark_inode_dirty_sync(ecryptfs_inode);
41684 return rc;
41685@@ -130,13 +130,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41686 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
41687 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
41688 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
41689- size_t total_remaining_bytes = ((offset + size) - pos);
41690+ loff_t total_remaining_bytes = ((offset + size) - pos);
41691+
41692+ if (fatal_signal_pending(current)) {
41693+ rc = -EINTR;
41694+ break;
41695+ }
41696
41697 if (num_bytes > total_remaining_bytes)
41698 num_bytes = total_remaining_bytes;
41699 if (pos < offset) {
41700 /* remaining zeros to write, up to destination offset */
41701- size_t total_remaining_zeros = (offset - pos);
41702+ loff_t total_remaining_zeros = (offset - pos);
41703
41704 if (num_bytes > total_remaining_zeros)
41705 num_bytes = total_remaining_zeros;
41706@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41707 }
41708 pos += num_bytes;
41709 }
41710- if ((offset + size) > ecryptfs_file_size) {
41711- i_size_write(ecryptfs_inode, (offset + size));
41712+ if (pos > ecryptfs_file_size) {
41713+ i_size_write(ecryptfs_inode, pos);
41714 if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
41715- rc = ecryptfs_write_inode_size_to_metadata(
41716+ int rc2;
41717+
41718+ rc2 = ecryptfs_write_inode_size_to_metadata(
41719 ecryptfs_inode);
41720- if (rc) {
41721+ if (rc2) {
41722 printk(KERN_ERR "Problem with "
41723 "ecryptfs_write_inode_size_to_metadata; "
41724- "rc = [%d]\n", rc);
41725+ "rc = [%d]\n", rc2);
41726+ if (!rc)
41727+ rc = rc2;
41728 goto out;
41729 }
41730 }
41731@@ -235,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41732 return -EIO;
41733 fs_save = get_fs();
41734 set_fs(get_ds());
41735- rc = vfs_read(lower_file, data, size, &offset);
41736+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41737 set_fs(fs_save);
41738 return rc;
41739 }
41740diff --git a/fs/exec.c b/fs/exec.c
41741index 3625464..fac01f4 100644
41742--- a/fs/exec.c
41743+++ b/fs/exec.c
41744@@ -55,12 +55,28 @@
41745 #include <linux/pipe_fs_i.h>
41746 #include <linux/oom.h>
41747 #include <linux/compat.h>
41748+#include <linux/random.h>
41749+#include <linux/seq_file.h>
41750+
41751+#ifdef CONFIG_PAX_REFCOUNT
41752+#include <linux/kallsyms.h>
41753+#include <linux/kdebug.h>
41754+#endif
41755
41756 #include <asm/uaccess.h>
41757 #include <asm/mmu_context.h>
41758 #include <asm/tlb.h>
41759 #include "internal.h"
41760
41761+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41762+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41763+#endif
41764+
41765+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41766+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41767+EXPORT_SYMBOL(pax_set_initial_flags_func);
41768+#endif
41769+
41770 int core_uses_pid;
41771 char core_pattern[CORENAME_MAX_SIZE] = "core";
41772 unsigned int core_pipe_limit;
41773@@ -70,7 +86,7 @@ struct core_name {
41774 char *corename;
41775 int used, size;
41776 };
41777-static atomic_t call_count = ATOMIC_INIT(1);
41778+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41779
41780 /* The maximal length of core_pattern is also specified in sysctl.c */
41781
41782@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41783 int write)
41784 {
41785 struct page *page;
41786- int ret;
41787
41788-#ifdef CONFIG_STACK_GROWSUP
41789- if (write) {
41790- ret = expand_downwards(bprm->vma, pos);
41791- if (ret < 0)
41792- return NULL;
41793- }
41794-#endif
41795- ret = get_user_pages(current, bprm->mm, pos,
41796- 1, write, 1, &page, NULL);
41797- if (ret <= 0)
41798+ if (0 > expand_downwards(bprm->vma, pos))
41799+ return NULL;
41800+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41801 return NULL;
41802
41803 if (write) {
41804@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41805 vma->vm_end = STACK_TOP_MAX;
41806 vma->vm_start = vma->vm_end - PAGE_SIZE;
41807 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41808+
41809+#ifdef CONFIG_PAX_SEGMEXEC
41810+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41811+#endif
41812+
41813 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41814 INIT_LIST_HEAD(&vma->anon_vma_chain);
41815
41816@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41817 mm->stack_vm = mm->total_vm = 1;
41818 up_write(&mm->mmap_sem);
41819 bprm->p = vma->vm_end - sizeof(void *);
41820+
41821+#ifdef CONFIG_PAX_RANDUSTACK
41822+ if (randomize_va_space)
41823+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41824+#endif
41825+
41826 return 0;
41827 err:
41828 up_write(&mm->mmap_sem);
41829@@ -396,19 +415,7 @@ err:
41830 return err;
41831 }
41832
41833-struct user_arg_ptr {
41834-#ifdef CONFIG_COMPAT
41835- bool is_compat;
41836-#endif
41837- union {
41838- const char __user *const __user *native;
41839-#ifdef CONFIG_COMPAT
41840- compat_uptr_t __user *compat;
41841-#endif
41842- } ptr;
41843-};
41844-
41845-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41846+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41847 {
41848 const char __user *native;
41849
41850@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41851 compat_uptr_t compat;
41852
41853 if (get_user(compat, argv.ptr.compat + nr))
41854- return ERR_PTR(-EFAULT);
41855+ return (const char __force_user *)ERR_PTR(-EFAULT);
41856
41857 return compat_ptr(compat);
41858 }
41859 #endif
41860
41861 if (get_user(native, argv.ptr.native + nr))
41862- return ERR_PTR(-EFAULT);
41863+ return (const char __force_user *)ERR_PTR(-EFAULT);
41864
41865 return native;
41866 }
41867@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
41868 if (!p)
41869 break;
41870
41871- if (IS_ERR(p))
41872+ if (IS_ERR((const char __force_kernel *)p))
41873 return -EFAULT;
41874
41875 if (i++ >= max)
41876@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41877
41878 ret = -EFAULT;
41879 str = get_user_arg_ptr(argv, argc);
41880- if (IS_ERR(str))
41881+ if (IS_ERR((const char __force_kernel *)str))
41882 goto out;
41883
41884 len = strnlen_user(str, MAX_ARG_STRLEN);
41885@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41886 int r;
41887 mm_segment_t oldfs = get_fs();
41888 struct user_arg_ptr argv = {
41889- .ptr.native = (const char __user *const __user *)__argv,
41890+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41891 };
41892
41893 set_fs(KERNEL_DS);
41894@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41895 unsigned long new_end = old_end - shift;
41896 struct mmu_gather tlb;
41897
41898- BUG_ON(new_start > new_end);
41899+ if (new_start >= new_end || new_start < mmap_min_addr)
41900+ return -ENOMEM;
41901
41902 /*
41903 * ensure there are no vmas between where we want to go
41904@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41905 if (vma != find_vma(mm, new_start))
41906 return -EFAULT;
41907
41908+#ifdef CONFIG_PAX_SEGMEXEC
41909+ BUG_ON(pax_find_mirror_vma(vma));
41910+#endif
41911+
41912 /*
41913 * cover the whole range: [new_start, old_end)
41914 */
41915@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41916 stack_top = arch_align_stack(stack_top);
41917 stack_top = PAGE_ALIGN(stack_top);
41918
41919- if (unlikely(stack_top < mmap_min_addr) ||
41920- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41921- return -ENOMEM;
41922-
41923 stack_shift = vma->vm_end - stack_top;
41924
41925 bprm->p -= stack_shift;
41926@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41927 bprm->exec -= stack_shift;
41928
41929 down_write(&mm->mmap_sem);
41930+
41931+ /* Move stack pages down in memory. */
41932+ if (stack_shift) {
41933+ ret = shift_arg_pages(vma, stack_shift);
41934+ if (ret)
41935+ goto out_unlock;
41936+ }
41937+
41938 vm_flags = VM_STACK_FLAGS;
41939
41940+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41941+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41942+ vm_flags &= ~VM_EXEC;
41943+
41944+#ifdef CONFIG_PAX_MPROTECT
41945+ if (mm->pax_flags & MF_PAX_MPROTECT)
41946+ vm_flags &= ~VM_MAYEXEC;
41947+#endif
41948+
41949+ }
41950+#endif
41951+
41952 /*
41953 * Adjust stack execute permissions; explicitly enable for
41954 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41955@@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41956 goto out_unlock;
41957 BUG_ON(prev != vma);
41958
41959- /* Move stack pages down in memory. */
41960- if (stack_shift) {
41961- ret = shift_arg_pages(vma, stack_shift);
41962- if (ret)
41963- goto out_unlock;
41964- }
41965-
41966 /* mprotect_fixup is overkill to remove the temporary stack flags */
41967 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41968
41969@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
41970 old_fs = get_fs();
41971 set_fs(get_ds());
41972 /* The cast to a user pointer is valid due to the set_fs() */
41973- result = vfs_read(file, (void __user *)addr, count, &pos);
41974+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41975 set_fs(old_fs);
41976 return result;
41977 }
41978@@ -1247,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41979 }
41980 rcu_read_unlock();
41981
41982- if (p->fs->users > n_fs) {
41983+ if (atomic_read(&p->fs->users) > n_fs) {
41984 bprm->unsafe |= LSM_UNSAFE_SHARE;
41985 } else {
41986 res = -EAGAIN;
41987@@ -1450,6 +1471,11 @@ static int do_execve_common(const char *filename,
41988 struct user_arg_ptr envp,
41989 struct pt_regs *regs)
41990 {
41991+#ifdef CONFIG_GRKERNSEC
41992+ struct file *old_exec_file;
41993+ struct acl_subject_label *old_acl;
41994+ struct rlimit old_rlim[RLIM_NLIMITS];
41995+#endif
41996 struct linux_binprm *bprm;
41997 struct file *file;
41998 struct files_struct *displaced;
41999@@ -1457,6 +1483,8 @@ static int do_execve_common(const char *filename,
42000 int retval;
42001 const struct cred *cred = current_cred();
42002
42003+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42004+
42005 /*
42006 * We move the actual failure in case of RLIMIT_NPROC excess from
42007 * set*uid() to execve() because too many poorly written programs
42008@@ -1497,12 +1525,27 @@ static int do_execve_common(const char *filename,
42009 if (IS_ERR(file))
42010 goto out_unmark;
42011
42012+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
42013+ retval = -EPERM;
42014+ goto out_file;
42015+ }
42016+
42017 sched_exec();
42018
42019 bprm->file = file;
42020 bprm->filename = filename;
42021 bprm->interp = filename;
42022
42023+ if (gr_process_user_ban()) {
42024+ retval = -EPERM;
42025+ goto out_file;
42026+ }
42027+
42028+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42029+ retval = -EACCES;
42030+ goto out_file;
42031+ }
42032+
42033 retval = bprm_mm_init(bprm);
42034 if (retval)
42035 goto out_file;
42036@@ -1532,9 +1575,40 @@ static int do_execve_common(const char *filename,
42037 if (retval < 0)
42038 goto out;
42039
42040+ if (!gr_tpe_allow(file)) {
42041+ retval = -EACCES;
42042+ goto out;
42043+ }
42044+
42045+ if (gr_check_crash_exec(file)) {
42046+ retval = -EACCES;
42047+ goto out;
42048+ }
42049+
42050+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42051+
42052+ gr_handle_exec_args(bprm, argv);
42053+
42054+#ifdef CONFIG_GRKERNSEC
42055+ old_acl = current->acl;
42056+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42057+ old_exec_file = current->exec_file;
42058+ get_file(file);
42059+ current->exec_file = file;
42060+#endif
42061+
42062+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42063+ bprm->unsafe);
42064+ if (retval < 0)
42065+ goto out_fail;
42066+
42067 retval = search_binary_handler(bprm,regs);
42068 if (retval < 0)
42069- goto out;
42070+ goto out_fail;
42071+#ifdef CONFIG_GRKERNSEC
42072+ if (old_exec_file)
42073+ fput(old_exec_file);
42074+#endif
42075
42076 /* execve succeeded */
42077 current->fs->in_exec = 0;
42078@@ -1545,6 +1619,14 @@ static int do_execve_common(const char *filename,
42079 put_files_struct(displaced);
42080 return retval;
42081
42082+out_fail:
42083+#ifdef CONFIG_GRKERNSEC
42084+ current->acl = old_acl;
42085+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42086+ fput(current->exec_file);
42087+ current->exec_file = old_exec_file;
42088+#endif
42089+
42090 out:
42091 if (bprm->mm) {
42092 acct_arg_size(bprm, 0);
42093@@ -1618,7 +1700,7 @@ static int expand_corename(struct core_name *cn)
42094 {
42095 char *old_corename = cn->corename;
42096
42097- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42098+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42099 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42100
42101 if (!cn->corename) {
42102@@ -1715,7 +1797,7 @@ static int format_corename(struct core_name *cn, long signr)
42103 int pid_in_pattern = 0;
42104 int err = 0;
42105
42106- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42107+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42108 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42109 cn->used = 0;
42110
42111@@ -1812,6 +1894,218 @@ out:
42112 return ispipe;
42113 }
42114
42115+int pax_check_flags(unsigned long *flags)
42116+{
42117+ int retval = 0;
42118+
42119+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42120+ if (*flags & MF_PAX_SEGMEXEC)
42121+ {
42122+ *flags &= ~MF_PAX_SEGMEXEC;
42123+ retval = -EINVAL;
42124+ }
42125+#endif
42126+
42127+ if ((*flags & MF_PAX_PAGEEXEC)
42128+
42129+#ifdef CONFIG_PAX_PAGEEXEC
42130+ && (*flags & MF_PAX_SEGMEXEC)
42131+#endif
42132+
42133+ )
42134+ {
42135+ *flags &= ~MF_PAX_PAGEEXEC;
42136+ retval = -EINVAL;
42137+ }
42138+
42139+ if ((*flags & MF_PAX_MPROTECT)
42140+
42141+#ifdef CONFIG_PAX_MPROTECT
42142+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42143+#endif
42144+
42145+ )
42146+ {
42147+ *flags &= ~MF_PAX_MPROTECT;
42148+ retval = -EINVAL;
42149+ }
42150+
42151+ if ((*flags & MF_PAX_EMUTRAMP)
42152+
42153+#ifdef CONFIG_PAX_EMUTRAMP
42154+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42155+#endif
42156+
42157+ )
42158+ {
42159+ *flags &= ~MF_PAX_EMUTRAMP;
42160+ retval = -EINVAL;
42161+ }
42162+
42163+ return retval;
42164+}
42165+
42166+EXPORT_SYMBOL(pax_check_flags);
42167+
42168+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42169+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42170+{
42171+ struct task_struct *tsk = current;
42172+ struct mm_struct *mm = current->mm;
42173+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42174+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42175+ char *path_exec = NULL;
42176+ char *path_fault = NULL;
42177+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
42178+
42179+ if (buffer_exec && buffer_fault) {
42180+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42181+
42182+ down_read(&mm->mmap_sem);
42183+ vma = mm->mmap;
42184+ while (vma && (!vma_exec || !vma_fault)) {
42185+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42186+ vma_exec = vma;
42187+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42188+ vma_fault = vma;
42189+ vma = vma->vm_next;
42190+ }
42191+ if (vma_exec) {
42192+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42193+ if (IS_ERR(path_exec))
42194+ path_exec = "<path too long>";
42195+ else {
42196+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42197+ if (path_exec) {
42198+ *path_exec = 0;
42199+ path_exec = buffer_exec;
42200+ } else
42201+ path_exec = "<path too long>";
42202+ }
42203+ }
42204+ if (vma_fault) {
42205+ start = vma_fault->vm_start;
42206+ end = vma_fault->vm_end;
42207+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42208+ if (vma_fault->vm_file) {
42209+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42210+ if (IS_ERR(path_fault))
42211+ path_fault = "<path too long>";
42212+ else {
42213+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42214+ if (path_fault) {
42215+ *path_fault = 0;
42216+ path_fault = buffer_fault;
42217+ } else
42218+ path_fault = "<path too long>";
42219+ }
42220+ } else
42221+ path_fault = "<anonymous mapping>";
42222+ }
42223+ up_read(&mm->mmap_sem);
42224+ }
42225+ if (tsk->signal->curr_ip)
42226+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42227+ else
42228+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42229+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42230+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42231+ task_uid(tsk), task_euid(tsk), pc, sp);
42232+ free_page((unsigned long)buffer_exec);
42233+ free_page((unsigned long)buffer_fault);
42234+ pax_report_insns(regs, pc, sp);
42235+ do_coredump(SIGKILL, SIGKILL, regs);
42236+}
42237+#endif
42238+
42239+#ifdef CONFIG_PAX_REFCOUNT
42240+void pax_report_refcount_overflow(struct pt_regs *regs)
42241+{
42242+ if (current->signal->curr_ip)
42243+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42244+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42245+ else
42246+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42247+ current->comm, task_pid_nr(current), current_uid(), current_euid());
42248+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42249+ show_regs(regs);
42250+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42251+}
42252+#endif
42253+
42254+#ifdef CONFIG_PAX_USERCOPY
42255+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42256+int object_is_on_stack(const void *obj, unsigned long len)
42257+{
42258+ const void * const stack = task_stack_page(current);
42259+ const void * const stackend = stack + THREAD_SIZE;
42260+
42261+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42262+ const void *frame = NULL;
42263+ const void *oldframe;
42264+#endif
42265+
42266+ if (obj + len < obj)
42267+ return -1;
42268+
42269+ if (obj + len <= stack || stackend <= obj)
42270+ return 0;
42271+
42272+ if (obj < stack || stackend < obj + len)
42273+ return -1;
42274+
42275+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42276+ oldframe = __builtin_frame_address(1);
42277+ if (oldframe)
42278+ frame = __builtin_frame_address(2);
42279+ /*
42280+ low ----------------------------------------------> high
42281+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
42282+ ^----------------^
42283+ allow copies only within here
42284+ */
42285+ while (stack <= frame && frame < stackend) {
42286+ /* if obj + len extends past the last frame, this
42287+ check won't pass and the next frame will be 0,
42288+ causing us to bail out and correctly report
42289+ the copy as invalid
42290+ */
42291+ if (obj + len <= frame)
42292+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42293+ oldframe = frame;
42294+ frame = *(const void * const *)frame;
42295+ }
42296+ return -1;
42297+#else
42298+ return 1;
42299+#endif
42300+}
42301+
42302+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42303+{
42304+ if (current->signal->curr_ip)
42305+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42306+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42307+ else
42308+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42309+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42310+ dump_stack();
42311+ gr_handle_kernel_exploit();
42312+ do_group_exit(SIGKILL);
42313+}
42314+#endif
42315+
42316+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42317+void pax_track_stack(void)
42318+{
42319+ unsigned long sp = (unsigned long)&sp;
42320+ if (sp < current_thread_info()->lowest_stack &&
42321+ sp > (unsigned long)task_stack_page(current))
42322+ current_thread_info()->lowest_stack = sp;
42323+}
42324+EXPORT_SYMBOL(pax_track_stack);
42325+#endif
42326+
42327 static int zap_process(struct task_struct *start, int exit_code)
42328 {
42329 struct task_struct *t;
42330@@ -2023,17 +2317,17 @@ static void wait_for_dump_helpers(struct file *file)
42331 pipe = file->f_path.dentry->d_inode->i_pipe;
42332
42333 pipe_lock(pipe);
42334- pipe->readers++;
42335- pipe->writers--;
42336+ atomic_inc(&pipe->readers);
42337+ atomic_dec(&pipe->writers);
42338
42339- while ((pipe->readers > 1) && (!signal_pending(current))) {
42340+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42341 wake_up_interruptible_sync(&pipe->wait);
42342 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42343 pipe_wait(pipe);
42344 }
42345
42346- pipe->readers--;
42347- pipe->writers++;
42348+ atomic_dec(&pipe->readers);
42349+ atomic_inc(&pipe->writers);
42350 pipe_unlock(pipe);
42351
42352 }
42353@@ -2094,7 +2388,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42354 int retval = 0;
42355 int flag = 0;
42356 int ispipe;
42357- static atomic_t core_dump_count = ATOMIC_INIT(0);
42358+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42359 struct coredump_params cprm = {
42360 .signr = signr,
42361 .regs = regs,
42362@@ -2109,6 +2403,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42363
42364 audit_core_dumps(signr);
42365
42366+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42367+ gr_handle_brute_attach(current, cprm.mm_flags);
42368+
42369 binfmt = mm->binfmt;
42370 if (!binfmt || !binfmt->core_dump)
42371 goto fail;
42372@@ -2176,7 +2473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42373 }
42374 cprm.limit = RLIM_INFINITY;
42375
42376- dump_count = atomic_inc_return(&core_dump_count);
42377+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42378 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42379 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42380 task_tgid_vnr(current), current->comm);
42381@@ -2203,6 +2500,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42382 } else {
42383 struct inode *inode;
42384
42385+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42386+
42387 if (cprm.limit < binfmt->min_coredump)
42388 goto fail_unlock;
42389
42390@@ -2246,7 +2545,7 @@ close_fail:
42391 filp_close(cprm.file, NULL);
42392 fail_dropcount:
42393 if (ispipe)
42394- atomic_dec(&core_dump_count);
42395+ atomic_dec_unchecked(&core_dump_count);
42396 fail_unlock:
42397 kfree(cn.corename);
42398 fail_corename:
42399@@ -2265,7 +2564,7 @@ fail:
42400 */
42401 int dump_write(struct file *file, const void *addr, int nr)
42402 {
42403- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42404+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42405 }
42406 EXPORT_SYMBOL(dump_write);
42407
42408diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42409index a8cbe1b..fed04cb 100644
42410--- a/fs/ext2/balloc.c
42411+++ b/fs/ext2/balloc.c
42412@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42413
42414 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42415 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42416- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42417+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42418 sbi->s_resuid != current_fsuid() &&
42419 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42420 return 0;
42421diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42422index a203892..4e64db5 100644
42423--- a/fs/ext3/balloc.c
42424+++ b/fs/ext3/balloc.c
42425@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42426
42427 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42428 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42429- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42430+ if (free_blocks < root_blocks + 1 &&
42431 !use_reservation && sbi->s_resuid != current_fsuid() &&
42432- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42433+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42434+ !capable_nolog(CAP_SYS_RESOURCE)) {
42435 return 0;
42436 }
42437 return 1;
42438diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42439index 12ccacd..a6035fce0 100644
42440--- a/fs/ext4/balloc.c
42441+++ b/fs/ext4/balloc.c
42442@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42443 /* Hm, nope. Are (enough) root reserved clusters available? */
42444 if (sbi->s_resuid == current_fsuid() ||
42445 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42446- capable(CAP_SYS_RESOURCE) ||
42447- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42448+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42449+ capable_nolog(CAP_SYS_RESOURCE)) {
42450
42451 if (free_clusters >= (nclusters + dirty_clusters))
42452 return 1;
42453diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42454index 5b0e26a..0aa002d 100644
42455--- a/fs/ext4/ext4.h
42456+++ b/fs/ext4/ext4.h
42457@@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42458 unsigned long s_mb_last_start;
42459
42460 /* stats for buddy allocator */
42461- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42462- atomic_t s_bal_success; /* we found long enough chunks */
42463- atomic_t s_bal_allocated; /* in blocks */
42464- atomic_t s_bal_ex_scanned; /* total extents scanned */
42465- atomic_t s_bal_goals; /* goal hits */
42466- atomic_t s_bal_breaks; /* too long searches */
42467- atomic_t s_bal_2orders; /* 2^order hits */
42468+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42469+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42470+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42471+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42472+ atomic_unchecked_t s_bal_goals; /* goal hits */
42473+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42474+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42475 spinlock_t s_bal_lock;
42476 unsigned long s_mb_buddies_generated;
42477 unsigned long long s_mb_generation_time;
42478- atomic_t s_mb_lost_chunks;
42479- atomic_t s_mb_preallocated;
42480- atomic_t s_mb_discarded;
42481+ atomic_unchecked_t s_mb_lost_chunks;
42482+ atomic_unchecked_t s_mb_preallocated;
42483+ atomic_unchecked_t s_mb_discarded;
42484 atomic_t s_lock_busy;
42485
42486 /* locality groups */
42487diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42488index e2d8be8..c7f0ce9 100644
42489--- a/fs/ext4/mballoc.c
42490+++ b/fs/ext4/mballoc.c
42491@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42492 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42493
42494 if (EXT4_SB(sb)->s_mb_stats)
42495- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42496+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42497
42498 break;
42499 }
42500@@ -2088,7 +2088,7 @@ repeat:
42501 ac->ac_status = AC_STATUS_CONTINUE;
42502 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42503 cr = 3;
42504- atomic_inc(&sbi->s_mb_lost_chunks);
42505+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42506 goto repeat;
42507 }
42508 }
42509@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42510 if (sbi->s_mb_stats) {
42511 ext4_msg(sb, KERN_INFO,
42512 "mballoc: %u blocks %u reqs (%u success)",
42513- atomic_read(&sbi->s_bal_allocated),
42514- atomic_read(&sbi->s_bal_reqs),
42515- atomic_read(&sbi->s_bal_success));
42516+ atomic_read_unchecked(&sbi->s_bal_allocated),
42517+ atomic_read_unchecked(&sbi->s_bal_reqs),
42518+ atomic_read_unchecked(&sbi->s_bal_success));
42519 ext4_msg(sb, KERN_INFO,
42520 "mballoc: %u extents scanned, %u goal hits, "
42521 "%u 2^N hits, %u breaks, %u lost",
42522- atomic_read(&sbi->s_bal_ex_scanned),
42523- atomic_read(&sbi->s_bal_goals),
42524- atomic_read(&sbi->s_bal_2orders),
42525- atomic_read(&sbi->s_bal_breaks),
42526- atomic_read(&sbi->s_mb_lost_chunks));
42527+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42528+ atomic_read_unchecked(&sbi->s_bal_goals),
42529+ atomic_read_unchecked(&sbi->s_bal_2orders),
42530+ atomic_read_unchecked(&sbi->s_bal_breaks),
42531+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42532 ext4_msg(sb, KERN_INFO,
42533 "mballoc: %lu generated and it took %Lu",
42534 sbi->s_mb_buddies_generated,
42535 sbi->s_mb_generation_time);
42536 ext4_msg(sb, KERN_INFO,
42537 "mballoc: %u preallocated, %u discarded",
42538- atomic_read(&sbi->s_mb_preallocated),
42539- atomic_read(&sbi->s_mb_discarded));
42540+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42541+ atomic_read_unchecked(&sbi->s_mb_discarded));
42542 }
42543
42544 free_percpu(sbi->s_locality_groups);
42545@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42546 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42547
42548 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42549- atomic_inc(&sbi->s_bal_reqs);
42550- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42551+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42552+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42553 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42554- atomic_inc(&sbi->s_bal_success);
42555- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42556+ atomic_inc_unchecked(&sbi->s_bal_success);
42557+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42558 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42559 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42560- atomic_inc(&sbi->s_bal_goals);
42561+ atomic_inc_unchecked(&sbi->s_bal_goals);
42562 if (ac->ac_found > sbi->s_mb_max_to_scan)
42563- atomic_inc(&sbi->s_bal_breaks);
42564+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42565 }
42566
42567 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42568@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42569 trace_ext4_mb_new_inode_pa(ac, pa);
42570
42571 ext4_mb_use_inode_pa(ac, pa);
42572- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42573+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42574
42575 ei = EXT4_I(ac->ac_inode);
42576 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42577@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42578 trace_ext4_mb_new_group_pa(ac, pa);
42579
42580 ext4_mb_use_group_pa(ac, pa);
42581- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42582+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42583
42584 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42585 lg = ac->ac_lg;
42586@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42587 * from the bitmap and continue.
42588 */
42589 }
42590- atomic_add(free, &sbi->s_mb_discarded);
42591+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42592
42593 return err;
42594 }
42595@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42596 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42597 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42598 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42599- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42600+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42601 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42602
42603 return 0;
42604diff --git a/fs/fcntl.c b/fs/fcntl.c
42605index 22764c7..86372c9 100644
42606--- a/fs/fcntl.c
42607+++ b/fs/fcntl.c
42608@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42609 if (err)
42610 return err;
42611
42612+ if (gr_handle_chroot_fowner(pid, type))
42613+ return -ENOENT;
42614+ if (gr_check_protected_task_fowner(pid, type))
42615+ return -EACCES;
42616+
42617 f_modown(filp, pid, type, force);
42618 return 0;
42619 }
42620@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42621
42622 static int f_setown_ex(struct file *filp, unsigned long arg)
42623 {
42624- struct f_owner_ex * __user owner_p = (void * __user)arg;
42625+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42626 struct f_owner_ex owner;
42627 struct pid *pid;
42628 int type;
42629@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42630
42631 static int f_getown_ex(struct file *filp, unsigned long arg)
42632 {
42633- struct f_owner_ex * __user owner_p = (void * __user)arg;
42634+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42635 struct f_owner_ex owner;
42636 int ret = 0;
42637
42638@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42639 switch (cmd) {
42640 case F_DUPFD:
42641 case F_DUPFD_CLOEXEC:
42642+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42643 if (arg >= rlimit(RLIMIT_NOFILE))
42644 break;
42645 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42646diff --git a/fs/fifo.c b/fs/fifo.c
42647index b1a524d..4ee270e 100644
42648--- a/fs/fifo.c
42649+++ b/fs/fifo.c
42650@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42651 */
42652 filp->f_op = &read_pipefifo_fops;
42653 pipe->r_counter++;
42654- if (pipe->readers++ == 0)
42655+ if (atomic_inc_return(&pipe->readers) == 1)
42656 wake_up_partner(inode);
42657
42658- if (!pipe->writers) {
42659+ if (!atomic_read(&pipe->writers)) {
42660 if ((filp->f_flags & O_NONBLOCK)) {
42661 /* suppress POLLHUP until we have
42662 * seen a writer */
42663@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42664 * errno=ENXIO when there is no process reading the FIFO.
42665 */
42666 ret = -ENXIO;
42667- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42668+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42669 goto err;
42670
42671 filp->f_op = &write_pipefifo_fops;
42672 pipe->w_counter++;
42673- if (!pipe->writers++)
42674+ if (atomic_inc_return(&pipe->writers) == 1)
42675 wake_up_partner(inode);
42676
42677- if (!pipe->readers) {
42678+ if (!atomic_read(&pipe->readers)) {
42679 wait_for_partner(inode, &pipe->r_counter);
42680 if (signal_pending(current))
42681 goto err_wr;
42682@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42683 */
42684 filp->f_op = &rdwr_pipefifo_fops;
42685
42686- pipe->readers++;
42687- pipe->writers++;
42688+ atomic_inc(&pipe->readers);
42689+ atomic_inc(&pipe->writers);
42690 pipe->r_counter++;
42691 pipe->w_counter++;
42692- if (pipe->readers == 1 || pipe->writers == 1)
42693+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42694 wake_up_partner(inode);
42695 break;
42696
42697@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42698 return 0;
42699
42700 err_rd:
42701- if (!--pipe->readers)
42702+ if (atomic_dec_and_test(&pipe->readers))
42703 wake_up_interruptible(&pipe->wait);
42704 ret = -ERESTARTSYS;
42705 goto err;
42706
42707 err_wr:
42708- if (!--pipe->writers)
42709+ if (atomic_dec_and_test(&pipe->writers))
42710 wake_up_interruptible(&pipe->wait);
42711 ret = -ERESTARTSYS;
42712 goto err;
42713
42714 err:
42715- if (!pipe->readers && !pipe->writers)
42716+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42717 free_pipe_info(inode);
42718
42719 err_nocleanup:
42720diff --git a/fs/file.c b/fs/file.c
42721index 4c6992d..104cdea 100644
42722--- a/fs/file.c
42723+++ b/fs/file.c
42724@@ -15,6 +15,7 @@
42725 #include <linux/slab.h>
42726 #include <linux/vmalloc.h>
42727 #include <linux/file.h>
42728+#include <linux/security.h>
42729 #include <linux/fdtable.h>
42730 #include <linux/bitops.h>
42731 #include <linux/interrupt.h>
42732@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42733 * N.B. For clone tasks sharing a files structure, this test
42734 * will limit the total number of files that can be opened.
42735 */
42736+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42737 if (nr >= rlimit(RLIMIT_NOFILE))
42738 return -EMFILE;
42739
42740diff --git a/fs/filesystems.c b/fs/filesystems.c
42741index 0845f84..7b4ebef 100644
42742--- a/fs/filesystems.c
42743+++ b/fs/filesystems.c
42744@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42745 int len = dot ? dot - name : strlen(name);
42746
42747 fs = __get_fs_type(name, len);
42748+
42749+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42750+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42751+#else
42752 if (!fs && (request_module("%.*s", len, name) == 0))
42753+#endif
42754 fs = __get_fs_type(name, len);
42755
42756 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42757diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42758index 78b519c..212c0d0 100644
42759--- a/fs/fs_struct.c
42760+++ b/fs/fs_struct.c
42761@@ -4,6 +4,7 @@
42762 #include <linux/path.h>
42763 #include <linux/slab.h>
42764 #include <linux/fs_struct.h>
42765+#include <linux/grsecurity.h>
42766 #include "internal.h"
42767
42768 static inline void path_get_longterm(struct path *path)
42769@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42770 old_root = fs->root;
42771 fs->root = *path;
42772 path_get_longterm(path);
42773+ gr_set_chroot_entries(current, path);
42774 write_seqcount_end(&fs->seq);
42775 spin_unlock(&fs->lock);
42776 if (old_root.dentry)
42777@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42778 && fs->root.mnt == old_root->mnt) {
42779 path_get_longterm(new_root);
42780 fs->root = *new_root;
42781+ gr_set_chroot_entries(p, new_root);
42782 count++;
42783 }
42784 if (fs->pwd.dentry == old_root->dentry
42785@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42786 spin_lock(&fs->lock);
42787 write_seqcount_begin(&fs->seq);
42788 tsk->fs = NULL;
42789- kill = !--fs->users;
42790+ gr_clear_chroot_entries(tsk);
42791+ kill = !atomic_dec_return(&fs->users);
42792 write_seqcount_end(&fs->seq);
42793 spin_unlock(&fs->lock);
42794 task_unlock(tsk);
42795@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42796 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42797 /* We don't need to lock fs - think why ;-) */
42798 if (fs) {
42799- fs->users = 1;
42800+ atomic_set(&fs->users, 1);
42801 fs->in_exec = 0;
42802 spin_lock_init(&fs->lock);
42803 seqcount_init(&fs->seq);
42804@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42805 spin_lock(&old->lock);
42806 fs->root = old->root;
42807 path_get_longterm(&fs->root);
42808+ /* instead of calling gr_set_chroot_entries here,
42809+ we call it from every caller of this function
42810+ */
42811 fs->pwd = old->pwd;
42812 path_get_longterm(&fs->pwd);
42813 spin_unlock(&old->lock);
42814@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42815
42816 task_lock(current);
42817 spin_lock(&fs->lock);
42818- kill = !--fs->users;
42819+ kill = !atomic_dec_return(&fs->users);
42820 current->fs = new_fs;
42821+ gr_set_chroot_entries(current, &new_fs->root);
42822 spin_unlock(&fs->lock);
42823 task_unlock(current);
42824
42825@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42826
42827 /* to be mentioned only in INIT_TASK */
42828 struct fs_struct init_fs = {
42829- .users = 1,
42830+ .users = ATOMIC_INIT(1),
42831 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42832 .seq = SEQCNT_ZERO,
42833 .umask = 0022,
42834@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42835 task_lock(current);
42836
42837 spin_lock(&init_fs.lock);
42838- init_fs.users++;
42839+ atomic_inc(&init_fs.users);
42840 spin_unlock(&init_fs.lock);
42841
42842 spin_lock(&fs->lock);
42843 current->fs = &init_fs;
42844- kill = !--fs->users;
42845+ gr_set_chroot_entries(current, &current->fs->root);
42846+ kill = !atomic_dec_return(&fs->users);
42847 spin_unlock(&fs->lock);
42848
42849 task_unlock(current);
42850diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42851index 9905350..02eaec4 100644
42852--- a/fs/fscache/cookie.c
42853+++ b/fs/fscache/cookie.c
42854@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42855 parent ? (char *) parent->def->name : "<no-parent>",
42856 def->name, netfs_data);
42857
42858- fscache_stat(&fscache_n_acquires);
42859+ fscache_stat_unchecked(&fscache_n_acquires);
42860
42861 /* if there's no parent cookie, then we don't create one here either */
42862 if (!parent) {
42863- fscache_stat(&fscache_n_acquires_null);
42864+ fscache_stat_unchecked(&fscache_n_acquires_null);
42865 _leave(" [no parent]");
42866 return NULL;
42867 }
42868@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42869 /* allocate and initialise a cookie */
42870 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42871 if (!cookie) {
42872- fscache_stat(&fscache_n_acquires_oom);
42873+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42874 _leave(" [ENOMEM]");
42875 return NULL;
42876 }
42877@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42878
42879 switch (cookie->def->type) {
42880 case FSCACHE_COOKIE_TYPE_INDEX:
42881- fscache_stat(&fscache_n_cookie_index);
42882+ fscache_stat_unchecked(&fscache_n_cookie_index);
42883 break;
42884 case FSCACHE_COOKIE_TYPE_DATAFILE:
42885- fscache_stat(&fscache_n_cookie_data);
42886+ fscache_stat_unchecked(&fscache_n_cookie_data);
42887 break;
42888 default:
42889- fscache_stat(&fscache_n_cookie_special);
42890+ fscache_stat_unchecked(&fscache_n_cookie_special);
42891 break;
42892 }
42893
42894@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42895 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42896 atomic_dec(&parent->n_children);
42897 __fscache_cookie_put(cookie);
42898- fscache_stat(&fscache_n_acquires_nobufs);
42899+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42900 _leave(" = NULL");
42901 return NULL;
42902 }
42903 }
42904
42905- fscache_stat(&fscache_n_acquires_ok);
42906+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42907 _leave(" = %p", cookie);
42908 return cookie;
42909 }
42910@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42911 cache = fscache_select_cache_for_object(cookie->parent);
42912 if (!cache) {
42913 up_read(&fscache_addremove_sem);
42914- fscache_stat(&fscache_n_acquires_no_cache);
42915+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42916 _leave(" = -ENOMEDIUM [no cache]");
42917 return -ENOMEDIUM;
42918 }
42919@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42920 object = cache->ops->alloc_object(cache, cookie);
42921 fscache_stat_d(&fscache_n_cop_alloc_object);
42922 if (IS_ERR(object)) {
42923- fscache_stat(&fscache_n_object_no_alloc);
42924+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42925 ret = PTR_ERR(object);
42926 goto error;
42927 }
42928
42929- fscache_stat(&fscache_n_object_alloc);
42930+ fscache_stat_unchecked(&fscache_n_object_alloc);
42931
42932 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42933
42934@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42935 struct fscache_object *object;
42936 struct hlist_node *_p;
42937
42938- fscache_stat(&fscache_n_updates);
42939+ fscache_stat_unchecked(&fscache_n_updates);
42940
42941 if (!cookie) {
42942- fscache_stat(&fscache_n_updates_null);
42943+ fscache_stat_unchecked(&fscache_n_updates_null);
42944 _leave(" [no cookie]");
42945 return;
42946 }
42947@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42948 struct fscache_object *object;
42949 unsigned long event;
42950
42951- fscache_stat(&fscache_n_relinquishes);
42952+ fscache_stat_unchecked(&fscache_n_relinquishes);
42953 if (retire)
42954- fscache_stat(&fscache_n_relinquishes_retire);
42955+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42956
42957 if (!cookie) {
42958- fscache_stat(&fscache_n_relinquishes_null);
42959+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42960 _leave(" [no cookie]");
42961 return;
42962 }
42963@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42964
42965 /* wait for the cookie to finish being instantiated (or to fail) */
42966 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42967- fscache_stat(&fscache_n_relinquishes_waitcrt);
42968+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42969 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42970 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42971 }
42972diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42973index f6aad48..88dcf26 100644
42974--- a/fs/fscache/internal.h
42975+++ b/fs/fscache/internal.h
42976@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42977 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42978 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42979
42980-extern atomic_t fscache_n_op_pend;
42981-extern atomic_t fscache_n_op_run;
42982-extern atomic_t fscache_n_op_enqueue;
42983-extern atomic_t fscache_n_op_deferred_release;
42984-extern atomic_t fscache_n_op_release;
42985-extern atomic_t fscache_n_op_gc;
42986-extern atomic_t fscache_n_op_cancelled;
42987-extern atomic_t fscache_n_op_rejected;
42988+extern atomic_unchecked_t fscache_n_op_pend;
42989+extern atomic_unchecked_t fscache_n_op_run;
42990+extern atomic_unchecked_t fscache_n_op_enqueue;
42991+extern atomic_unchecked_t fscache_n_op_deferred_release;
42992+extern atomic_unchecked_t fscache_n_op_release;
42993+extern atomic_unchecked_t fscache_n_op_gc;
42994+extern atomic_unchecked_t fscache_n_op_cancelled;
42995+extern atomic_unchecked_t fscache_n_op_rejected;
42996
42997-extern atomic_t fscache_n_attr_changed;
42998-extern atomic_t fscache_n_attr_changed_ok;
42999-extern atomic_t fscache_n_attr_changed_nobufs;
43000-extern atomic_t fscache_n_attr_changed_nomem;
43001-extern atomic_t fscache_n_attr_changed_calls;
43002+extern atomic_unchecked_t fscache_n_attr_changed;
43003+extern atomic_unchecked_t fscache_n_attr_changed_ok;
43004+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43005+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43006+extern atomic_unchecked_t fscache_n_attr_changed_calls;
43007
43008-extern atomic_t fscache_n_allocs;
43009-extern atomic_t fscache_n_allocs_ok;
43010-extern atomic_t fscache_n_allocs_wait;
43011-extern atomic_t fscache_n_allocs_nobufs;
43012-extern atomic_t fscache_n_allocs_intr;
43013-extern atomic_t fscache_n_allocs_object_dead;
43014-extern atomic_t fscache_n_alloc_ops;
43015-extern atomic_t fscache_n_alloc_op_waits;
43016+extern atomic_unchecked_t fscache_n_allocs;
43017+extern atomic_unchecked_t fscache_n_allocs_ok;
43018+extern atomic_unchecked_t fscache_n_allocs_wait;
43019+extern atomic_unchecked_t fscache_n_allocs_nobufs;
43020+extern atomic_unchecked_t fscache_n_allocs_intr;
43021+extern atomic_unchecked_t fscache_n_allocs_object_dead;
43022+extern atomic_unchecked_t fscache_n_alloc_ops;
43023+extern atomic_unchecked_t fscache_n_alloc_op_waits;
43024
43025-extern atomic_t fscache_n_retrievals;
43026-extern atomic_t fscache_n_retrievals_ok;
43027-extern atomic_t fscache_n_retrievals_wait;
43028-extern atomic_t fscache_n_retrievals_nodata;
43029-extern atomic_t fscache_n_retrievals_nobufs;
43030-extern atomic_t fscache_n_retrievals_intr;
43031-extern atomic_t fscache_n_retrievals_nomem;
43032-extern atomic_t fscache_n_retrievals_object_dead;
43033-extern atomic_t fscache_n_retrieval_ops;
43034-extern atomic_t fscache_n_retrieval_op_waits;
43035+extern atomic_unchecked_t fscache_n_retrievals;
43036+extern atomic_unchecked_t fscache_n_retrievals_ok;
43037+extern atomic_unchecked_t fscache_n_retrievals_wait;
43038+extern atomic_unchecked_t fscache_n_retrievals_nodata;
43039+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43040+extern atomic_unchecked_t fscache_n_retrievals_intr;
43041+extern atomic_unchecked_t fscache_n_retrievals_nomem;
43042+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43043+extern atomic_unchecked_t fscache_n_retrieval_ops;
43044+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43045
43046-extern atomic_t fscache_n_stores;
43047-extern atomic_t fscache_n_stores_ok;
43048-extern atomic_t fscache_n_stores_again;
43049-extern atomic_t fscache_n_stores_nobufs;
43050-extern atomic_t fscache_n_stores_oom;
43051-extern atomic_t fscache_n_store_ops;
43052-extern atomic_t fscache_n_store_calls;
43053-extern atomic_t fscache_n_store_pages;
43054-extern atomic_t fscache_n_store_radix_deletes;
43055-extern atomic_t fscache_n_store_pages_over_limit;
43056+extern atomic_unchecked_t fscache_n_stores;
43057+extern atomic_unchecked_t fscache_n_stores_ok;
43058+extern atomic_unchecked_t fscache_n_stores_again;
43059+extern atomic_unchecked_t fscache_n_stores_nobufs;
43060+extern atomic_unchecked_t fscache_n_stores_oom;
43061+extern atomic_unchecked_t fscache_n_store_ops;
43062+extern atomic_unchecked_t fscache_n_store_calls;
43063+extern atomic_unchecked_t fscache_n_store_pages;
43064+extern atomic_unchecked_t fscache_n_store_radix_deletes;
43065+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43066
43067-extern atomic_t fscache_n_store_vmscan_not_storing;
43068-extern atomic_t fscache_n_store_vmscan_gone;
43069-extern atomic_t fscache_n_store_vmscan_busy;
43070-extern atomic_t fscache_n_store_vmscan_cancelled;
43071+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43072+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43073+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43074+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43075
43076-extern atomic_t fscache_n_marks;
43077-extern atomic_t fscache_n_uncaches;
43078+extern atomic_unchecked_t fscache_n_marks;
43079+extern atomic_unchecked_t fscache_n_uncaches;
43080
43081-extern atomic_t fscache_n_acquires;
43082-extern atomic_t fscache_n_acquires_null;
43083-extern atomic_t fscache_n_acquires_no_cache;
43084-extern atomic_t fscache_n_acquires_ok;
43085-extern atomic_t fscache_n_acquires_nobufs;
43086-extern atomic_t fscache_n_acquires_oom;
43087+extern atomic_unchecked_t fscache_n_acquires;
43088+extern atomic_unchecked_t fscache_n_acquires_null;
43089+extern atomic_unchecked_t fscache_n_acquires_no_cache;
43090+extern atomic_unchecked_t fscache_n_acquires_ok;
43091+extern atomic_unchecked_t fscache_n_acquires_nobufs;
43092+extern atomic_unchecked_t fscache_n_acquires_oom;
43093
43094-extern atomic_t fscache_n_updates;
43095-extern atomic_t fscache_n_updates_null;
43096-extern atomic_t fscache_n_updates_run;
43097+extern atomic_unchecked_t fscache_n_updates;
43098+extern atomic_unchecked_t fscache_n_updates_null;
43099+extern atomic_unchecked_t fscache_n_updates_run;
43100
43101-extern atomic_t fscache_n_relinquishes;
43102-extern atomic_t fscache_n_relinquishes_null;
43103-extern atomic_t fscache_n_relinquishes_waitcrt;
43104-extern atomic_t fscache_n_relinquishes_retire;
43105+extern atomic_unchecked_t fscache_n_relinquishes;
43106+extern atomic_unchecked_t fscache_n_relinquishes_null;
43107+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43108+extern atomic_unchecked_t fscache_n_relinquishes_retire;
43109
43110-extern atomic_t fscache_n_cookie_index;
43111-extern atomic_t fscache_n_cookie_data;
43112-extern atomic_t fscache_n_cookie_special;
43113+extern atomic_unchecked_t fscache_n_cookie_index;
43114+extern atomic_unchecked_t fscache_n_cookie_data;
43115+extern atomic_unchecked_t fscache_n_cookie_special;
43116
43117-extern atomic_t fscache_n_object_alloc;
43118-extern atomic_t fscache_n_object_no_alloc;
43119-extern atomic_t fscache_n_object_lookups;
43120-extern atomic_t fscache_n_object_lookups_negative;
43121-extern atomic_t fscache_n_object_lookups_positive;
43122-extern atomic_t fscache_n_object_lookups_timed_out;
43123-extern atomic_t fscache_n_object_created;
43124-extern atomic_t fscache_n_object_avail;
43125-extern atomic_t fscache_n_object_dead;
43126+extern atomic_unchecked_t fscache_n_object_alloc;
43127+extern atomic_unchecked_t fscache_n_object_no_alloc;
43128+extern atomic_unchecked_t fscache_n_object_lookups;
43129+extern atomic_unchecked_t fscache_n_object_lookups_negative;
43130+extern atomic_unchecked_t fscache_n_object_lookups_positive;
43131+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43132+extern atomic_unchecked_t fscache_n_object_created;
43133+extern atomic_unchecked_t fscache_n_object_avail;
43134+extern atomic_unchecked_t fscache_n_object_dead;
43135
43136-extern atomic_t fscache_n_checkaux_none;
43137-extern atomic_t fscache_n_checkaux_okay;
43138-extern atomic_t fscache_n_checkaux_update;
43139-extern atomic_t fscache_n_checkaux_obsolete;
43140+extern atomic_unchecked_t fscache_n_checkaux_none;
43141+extern atomic_unchecked_t fscache_n_checkaux_okay;
43142+extern atomic_unchecked_t fscache_n_checkaux_update;
43143+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43144
43145 extern atomic_t fscache_n_cop_alloc_object;
43146 extern atomic_t fscache_n_cop_lookup_object;
43147@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43148 atomic_inc(stat);
43149 }
43150
43151+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43152+{
43153+ atomic_inc_unchecked(stat);
43154+}
43155+
43156 static inline void fscache_stat_d(atomic_t *stat)
43157 {
43158 atomic_dec(stat);
43159@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43160
43161 #define __fscache_stat(stat) (NULL)
43162 #define fscache_stat(stat) do {} while (0)
43163+#define fscache_stat_unchecked(stat) do {} while (0)
43164 #define fscache_stat_d(stat) do {} while (0)
43165 #endif
43166
43167diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43168index b6b897c..0ffff9c 100644
43169--- a/fs/fscache/object.c
43170+++ b/fs/fscache/object.c
43171@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43172 /* update the object metadata on disk */
43173 case FSCACHE_OBJECT_UPDATING:
43174 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43175- fscache_stat(&fscache_n_updates_run);
43176+ fscache_stat_unchecked(&fscache_n_updates_run);
43177 fscache_stat(&fscache_n_cop_update_object);
43178 object->cache->ops->update_object(object);
43179 fscache_stat_d(&fscache_n_cop_update_object);
43180@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43181 spin_lock(&object->lock);
43182 object->state = FSCACHE_OBJECT_DEAD;
43183 spin_unlock(&object->lock);
43184- fscache_stat(&fscache_n_object_dead);
43185+ fscache_stat_unchecked(&fscache_n_object_dead);
43186 goto terminal_transit;
43187
43188 /* handle the parent cache of this object being withdrawn from
43189@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43190 spin_lock(&object->lock);
43191 object->state = FSCACHE_OBJECT_DEAD;
43192 spin_unlock(&object->lock);
43193- fscache_stat(&fscache_n_object_dead);
43194+ fscache_stat_unchecked(&fscache_n_object_dead);
43195 goto terminal_transit;
43196
43197 /* complain about the object being woken up once it is
43198@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43199 parent->cookie->def->name, cookie->def->name,
43200 object->cache->tag->name);
43201
43202- fscache_stat(&fscache_n_object_lookups);
43203+ fscache_stat_unchecked(&fscache_n_object_lookups);
43204 fscache_stat(&fscache_n_cop_lookup_object);
43205 ret = object->cache->ops->lookup_object(object);
43206 fscache_stat_d(&fscache_n_cop_lookup_object);
43207@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43208 if (ret == -ETIMEDOUT) {
43209 /* probably stuck behind another object, so move this one to
43210 * the back of the queue */
43211- fscache_stat(&fscache_n_object_lookups_timed_out);
43212+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43213 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43214 }
43215
43216@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43217
43218 spin_lock(&object->lock);
43219 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43220- fscache_stat(&fscache_n_object_lookups_negative);
43221+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43222
43223 /* transit here to allow write requests to begin stacking up
43224 * and read requests to begin returning ENODATA */
43225@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43226 * result, in which case there may be data available */
43227 spin_lock(&object->lock);
43228 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43229- fscache_stat(&fscache_n_object_lookups_positive);
43230+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43231
43232 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43233
43234@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43235 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43236 } else {
43237 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43238- fscache_stat(&fscache_n_object_created);
43239+ fscache_stat_unchecked(&fscache_n_object_created);
43240
43241 object->state = FSCACHE_OBJECT_AVAILABLE;
43242 spin_unlock(&object->lock);
43243@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43244 fscache_enqueue_dependents(object);
43245
43246 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43247- fscache_stat(&fscache_n_object_avail);
43248+ fscache_stat_unchecked(&fscache_n_object_avail);
43249
43250 _leave("");
43251 }
43252@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43253 enum fscache_checkaux result;
43254
43255 if (!object->cookie->def->check_aux) {
43256- fscache_stat(&fscache_n_checkaux_none);
43257+ fscache_stat_unchecked(&fscache_n_checkaux_none);
43258 return FSCACHE_CHECKAUX_OKAY;
43259 }
43260
43261@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43262 switch (result) {
43263 /* entry okay as is */
43264 case FSCACHE_CHECKAUX_OKAY:
43265- fscache_stat(&fscache_n_checkaux_okay);
43266+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
43267 break;
43268
43269 /* entry requires update */
43270 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43271- fscache_stat(&fscache_n_checkaux_update);
43272+ fscache_stat_unchecked(&fscache_n_checkaux_update);
43273 break;
43274
43275 /* entry requires deletion */
43276 case FSCACHE_CHECKAUX_OBSOLETE:
43277- fscache_stat(&fscache_n_checkaux_obsolete);
43278+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43279 break;
43280
43281 default:
43282diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43283index 30afdfa..2256596 100644
43284--- a/fs/fscache/operation.c
43285+++ b/fs/fscache/operation.c
43286@@ -17,7 +17,7 @@
43287 #include <linux/slab.h>
43288 #include "internal.h"
43289
43290-atomic_t fscache_op_debug_id;
43291+atomic_unchecked_t fscache_op_debug_id;
43292 EXPORT_SYMBOL(fscache_op_debug_id);
43293
43294 /**
43295@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43296 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43297 ASSERTCMP(atomic_read(&op->usage), >, 0);
43298
43299- fscache_stat(&fscache_n_op_enqueue);
43300+ fscache_stat_unchecked(&fscache_n_op_enqueue);
43301 switch (op->flags & FSCACHE_OP_TYPE) {
43302 case FSCACHE_OP_ASYNC:
43303 _debug("queue async");
43304@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43305 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43306 if (op->processor)
43307 fscache_enqueue_operation(op);
43308- fscache_stat(&fscache_n_op_run);
43309+ fscache_stat_unchecked(&fscache_n_op_run);
43310 }
43311
43312 /*
43313@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43314 if (object->n_ops > 1) {
43315 atomic_inc(&op->usage);
43316 list_add_tail(&op->pend_link, &object->pending_ops);
43317- fscache_stat(&fscache_n_op_pend);
43318+ fscache_stat_unchecked(&fscache_n_op_pend);
43319 } else if (!list_empty(&object->pending_ops)) {
43320 atomic_inc(&op->usage);
43321 list_add_tail(&op->pend_link, &object->pending_ops);
43322- fscache_stat(&fscache_n_op_pend);
43323+ fscache_stat_unchecked(&fscache_n_op_pend);
43324 fscache_start_operations(object);
43325 } else {
43326 ASSERTCMP(object->n_in_progress, ==, 0);
43327@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43328 object->n_exclusive++; /* reads and writes must wait */
43329 atomic_inc(&op->usage);
43330 list_add_tail(&op->pend_link, &object->pending_ops);
43331- fscache_stat(&fscache_n_op_pend);
43332+ fscache_stat_unchecked(&fscache_n_op_pend);
43333 ret = 0;
43334 } else {
43335 /* not allowed to submit ops in any other state */
43336@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43337 if (object->n_exclusive > 0) {
43338 atomic_inc(&op->usage);
43339 list_add_tail(&op->pend_link, &object->pending_ops);
43340- fscache_stat(&fscache_n_op_pend);
43341+ fscache_stat_unchecked(&fscache_n_op_pend);
43342 } else if (!list_empty(&object->pending_ops)) {
43343 atomic_inc(&op->usage);
43344 list_add_tail(&op->pend_link, &object->pending_ops);
43345- fscache_stat(&fscache_n_op_pend);
43346+ fscache_stat_unchecked(&fscache_n_op_pend);
43347 fscache_start_operations(object);
43348 } else {
43349 ASSERTCMP(object->n_exclusive, ==, 0);
43350@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43351 object->n_ops++;
43352 atomic_inc(&op->usage);
43353 list_add_tail(&op->pend_link, &object->pending_ops);
43354- fscache_stat(&fscache_n_op_pend);
43355+ fscache_stat_unchecked(&fscache_n_op_pend);
43356 ret = 0;
43357 } else if (object->state == FSCACHE_OBJECT_DYING ||
43358 object->state == FSCACHE_OBJECT_LC_DYING ||
43359 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43360- fscache_stat(&fscache_n_op_rejected);
43361+ fscache_stat_unchecked(&fscache_n_op_rejected);
43362 ret = -ENOBUFS;
43363 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43364 fscache_report_unexpected_submission(object, op, ostate);
43365@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43366
43367 ret = -EBUSY;
43368 if (!list_empty(&op->pend_link)) {
43369- fscache_stat(&fscache_n_op_cancelled);
43370+ fscache_stat_unchecked(&fscache_n_op_cancelled);
43371 list_del_init(&op->pend_link);
43372 object->n_ops--;
43373 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43374@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43375 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43376 BUG();
43377
43378- fscache_stat(&fscache_n_op_release);
43379+ fscache_stat_unchecked(&fscache_n_op_release);
43380
43381 if (op->release) {
43382 op->release(op);
43383@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43384 * lock, and defer it otherwise */
43385 if (!spin_trylock(&object->lock)) {
43386 _debug("defer put");
43387- fscache_stat(&fscache_n_op_deferred_release);
43388+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43389
43390 cache = object->cache;
43391 spin_lock(&cache->op_gc_list_lock);
43392@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43393
43394 _debug("GC DEFERRED REL OBJ%x OP%x",
43395 object->debug_id, op->debug_id);
43396- fscache_stat(&fscache_n_op_gc);
43397+ fscache_stat_unchecked(&fscache_n_op_gc);
43398
43399 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43400
43401diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43402index 3f7a59b..cf196cc 100644
43403--- a/fs/fscache/page.c
43404+++ b/fs/fscache/page.c
43405@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43406 val = radix_tree_lookup(&cookie->stores, page->index);
43407 if (!val) {
43408 rcu_read_unlock();
43409- fscache_stat(&fscache_n_store_vmscan_not_storing);
43410+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43411 __fscache_uncache_page(cookie, page);
43412 return true;
43413 }
43414@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43415 spin_unlock(&cookie->stores_lock);
43416
43417 if (xpage) {
43418- fscache_stat(&fscache_n_store_vmscan_cancelled);
43419- fscache_stat(&fscache_n_store_radix_deletes);
43420+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43421+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43422 ASSERTCMP(xpage, ==, page);
43423 } else {
43424- fscache_stat(&fscache_n_store_vmscan_gone);
43425+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43426 }
43427
43428 wake_up_bit(&cookie->flags, 0);
43429@@ -107,7 +107,7 @@ page_busy:
43430 /* we might want to wait here, but that could deadlock the allocator as
43431 * the work threads writing to the cache may all end up sleeping
43432 * on memory allocation */
43433- fscache_stat(&fscache_n_store_vmscan_busy);
43434+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43435 return false;
43436 }
43437 EXPORT_SYMBOL(__fscache_maybe_release_page);
43438@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43439 FSCACHE_COOKIE_STORING_TAG);
43440 if (!radix_tree_tag_get(&cookie->stores, page->index,
43441 FSCACHE_COOKIE_PENDING_TAG)) {
43442- fscache_stat(&fscache_n_store_radix_deletes);
43443+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43444 xpage = radix_tree_delete(&cookie->stores, page->index);
43445 }
43446 spin_unlock(&cookie->stores_lock);
43447@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43448
43449 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43450
43451- fscache_stat(&fscache_n_attr_changed_calls);
43452+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43453
43454 if (fscache_object_is_active(object)) {
43455 fscache_stat(&fscache_n_cop_attr_changed);
43456@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43457
43458 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43459
43460- fscache_stat(&fscache_n_attr_changed);
43461+ fscache_stat_unchecked(&fscache_n_attr_changed);
43462
43463 op = kzalloc(sizeof(*op), GFP_KERNEL);
43464 if (!op) {
43465- fscache_stat(&fscache_n_attr_changed_nomem);
43466+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43467 _leave(" = -ENOMEM");
43468 return -ENOMEM;
43469 }
43470@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43471 if (fscache_submit_exclusive_op(object, op) < 0)
43472 goto nobufs;
43473 spin_unlock(&cookie->lock);
43474- fscache_stat(&fscache_n_attr_changed_ok);
43475+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43476 fscache_put_operation(op);
43477 _leave(" = 0");
43478 return 0;
43479@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43480 nobufs:
43481 spin_unlock(&cookie->lock);
43482 kfree(op);
43483- fscache_stat(&fscache_n_attr_changed_nobufs);
43484+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43485 _leave(" = %d", -ENOBUFS);
43486 return -ENOBUFS;
43487 }
43488@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43489 /* allocate a retrieval operation and attempt to submit it */
43490 op = kzalloc(sizeof(*op), GFP_NOIO);
43491 if (!op) {
43492- fscache_stat(&fscache_n_retrievals_nomem);
43493+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43494 return NULL;
43495 }
43496
43497@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43498 return 0;
43499 }
43500
43501- fscache_stat(&fscache_n_retrievals_wait);
43502+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43503
43504 jif = jiffies;
43505 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43506 fscache_wait_bit_interruptible,
43507 TASK_INTERRUPTIBLE) != 0) {
43508- fscache_stat(&fscache_n_retrievals_intr);
43509+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43510 _leave(" = -ERESTARTSYS");
43511 return -ERESTARTSYS;
43512 }
43513@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43514 */
43515 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43516 struct fscache_retrieval *op,
43517- atomic_t *stat_op_waits,
43518- atomic_t *stat_object_dead)
43519+ atomic_unchecked_t *stat_op_waits,
43520+ atomic_unchecked_t *stat_object_dead)
43521 {
43522 int ret;
43523
43524@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43525 goto check_if_dead;
43526
43527 _debug(">>> WT");
43528- fscache_stat(stat_op_waits);
43529+ fscache_stat_unchecked(stat_op_waits);
43530 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43531 fscache_wait_bit_interruptible,
43532 TASK_INTERRUPTIBLE) < 0) {
43533@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43534
43535 check_if_dead:
43536 if (unlikely(fscache_object_is_dead(object))) {
43537- fscache_stat(stat_object_dead);
43538+ fscache_stat_unchecked(stat_object_dead);
43539 return -ENOBUFS;
43540 }
43541 return 0;
43542@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43543
43544 _enter("%p,%p,,,", cookie, page);
43545
43546- fscache_stat(&fscache_n_retrievals);
43547+ fscache_stat_unchecked(&fscache_n_retrievals);
43548
43549 if (hlist_empty(&cookie->backing_objects))
43550 goto nobufs;
43551@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43552 goto nobufs_unlock;
43553 spin_unlock(&cookie->lock);
43554
43555- fscache_stat(&fscache_n_retrieval_ops);
43556+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43557
43558 /* pin the netfs read context in case we need to do the actual netfs
43559 * read because we've encountered a cache read failure */
43560@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43561
43562 error:
43563 if (ret == -ENOMEM)
43564- fscache_stat(&fscache_n_retrievals_nomem);
43565+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43566 else if (ret == -ERESTARTSYS)
43567- fscache_stat(&fscache_n_retrievals_intr);
43568+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43569 else if (ret == -ENODATA)
43570- fscache_stat(&fscache_n_retrievals_nodata);
43571+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43572 else if (ret < 0)
43573- fscache_stat(&fscache_n_retrievals_nobufs);
43574+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43575 else
43576- fscache_stat(&fscache_n_retrievals_ok);
43577+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43578
43579 fscache_put_retrieval(op);
43580 _leave(" = %d", ret);
43581@@ -429,7 +429,7 @@ nobufs_unlock:
43582 spin_unlock(&cookie->lock);
43583 kfree(op);
43584 nobufs:
43585- fscache_stat(&fscache_n_retrievals_nobufs);
43586+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43587 _leave(" = -ENOBUFS");
43588 return -ENOBUFS;
43589 }
43590@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43591
43592 _enter("%p,,%d,,,", cookie, *nr_pages);
43593
43594- fscache_stat(&fscache_n_retrievals);
43595+ fscache_stat_unchecked(&fscache_n_retrievals);
43596
43597 if (hlist_empty(&cookie->backing_objects))
43598 goto nobufs;
43599@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43600 goto nobufs_unlock;
43601 spin_unlock(&cookie->lock);
43602
43603- fscache_stat(&fscache_n_retrieval_ops);
43604+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43605
43606 /* pin the netfs read context in case we need to do the actual netfs
43607 * read because we've encountered a cache read failure */
43608@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43609
43610 error:
43611 if (ret == -ENOMEM)
43612- fscache_stat(&fscache_n_retrievals_nomem);
43613+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43614 else if (ret == -ERESTARTSYS)
43615- fscache_stat(&fscache_n_retrievals_intr);
43616+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43617 else if (ret == -ENODATA)
43618- fscache_stat(&fscache_n_retrievals_nodata);
43619+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43620 else if (ret < 0)
43621- fscache_stat(&fscache_n_retrievals_nobufs);
43622+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43623 else
43624- fscache_stat(&fscache_n_retrievals_ok);
43625+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43626
43627 fscache_put_retrieval(op);
43628 _leave(" = %d", ret);
43629@@ -545,7 +545,7 @@ nobufs_unlock:
43630 spin_unlock(&cookie->lock);
43631 kfree(op);
43632 nobufs:
43633- fscache_stat(&fscache_n_retrievals_nobufs);
43634+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43635 _leave(" = -ENOBUFS");
43636 return -ENOBUFS;
43637 }
43638@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43639
43640 _enter("%p,%p,,,", cookie, page);
43641
43642- fscache_stat(&fscache_n_allocs);
43643+ fscache_stat_unchecked(&fscache_n_allocs);
43644
43645 if (hlist_empty(&cookie->backing_objects))
43646 goto nobufs;
43647@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43648 goto nobufs_unlock;
43649 spin_unlock(&cookie->lock);
43650
43651- fscache_stat(&fscache_n_alloc_ops);
43652+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43653
43654 ret = fscache_wait_for_retrieval_activation(
43655 object, op,
43656@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43657
43658 error:
43659 if (ret == -ERESTARTSYS)
43660- fscache_stat(&fscache_n_allocs_intr);
43661+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43662 else if (ret < 0)
43663- fscache_stat(&fscache_n_allocs_nobufs);
43664+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43665 else
43666- fscache_stat(&fscache_n_allocs_ok);
43667+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43668
43669 fscache_put_retrieval(op);
43670 _leave(" = %d", ret);
43671@@ -625,7 +625,7 @@ nobufs_unlock:
43672 spin_unlock(&cookie->lock);
43673 kfree(op);
43674 nobufs:
43675- fscache_stat(&fscache_n_allocs_nobufs);
43676+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43677 _leave(" = -ENOBUFS");
43678 return -ENOBUFS;
43679 }
43680@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43681
43682 spin_lock(&cookie->stores_lock);
43683
43684- fscache_stat(&fscache_n_store_calls);
43685+ fscache_stat_unchecked(&fscache_n_store_calls);
43686
43687 /* find a page to store */
43688 page = NULL;
43689@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43690 page = results[0];
43691 _debug("gang %d [%lx]", n, page->index);
43692 if (page->index > op->store_limit) {
43693- fscache_stat(&fscache_n_store_pages_over_limit);
43694+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43695 goto superseded;
43696 }
43697
43698@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43699 spin_unlock(&cookie->stores_lock);
43700 spin_unlock(&object->lock);
43701
43702- fscache_stat(&fscache_n_store_pages);
43703+ fscache_stat_unchecked(&fscache_n_store_pages);
43704 fscache_stat(&fscache_n_cop_write_page);
43705 ret = object->cache->ops->write_page(op, page);
43706 fscache_stat_d(&fscache_n_cop_write_page);
43707@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43708 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43709 ASSERT(PageFsCache(page));
43710
43711- fscache_stat(&fscache_n_stores);
43712+ fscache_stat_unchecked(&fscache_n_stores);
43713
43714 op = kzalloc(sizeof(*op), GFP_NOIO);
43715 if (!op)
43716@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43717 spin_unlock(&cookie->stores_lock);
43718 spin_unlock(&object->lock);
43719
43720- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43721+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43722 op->store_limit = object->store_limit;
43723
43724 if (fscache_submit_op(object, &op->op) < 0)
43725@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43726
43727 spin_unlock(&cookie->lock);
43728 radix_tree_preload_end();
43729- fscache_stat(&fscache_n_store_ops);
43730- fscache_stat(&fscache_n_stores_ok);
43731+ fscache_stat_unchecked(&fscache_n_store_ops);
43732+ fscache_stat_unchecked(&fscache_n_stores_ok);
43733
43734 /* the work queue now carries its own ref on the object */
43735 fscache_put_operation(&op->op);
43736@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43737 return 0;
43738
43739 already_queued:
43740- fscache_stat(&fscache_n_stores_again);
43741+ fscache_stat_unchecked(&fscache_n_stores_again);
43742 already_pending:
43743 spin_unlock(&cookie->stores_lock);
43744 spin_unlock(&object->lock);
43745 spin_unlock(&cookie->lock);
43746 radix_tree_preload_end();
43747 kfree(op);
43748- fscache_stat(&fscache_n_stores_ok);
43749+ fscache_stat_unchecked(&fscache_n_stores_ok);
43750 _leave(" = 0");
43751 return 0;
43752
43753@@ -851,14 +851,14 @@ nobufs:
43754 spin_unlock(&cookie->lock);
43755 radix_tree_preload_end();
43756 kfree(op);
43757- fscache_stat(&fscache_n_stores_nobufs);
43758+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43759 _leave(" = -ENOBUFS");
43760 return -ENOBUFS;
43761
43762 nomem_free:
43763 kfree(op);
43764 nomem:
43765- fscache_stat(&fscache_n_stores_oom);
43766+ fscache_stat_unchecked(&fscache_n_stores_oom);
43767 _leave(" = -ENOMEM");
43768 return -ENOMEM;
43769 }
43770@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43771 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43772 ASSERTCMP(page, !=, NULL);
43773
43774- fscache_stat(&fscache_n_uncaches);
43775+ fscache_stat_unchecked(&fscache_n_uncaches);
43776
43777 /* cache withdrawal may beat us to it */
43778 if (!PageFsCache(page))
43779@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43780 unsigned long loop;
43781
43782 #ifdef CONFIG_FSCACHE_STATS
43783- atomic_add(pagevec->nr, &fscache_n_marks);
43784+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43785 #endif
43786
43787 for (loop = 0; loop < pagevec->nr; loop++) {
43788diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43789index 4765190..2a067f2 100644
43790--- a/fs/fscache/stats.c
43791+++ b/fs/fscache/stats.c
43792@@ -18,95 +18,95 @@
43793 /*
43794 * operation counters
43795 */
43796-atomic_t fscache_n_op_pend;
43797-atomic_t fscache_n_op_run;
43798-atomic_t fscache_n_op_enqueue;
43799-atomic_t fscache_n_op_requeue;
43800-atomic_t fscache_n_op_deferred_release;
43801-atomic_t fscache_n_op_release;
43802-atomic_t fscache_n_op_gc;
43803-atomic_t fscache_n_op_cancelled;
43804-atomic_t fscache_n_op_rejected;
43805+atomic_unchecked_t fscache_n_op_pend;
43806+atomic_unchecked_t fscache_n_op_run;
43807+atomic_unchecked_t fscache_n_op_enqueue;
43808+atomic_unchecked_t fscache_n_op_requeue;
43809+atomic_unchecked_t fscache_n_op_deferred_release;
43810+atomic_unchecked_t fscache_n_op_release;
43811+atomic_unchecked_t fscache_n_op_gc;
43812+atomic_unchecked_t fscache_n_op_cancelled;
43813+atomic_unchecked_t fscache_n_op_rejected;
43814
43815-atomic_t fscache_n_attr_changed;
43816-atomic_t fscache_n_attr_changed_ok;
43817-atomic_t fscache_n_attr_changed_nobufs;
43818-atomic_t fscache_n_attr_changed_nomem;
43819-atomic_t fscache_n_attr_changed_calls;
43820+atomic_unchecked_t fscache_n_attr_changed;
43821+atomic_unchecked_t fscache_n_attr_changed_ok;
43822+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43823+atomic_unchecked_t fscache_n_attr_changed_nomem;
43824+atomic_unchecked_t fscache_n_attr_changed_calls;
43825
43826-atomic_t fscache_n_allocs;
43827-atomic_t fscache_n_allocs_ok;
43828-atomic_t fscache_n_allocs_wait;
43829-atomic_t fscache_n_allocs_nobufs;
43830-atomic_t fscache_n_allocs_intr;
43831-atomic_t fscache_n_allocs_object_dead;
43832-atomic_t fscache_n_alloc_ops;
43833-atomic_t fscache_n_alloc_op_waits;
43834+atomic_unchecked_t fscache_n_allocs;
43835+atomic_unchecked_t fscache_n_allocs_ok;
43836+atomic_unchecked_t fscache_n_allocs_wait;
43837+atomic_unchecked_t fscache_n_allocs_nobufs;
43838+atomic_unchecked_t fscache_n_allocs_intr;
43839+atomic_unchecked_t fscache_n_allocs_object_dead;
43840+atomic_unchecked_t fscache_n_alloc_ops;
43841+atomic_unchecked_t fscache_n_alloc_op_waits;
43842
43843-atomic_t fscache_n_retrievals;
43844-atomic_t fscache_n_retrievals_ok;
43845-atomic_t fscache_n_retrievals_wait;
43846-atomic_t fscache_n_retrievals_nodata;
43847-atomic_t fscache_n_retrievals_nobufs;
43848-atomic_t fscache_n_retrievals_intr;
43849-atomic_t fscache_n_retrievals_nomem;
43850-atomic_t fscache_n_retrievals_object_dead;
43851-atomic_t fscache_n_retrieval_ops;
43852-atomic_t fscache_n_retrieval_op_waits;
43853+atomic_unchecked_t fscache_n_retrievals;
43854+atomic_unchecked_t fscache_n_retrievals_ok;
43855+atomic_unchecked_t fscache_n_retrievals_wait;
43856+atomic_unchecked_t fscache_n_retrievals_nodata;
43857+atomic_unchecked_t fscache_n_retrievals_nobufs;
43858+atomic_unchecked_t fscache_n_retrievals_intr;
43859+atomic_unchecked_t fscache_n_retrievals_nomem;
43860+atomic_unchecked_t fscache_n_retrievals_object_dead;
43861+atomic_unchecked_t fscache_n_retrieval_ops;
43862+atomic_unchecked_t fscache_n_retrieval_op_waits;
43863
43864-atomic_t fscache_n_stores;
43865-atomic_t fscache_n_stores_ok;
43866-atomic_t fscache_n_stores_again;
43867-atomic_t fscache_n_stores_nobufs;
43868-atomic_t fscache_n_stores_oom;
43869-atomic_t fscache_n_store_ops;
43870-atomic_t fscache_n_store_calls;
43871-atomic_t fscache_n_store_pages;
43872-atomic_t fscache_n_store_radix_deletes;
43873-atomic_t fscache_n_store_pages_over_limit;
43874+atomic_unchecked_t fscache_n_stores;
43875+atomic_unchecked_t fscache_n_stores_ok;
43876+atomic_unchecked_t fscache_n_stores_again;
43877+atomic_unchecked_t fscache_n_stores_nobufs;
43878+atomic_unchecked_t fscache_n_stores_oom;
43879+atomic_unchecked_t fscache_n_store_ops;
43880+atomic_unchecked_t fscache_n_store_calls;
43881+atomic_unchecked_t fscache_n_store_pages;
43882+atomic_unchecked_t fscache_n_store_radix_deletes;
43883+atomic_unchecked_t fscache_n_store_pages_over_limit;
43884
43885-atomic_t fscache_n_store_vmscan_not_storing;
43886-atomic_t fscache_n_store_vmscan_gone;
43887-atomic_t fscache_n_store_vmscan_busy;
43888-atomic_t fscache_n_store_vmscan_cancelled;
43889+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43890+atomic_unchecked_t fscache_n_store_vmscan_gone;
43891+atomic_unchecked_t fscache_n_store_vmscan_busy;
43892+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43893
43894-atomic_t fscache_n_marks;
43895-atomic_t fscache_n_uncaches;
43896+atomic_unchecked_t fscache_n_marks;
43897+atomic_unchecked_t fscache_n_uncaches;
43898
43899-atomic_t fscache_n_acquires;
43900-atomic_t fscache_n_acquires_null;
43901-atomic_t fscache_n_acquires_no_cache;
43902-atomic_t fscache_n_acquires_ok;
43903-atomic_t fscache_n_acquires_nobufs;
43904-atomic_t fscache_n_acquires_oom;
43905+atomic_unchecked_t fscache_n_acquires;
43906+atomic_unchecked_t fscache_n_acquires_null;
43907+atomic_unchecked_t fscache_n_acquires_no_cache;
43908+atomic_unchecked_t fscache_n_acquires_ok;
43909+atomic_unchecked_t fscache_n_acquires_nobufs;
43910+atomic_unchecked_t fscache_n_acquires_oom;
43911
43912-atomic_t fscache_n_updates;
43913-atomic_t fscache_n_updates_null;
43914-atomic_t fscache_n_updates_run;
43915+atomic_unchecked_t fscache_n_updates;
43916+atomic_unchecked_t fscache_n_updates_null;
43917+atomic_unchecked_t fscache_n_updates_run;
43918
43919-atomic_t fscache_n_relinquishes;
43920-atomic_t fscache_n_relinquishes_null;
43921-atomic_t fscache_n_relinquishes_waitcrt;
43922-atomic_t fscache_n_relinquishes_retire;
43923+atomic_unchecked_t fscache_n_relinquishes;
43924+atomic_unchecked_t fscache_n_relinquishes_null;
43925+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43926+atomic_unchecked_t fscache_n_relinquishes_retire;
43927
43928-atomic_t fscache_n_cookie_index;
43929-atomic_t fscache_n_cookie_data;
43930-atomic_t fscache_n_cookie_special;
43931+atomic_unchecked_t fscache_n_cookie_index;
43932+atomic_unchecked_t fscache_n_cookie_data;
43933+atomic_unchecked_t fscache_n_cookie_special;
43934
43935-atomic_t fscache_n_object_alloc;
43936-atomic_t fscache_n_object_no_alloc;
43937-atomic_t fscache_n_object_lookups;
43938-atomic_t fscache_n_object_lookups_negative;
43939-atomic_t fscache_n_object_lookups_positive;
43940-atomic_t fscache_n_object_lookups_timed_out;
43941-atomic_t fscache_n_object_created;
43942-atomic_t fscache_n_object_avail;
43943-atomic_t fscache_n_object_dead;
43944+atomic_unchecked_t fscache_n_object_alloc;
43945+atomic_unchecked_t fscache_n_object_no_alloc;
43946+atomic_unchecked_t fscache_n_object_lookups;
43947+atomic_unchecked_t fscache_n_object_lookups_negative;
43948+atomic_unchecked_t fscache_n_object_lookups_positive;
43949+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43950+atomic_unchecked_t fscache_n_object_created;
43951+atomic_unchecked_t fscache_n_object_avail;
43952+atomic_unchecked_t fscache_n_object_dead;
43953
43954-atomic_t fscache_n_checkaux_none;
43955-atomic_t fscache_n_checkaux_okay;
43956-atomic_t fscache_n_checkaux_update;
43957-atomic_t fscache_n_checkaux_obsolete;
43958+atomic_unchecked_t fscache_n_checkaux_none;
43959+atomic_unchecked_t fscache_n_checkaux_okay;
43960+atomic_unchecked_t fscache_n_checkaux_update;
43961+atomic_unchecked_t fscache_n_checkaux_obsolete;
43962
43963 atomic_t fscache_n_cop_alloc_object;
43964 atomic_t fscache_n_cop_lookup_object;
43965@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43966 seq_puts(m, "FS-Cache statistics\n");
43967
43968 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43969- atomic_read(&fscache_n_cookie_index),
43970- atomic_read(&fscache_n_cookie_data),
43971- atomic_read(&fscache_n_cookie_special));
43972+ atomic_read_unchecked(&fscache_n_cookie_index),
43973+ atomic_read_unchecked(&fscache_n_cookie_data),
43974+ atomic_read_unchecked(&fscache_n_cookie_special));
43975
43976 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43977- atomic_read(&fscache_n_object_alloc),
43978- atomic_read(&fscache_n_object_no_alloc),
43979- atomic_read(&fscache_n_object_avail),
43980- atomic_read(&fscache_n_object_dead));
43981+ atomic_read_unchecked(&fscache_n_object_alloc),
43982+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43983+ atomic_read_unchecked(&fscache_n_object_avail),
43984+ atomic_read_unchecked(&fscache_n_object_dead));
43985 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43986- atomic_read(&fscache_n_checkaux_none),
43987- atomic_read(&fscache_n_checkaux_okay),
43988- atomic_read(&fscache_n_checkaux_update),
43989- atomic_read(&fscache_n_checkaux_obsolete));
43990+ atomic_read_unchecked(&fscache_n_checkaux_none),
43991+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43992+ atomic_read_unchecked(&fscache_n_checkaux_update),
43993+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43994
43995 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43996- atomic_read(&fscache_n_marks),
43997- atomic_read(&fscache_n_uncaches));
43998+ atomic_read_unchecked(&fscache_n_marks),
43999+ atomic_read_unchecked(&fscache_n_uncaches));
44000
44001 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44002 " oom=%u\n",
44003- atomic_read(&fscache_n_acquires),
44004- atomic_read(&fscache_n_acquires_null),
44005- atomic_read(&fscache_n_acquires_no_cache),
44006- atomic_read(&fscache_n_acquires_ok),
44007- atomic_read(&fscache_n_acquires_nobufs),
44008- atomic_read(&fscache_n_acquires_oom));
44009+ atomic_read_unchecked(&fscache_n_acquires),
44010+ atomic_read_unchecked(&fscache_n_acquires_null),
44011+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
44012+ atomic_read_unchecked(&fscache_n_acquires_ok),
44013+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
44014+ atomic_read_unchecked(&fscache_n_acquires_oom));
44015
44016 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44017- atomic_read(&fscache_n_object_lookups),
44018- atomic_read(&fscache_n_object_lookups_negative),
44019- atomic_read(&fscache_n_object_lookups_positive),
44020- atomic_read(&fscache_n_object_created),
44021- atomic_read(&fscache_n_object_lookups_timed_out));
44022+ atomic_read_unchecked(&fscache_n_object_lookups),
44023+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
44024+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
44025+ atomic_read_unchecked(&fscache_n_object_created),
44026+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44027
44028 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44029- atomic_read(&fscache_n_updates),
44030- atomic_read(&fscache_n_updates_null),
44031- atomic_read(&fscache_n_updates_run));
44032+ atomic_read_unchecked(&fscache_n_updates),
44033+ atomic_read_unchecked(&fscache_n_updates_null),
44034+ atomic_read_unchecked(&fscache_n_updates_run));
44035
44036 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44037- atomic_read(&fscache_n_relinquishes),
44038- atomic_read(&fscache_n_relinquishes_null),
44039- atomic_read(&fscache_n_relinquishes_waitcrt),
44040- atomic_read(&fscache_n_relinquishes_retire));
44041+ atomic_read_unchecked(&fscache_n_relinquishes),
44042+ atomic_read_unchecked(&fscache_n_relinquishes_null),
44043+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44044+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
44045
44046 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44047- atomic_read(&fscache_n_attr_changed),
44048- atomic_read(&fscache_n_attr_changed_ok),
44049- atomic_read(&fscache_n_attr_changed_nobufs),
44050- atomic_read(&fscache_n_attr_changed_nomem),
44051- atomic_read(&fscache_n_attr_changed_calls));
44052+ atomic_read_unchecked(&fscache_n_attr_changed),
44053+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
44054+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44055+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44056+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
44057
44058 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44059- atomic_read(&fscache_n_allocs),
44060- atomic_read(&fscache_n_allocs_ok),
44061- atomic_read(&fscache_n_allocs_wait),
44062- atomic_read(&fscache_n_allocs_nobufs),
44063- atomic_read(&fscache_n_allocs_intr));
44064+ atomic_read_unchecked(&fscache_n_allocs),
44065+ atomic_read_unchecked(&fscache_n_allocs_ok),
44066+ atomic_read_unchecked(&fscache_n_allocs_wait),
44067+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
44068+ atomic_read_unchecked(&fscache_n_allocs_intr));
44069 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44070- atomic_read(&fscache_n_alloc_ops),
44071- atomic_read(&fscache_n_alloc_op_waits),
44072- atomic_read(&fscache_n_allocs_object_dead));
44073+ atomic_read_unchecked(&fscache_n_alloc_ops),
44074+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
44075+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
44076
44077 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44078 " int=%u oom=%u\n",
44079- atomic_read(&fscache_n_retrievals),
44080- atomic_read(&fscache_n_retrievals_ok),
44081- atomic_read(&fscache_n_retrievals_wait),
44082- atomic_read(&fscache_n_retrievals_nodata),
44083- atomic_read(&fscache_n_retrievals_nobufs),
44084- atomic_read(&fscache_n_retrievals_intr),
44085- atomic_read(&fscache_n_retrievals_nomem));
44086+ atomic_read_unchecked(&fscache_n_retrievals),
44087+ atomic_read_unchecked(&fscache_n_retrievals_ok),
44088+ atomic_read_unchecked(&fscache_n_retrievals_wait),
44089+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
44090+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44091+ atomic_read_unchecked(&fscache_n_retrievals_intr),
44092+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
44093 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44094- atomic_read(&fscache_n_retrieval_ops),
44095- atomic_read(&fscache_n_retrieval_op_waits),
44096- atomic_read(&fscache_n_retrievals_object_dead));
44097+ atomic_read_unchecked(&fscache_n_retrieval_ops),
44098+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44099+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44100
44101 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44102- atomic_read(&fscache_n_stores),
44103- atomic_read(&fscache_n_stores_ok),
44104- atomic_read(&fscache_n_stores_again),
44105- atomic_read(&fscache_n_stores_nobufs),
44106- atomic_read(&fscache_n_stores_oom));
44107+ atomic_read_unchecked(&fscache_n_stores),
44108+ atomic_read_unchecked(&fscache_n_stores_ok),
44109+ atomic_read_unchecked(&fscache_n_stores_again),
44110+ atomic_read_unchecked(&fscache_n_stores_nobufs),
44111+ atomic_read_unchecked(&fscache_n_stores_oom));
44112 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44113- atomic_read(&fscache_n_store_ops),
44114- atomic_read(&fscache_n_store_calls),
44115- atomic_read(&fscache_n_store_pages),
44116- atomic_read(&fscache_n_store_radix_deletes),
44117- atomic_read(&fscache_n_store_pages_over_limit));
44118+ atomic_read_unchecked(&fscache_n_store_ops),
44119+ atomic_read_unchecked(&fscache_n_store_calls),
44120+ atomic_read_unchecked(&fscache_n_store_pages),
44121+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
44122+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44123
44124 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44125- atomic_read(&fscache_n_store_vmscan_not_storing),
44126- atomic_read(&fscache_n_store_vmscan_gone),
44127- atomic_read(&fscache_n_store_vmscan_busy),
44128- atomic_read(&fscache_n_store_vmscan_cancelled));
44129+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44130+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44131+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44132+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44133
44134 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44135- atomic_read(&fscache_n_op_pend),
44136- atomic_read(&fscache_n_op_run),
44137- atomic_read(&fscache_n_op_enqueue),
44138- atomic_read(&fscache_n_op_cancelled),
44139- atomic_read(&fscache_n_op_rejected));
44140+ atomic_read_unchecked(&fscache_n_op_pend),
44141+ atomic_read_unchecked(&fscache_n_op_run),
44142+ atomic_read_unchecked(&fscache_n_op_enqueue),
44143+ atomic_read_unchecked(&fscache_n_op_cancelled),
44144+ atomic_read_unchecked(&fscache_n_op_rejected));
44145 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44146- atomic_read(&fscache_n_op_deferred_release),
44147- atomic_read(&fscache_n_op_release),
44148- atomic_read(&fscache_n_op_gc));
44149+ atomic_read_unchecked(&fscache_n_op_deferred_release),
44150+ atomic_read_unchecked(&fscache_n_op_release),
44151+ atomic_read_unchecked(&fscache_n_op_gc));
44152
44153 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44154 atomic_read(&fscache_n_cop_alloc_object),
44155diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44156index 3426521..3b75162 100644
44157--- a/fs/fuse/cuse.c
44158+++ b/fs/fuse/cuse.c
44159@@ -587,10 +587,12 @@ static int __init cuse_init(void)
44160 INIT_LIST_HEAD(&cuse_conntbl[i]);
44161
44162 /* inherit and extend fuse_dev_operations */
44163- cuse_channel_fops = fuse_dev_operations;
44164- cuse_channel_fops.owner = THIS_MODULE;
44165- cuse_channel_fops.open = cuse_channel_open;
44166- cuse_channel_fops.release = cuse_channel_release;
44167+ pax_open_kernel();
44168+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44169+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44170+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
44171+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
44172+ pax_close_kernel();
44173
44174 cuse_class = class_create(THIS_MODULE, "cuse");
44175 if (IS_ERR(cuse_class))
44176diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44177index 2aaf3ea..8e50863 100644
44178--- a/fs/fuse/dev.c
44179+++ b/fs/fuse/dev.c
44180@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44181 ret = 0;
44182 pipe_lock(pipe);
44183
44184- if (!pipe->readers) {
44185+ if (!atomic_read(&pipe->readers)) {
44186 send_sig(SIGPIPE, current, 0);
44187 if (!ret)
44188 ret = -EPIPE;
44189diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44190index 9f63e49..d8a64c0 100644
44191--- a/fs/fuse/dir.c
44192+++ b/fs/fuse/dir.c
44193@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44194 return link;
44195 }
44196
44197-static void free_link(char *link)
44198+static void free_link(const char *link)
44199 {
44200 if (!IS_ERR(link))
44201 free_page((unsigned long) link);
44202diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44203index cfd4959..a780959 100644
44204--- a/fs/gfs2/inode.c
44205+++ b/fs/gfs2/inode.c
44206@@ -1490,7 +1490,7 @@ out:
44207
44208 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44209 {
44210- char *s = nd_get_link(nd);
44211+ const char *s = nd_get_link(nd);
44212 if (!IS_ERR(s))
44213 kfree(s);
44214 }
44215diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44216index 0be5a78..9cfb853 100644
44217--- a/fs/hugetlbfs/inode.c
44218+++ b/fs/hugetlbfs/inode.c
44219@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44220 .kill_sb = kill_litter_super,
44221 };
44222
44223-static struct vfsmount *hugetlbfs_vfsmount;
44224+struct vfsmount *hugetlbfs_vfsmount;
44225
44226 static int can_do_hugetlb_shm(void)
44227 {
44228diff --git a/fs/inode.c b/fs/inode.c
44229index ee4e66b..0451521 100644
44230--- a/fs/inode.c
44231+++ b/fs/inode.c
44232@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44233
44234 #ifdef CONFIG_SMP
44235 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44236- static atomic_t shared_last_ino;
44237- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44238+ static atomic_unchecked_t shared_last_ino;
44239+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44240
44241 res = next - LAST_INO_BATCH;
44242 }
44243diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44244index e513f19..2ab1351 100644
44245--- a/fs/jffs2/erase.c
44246+++ b/fs/jffs2/erase.c
44247@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44248 struct jffs2_unknown_node marker = {
44249 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44250 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44251- .totlen = cpu_to_je32(c->cleanmarker_size)
44252+ .totlen = cpu_to_je32(c->cleanmarker_size),
44253+ .hdr_crc = cpu_to_je32(0)
44254 };
44255
44256 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44257diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44258index b09e51d..e482afa 100644
44259--- a/fs/jffs2/wbuf.c
44260+++ b/fs/jffs2/wbuf.c
44261@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44262 {
44263 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44264 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44265- .totlen = constant_cpu_to_je32(8)
44266+ .totlen = constant_cpu_to_je32(8),
44267+ .hdr_crc = constant_cpu_to_je32(0)
44268 };
44269
44270 /*
44271diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44272index a44eff0..462e07d 100644
44273--- a/fs/jfs/super.c
44274+++ b/fs/jfs/super.c
44275@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44276
44277 jfs_inode_cachep =
44278 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44279- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44280+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44281 init_once);
44282 if (jfs_inode_cachep == NULL)
44283 return -ENOMEM;
44284diff --git a/fs/libfs.c b/fs/libfs.c
44285index f6d411e..e82a08d 100644
44286--- a/fs/libfs.c
44287+++ b/fs/libfs.c
44288@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44289
44290 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44291 struct dentry *next;
44292+ char d_name[sizeof(next->d_iname)];
44293+ const unsigned char *name;
44294+
44295 next = list_entry(p, struct dentry, d_u.d_child);
44296 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44297 if (!simple_positive(next)) {
44298@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44299
44300 spin_unlock(&next->d_lock);
44301 spin_unlock(&dentry->d_lock);
44302- if (filldir(dirent, next->d_name.name,
44303+ name = next->d_name.name;
44304+ if (name == next->d_iname) {
44305+ memcpy(d_name, name, next->d_name.len);
44306+ name = d_name;
44307+ }
44308+ if (filldir(dirent, name,
44309 next->d_name.len, filp->f_pos,
44310 next->d_inode->i_ino,
44311 dt_type(next->d_inode)) < 0)
44312diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44313index 8392cb8..80d6193 100644
44314--- a/fs/lockd/clntproc.c
44315+++ b/fs/lockd/clntproc.c
44316@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44317 /*
44318 * Cookie counter for NLM requests
44319 */
44320-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44321+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44322
44323 void nlmclnt_next_cookie(struct nlm_cookie *c)
44324 {
44325- u32 cookie = atomic_inc_return(&nlm_cookie);
44326+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44327
44328 memcpy(c->data, &cookie, 4);
44329 c->len=4;
44330diff --git a/fs/locks.c b/fs/locks.c
44331index 637694b..f84a121 100644
44332--- a/fs/locks.c
44333+++ b/fs/locks.c
44334@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44335 return;
44336
44337 if (filp->f_op && filp->f_op->flock) {
44338- struct file_lock fl = {
44339+ struct file_lock flock = {
44340 .fl_pid = current->tgid,
44341 .fl_file = filp,
44342 .fl_flags = FL_FLOCK,
44343 .fl_type = F_UNLCK,
44344 .fl_end = OFFSET_MAX,
44345 };
44346- filp->f_op->flock(filp, F_SETLKW, &fl);
44347- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44348- fl.fl_ops->fl_release_private(&fl);
44349+ filp->f_op->flock(filp, F_SETLKW, &flock);
44350+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44351+ flock.fl_ops->fl_release_private(&flock);
44352 }
44353
44354 lock_flocks();
44355diff --git a/fs/namei.c b/fs/namei.c
44356index 5008f01..90328a7 100644
44357--- a/fs/namei.c
44358+++ b/fs/namei.c
44359@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44360 if (ret != -EACCES)
44361 return ret;
44362
44363+#ifdef CONFIG_GRKERNSEC
44364+ /* we'll block if we have to log due to a denied capability use */
44365+ if (mask & MAY_NOT_BLOCK)
44366+ return -ECHILD;
44367+#endif
44368+
44369 if (S_ISDIR(inode->i_mode)) {
44370 /* DACs are overridable for directories */
44371- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44372- return 0;
44373 if (!(mask & MAY_WRITE))
44374- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44375+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44376+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44377 return 0;
44378+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44379+ return 0;
44380 return -EACCES;
44381 }
44382 /*
44383+ * Searching includes executable on directories, else just read.
44384+ */
44385+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44386+ if (mask == MAY_READ)
44387+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44388+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44389+ return 0;
44390+
44391+ /*
44392 * Read/write DACs are always overridable.
44393 * Executable DACs are overridable when there is
44394 * at least one exec bit set.
44395@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44396 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44397 return 0;
44398
44399- /*
44400- * Searching includes executable on directories, else just read.
44401- */
44402- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44403- if (mask == MAY_READ)
44404- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44405- return 0;
44406-
44407 return -EACCES;
44408 }
44409
44410@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44411 return error;
44412 }
44413
44414+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44415+ dentry->d_inode, dentry, nd->path.mnt)) {
44416+ error = -EACCES;
44417+ *p = ERR_PTR(error); /* no ->put_link(), please */
44418+ path_put(&nd->path);
44419+ return error;
44420+ }
44421+
44422 nd->last_type = LAST_BIND;
44423 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44424 error = PTR_ERR(*p);
44425 if (!IS_ERR(*p)) {
44426- char *s = nd_get_link(nd);
44427+ const char *s = nd_get_link(nd);
44428 error = 0;
44429 if (s)
44430 error = __vfs_follow_link(nd, s);
44431@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44432 if (!err)
44433 err = complete_walk(nd);
44434
44435+ if (!(nd->flags & LOOKUP_PARENT)) {
44436+#ifdef CONFIG_GRKERNSEC
44437+ if (flags & LOOKUP_RCU) {
44438+ if (!err)
44439+ path_put(&nd->path);
44440+ err = -ECHILD;
44441+ } else
44442+#endif
44443+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44444+ if (!err)
44445+ path_put(&nd->path);
44446+ err = -ENOENT;
44447+ }
44448+ }
44449+
44450 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44451 if (!nd->inode->i_op->lookup) {
44452 path_put(&nd->path);
44453@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44454 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44455
44456 if (likely(!retval)) {
44457+ if (*name != '/' && nd->path.dentry && nd->inode) {
44458+#ifdef CONFIG_GRKERNSEC
44459+ if (flags & LOOKUP_RCU)
44460+ return -ECHILD;
44461+#endif
44462+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44463+ return -ENOENT;
44464+ }
44465+
44466 if (unlikely(!audit_dummy_context())) {
44467 if (nd->path.dentry && nd->inode)
44468 audit_inode(name, nd->path.dentry);
44469@@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44470 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44471 return -EPERM;
44472
44473+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44474+ return -EPERM;
44475+ if (gr_handle_rawio(inode))
44476+ return -EPERM;
44477+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44478+ return -EACCES;
44479+
44480 return 0;
44481 }
44482
44483@@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44484 error = complete_walk(nd);
44485 if (error)
44486 return ERR_PTR(error);
44487+#ifdef CONFIG_GRKERNSEC
44488+ if (nd->flags & LOOKUP_RCU) {
44489+ error = -ECHILD;
44490+ goto exit;
44491+ }
44492+#endif
44493+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44494+ error = -ENOENT;
44495+ goto exit;
44496+ }
44497 audit_inode(pathname, nd->path.dentry);
44498 if (open_flag & O_CREAT) {
44499 error = -EISDIR;
44500@@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44501 error = complete_walk(nd);
44502 if (error)
44503 return ERR_PTR(error);
44504+#ifdef CONFIG_GRKERNSEC
44505+ if (nd->flags & LOOKUP_RCU) {
44506+ error = -ECHILD;
44507+ goto exit;
44508+ }
44509+#endif
44510+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44511+ error = -ENOENT;
44512+ goto exit;
44513+ }
44514 audit_inode(pathname, dir);
44515 goto ok;
44516 }
44517@@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44518 error = complete_walk(nd);
44519 if (error)
44520 return ERR_PTR(-ECHILD);
44521+#ifdef CONFIG_GRKERNSEC
44522+ if (nd->flags & LOOKUP_RCU) {
44523+ error = -ECHILD;
44524+ goto exit;
44525+ }
44526+#endif
44527+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44528+ error = -ENOENT;
44529+ goto exit;
44530+ }
44531
44532 error = -ENOTDIR;
44533 if (nd->flags & LOOKUP_DIRECTORY) {
44534@@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44535 /* Negative dentry, just create the file */
44536 if (!dentry->d_inode) {
44537 int mode = op->mode;
44538+
44539+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44540+ error = -EACCES;
44541+ goto exit_mutex_unlock;
44542+ }
44543+
44544 if (!IS_POSIXACL(dir->d_inode))
44545 mode &= ~current_umask();
44546 /*
44547@@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44548 error = vfs_create(dir->d_inode, dentry, mode, nd);
44549 if (error)
44550 goto exit_mutex_unlock;
44551+ else
44552+ gr_handle_create(path->dentry, path->mnt);
44553 mutex_unlock(&dir->d_inode->i_mutex);
44554 dput(nd->path.dentry);
44555 nd->path.dentry = dentry;
44556@@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44557 /*
44558 * It already exists.
44559 */
44560+
44561+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44562+ error = -ENOENT;
44563+ goto exit_mutex_unlock;
44564+ }
44565+
44566+ /* only check if O_CREAT is specified, all other checks need to go
44567+ into may_open */
44568+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44569+ error = -EACCES;
44570+ goto exit_mutex_unlock;
44571+ }
44572+
44573 mutex_unlock(&dir->d_inode->i_mutex);
44574 audit_inode(pathname, path->dentry);
44575
44576@@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44577 *path = nd.path;
44578 return dentry;
44579 eexist:
44580+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44581+ dput(dentry);
44582+ dentry = ERR_PTR(-ENOENT);
44583+ goto fail;
44584+ }
44585 dput(dentry);
44586 dentry = ERR_PTR(-EEXIST);
44587 fail:
44588@@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44589 }
44590 EXPORT_SYMBOL(user_path_create);
44591
44592+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44593+{
44594+ char *tmp = getname(pathname);
44595+ struct dentry *res;
44596+ if (IS_ERR(tmp))
44597+ return ERR_CAST(tmp);
44598+ res = kern_path_create(dfd, tmp, path, is_dir);
44599+ if (IS_ERR(res))
44600+ putname(tmp);
44601+ else
44602+ *to = tmp;
44603+ return res;
44604+}
44605+
44606 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44607 {
44608 int error = may_create(dir, dentry);
44609@@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44610 error = mnt_want_write(path.mnt);
44611 if (error)
44612 goto out_dput;
44613+
44614+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44615+ error = -EPERM;
44616+ goto out_drop_write;
44617+ }
44618+
44619+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44620+ error = -EACCES;
44621+ goto out_drop_write;
44622+ }
44623+
44624 error = security_path_mknod(&path, dentry, mode, dev);
44625 if (error)
44626 goto out_drop_write;
44627@@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44628 }
44629 out_drop_write:
44630 mnt_drop_write(path.mnt);
44631+
44632+ if (!error)
44633+ gr_handle_create(dentry, path.mnt);
44634 out_dput:
44635 dput(dentry);
44636 mutex_unlock(&path.dentry->d_inode->i_mutex);
44637@@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44638 error = mnt_want_write(path.mnt);
44639 if (error)
44640 goto out_dput;
44641+
44642+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44643+ error = -EACCES;
44644+ goto out_drop_write;
44645+ }
44646+
44647 error = security_path_mkdir(&path, dentry, mode);
44648 if (error)
44649 goto out_drop_write;
44650 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44651 out_drop_write:
44652 mnt_drop_write(path.mnt);
44653+
44654+ if (!error)
44655+ gr_handle_create(dentry, path.mnt);
44656 out_dput:
44657 dput(dentry);
44658 mutex_unlock(&path.dentry->d_inode->i_mutex);
44659@@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44660 char * name;
44661 struct dentry *dentry;
44662 struct nameidata nd;
44663+ ino_t saved_ino = 0;
44664+ dev_t saved_dev = 0;
44665
44666 error = user_path_parent(dfd, pathname, &nd, &name);
44667 if (error)
44668@@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44669 error = -ENOENT;
44670 goto exit3;
44671 }
44672+
44673+ saved_ino = dentry->d_inode->i_ino;
44674+ saved_dev = gr_get_dev_from_dentry(dentry);
44675+
44676+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44677+ error = -EACCES;
44678+ goto exit3;
44679+ }
44680+
44681 error = mnt_want_write(nd.path.mnt);
44682 if (error)
44683 goto exit3;
44684@@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44685 if (error)
44686 goto exit4;
44687 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44688+ if (!error && (saved_dev || saved_ino))
44689+ gr_handle_delete(saved_ino, saved_dev);
44690 exit4:
44691 mnt_drop_write(nd.path.mnt);
44692 exit3:
44693@@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44694 struct dentry *dentry;
44695 struct nameidata nd;
44696 struct inode *inode = NULL;
44697+ ino_t saved_ino = 0;
44698+ dev_t saved_dev = 0;
44699
44700 error = user_path_parent(dfd, pathname, &nd, &name);
44701 if (error)
44702@@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44703 if (!inode)
44704 goto slashes;
44705 ihold(inode);
44706+
44707+ if (inode->i_nlink <= 1) {
44708+ saved_ino = inode->i_ino;
44709+ saved_dev = gr_get_dev_from_dentry(dentry);
44710+ }
44711+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44712+ error = -EACCES;
44713+ goto exit2;
44714+ }
44715+
44716 error = mnt_want_write(nd.path.mnt);
44717 if (error)
44718 goto exit2;
44719@@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44720 if (error)
44721 goto exit3;
44722 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44723+ if (!error && (saved_ino || saved_dev))
44724+ gr_handle_delete(saved_ino, saved_dev);
44725 exit3:
44726 mnt_drop_write(nd.path.mnt);
44727 exit2:
44728@@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44729 error = mnt_want_write(path.mnt);
44730 if (error)
44731 goto out_dput;
44732+
44733+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44734+ error = -EACCES;
44735+ goto out_drop_write;
44736+ }
44737+
44738 error = security_path_symlink(&path, dentry, from);
44739 if (error)
44740 goto out_drop_write;
44741 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44742+ if (!error)
44743+ gr_handle_create(dentry, path.mnt);
44744 out_drop_write:
44745 mnt_drop_write(path.mnt);
44746 out_dput:
44747@@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44748 {
44749 struct dentry *new_dentry;
44750 struct path old_path, new_path;
44751+ char *to = NULL;
44752 int how = 0;
44753 int error;
44754
44755@@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44756 if (error)
44757 return error;
44758
44759- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44760+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44761 error = PTR_ERR(new_dentry);
44762 if (IS_ERR(new_dentry))
44763 goto out;
44764@@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44765 error = mnt_want_write(new_path.mnt);
44766 if (error)
44767 goto out_dput;
44768+
44769+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44770+ old_path.dentry->d_inode,
44771+ old_path.dentry->d_inode->i_mode, to)) {
44772+ error = -EACCES;
44773+ goto out_drop_write;
44774+ }
44775+
44776+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44777+ old_path.dentry, old_path.mnt, to)) {
44778+ error = -EACCES;
44779+ goto out_drop_write;
44780+ }
44781+
44782 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44783 if (error)
44784 goto out_drop_write;
44785 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44786+ if (!error)
44787+ gr_handle_create(new_dentry, new_path.mnt);
44788 out_drop_write:
44789 mnt_drop_write(new_path.mnt);
44790 out_dput:
44791+ putname(to);
44792 dput(new_dentry);
44793 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44794 path_put(&new_path);
44795@@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44796 if (new_dentry == trap)
44797 goto exit5;
44798
44799+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44800+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44801+ to);
44802+ if (error)
44803+ goto exit5;
44804+
44805 error = mnt_want_write(oldnd.path.mnt);
44806 if (error)
44807 goto exit5;
44808@@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44809 goto exit6;
44810 error = vfs_rename(old_dir->d_inode, old_dentry,
44811 new_dir->d_inode, new_dentry);
44812+ if (!error)
44813+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44814+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44815 exit6:
44816 mnt_drop_write(oldnd.path.mnt);
44817 exit5:
44818@@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44819
44820 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44821 {
44822+ char tmpbuf[64];
44823+ const char *newlink;
44824 int len;
44825
44826 len = PTR_ERR(link);
44827@@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44828 len = strlen(link);
44829 if (len > (unsigned) buflen)
44830 len = buflen;
44831- if (copy_to_user(buffer, link, len))
44832+
44833+ if (len < sizeof(tmpbuf)) {
44834+ memcpy(tmpbuf, link, len);
44835+ newlink = tmpbuf;
44836+ } else
44837+ newlink = link;
44838+
44839+ if (copy_to_user(buffer, newlink, len))
44840 len = -EFAULT;
44841 out:
44842 return len;
44843diff --git a/fs/namespace.c b/fs/namespace.c
44844index cfc6d44..b4632a5 100644
44845--- a/fs/namespace.c
44846+++ b/fs/namespace.c
44847@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44848 if (!(sb->s_flags & MS_RDONLY))
44849 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44850 up_write(&sb->s_umount);
44851+
44852+ gr_log_remount(mnt->mnt_devname, retval);
44853+
44854 return retval;
44855 }
44856
44857@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44858 br_write_unlock(vfsmount_lock);
44859 up_write(&namespace_sem);
44860 release_mounts(&umount_list);
44861+
44862+ gr_log_unmount(mnt->mnt_devname, retval);
44863+
44864 return retval;
44865 }
44866
44867@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44868 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44869 MS_STRICTATIME);
44870
44871+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44872+ retval = -EPERM;
44873+ goto dput_out;
44874+ }
44875+
44876+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44877+ retval = -EPERM;
44878+ goto dput_out;
44879+ }
44880+
44881 if (flags & MS_REMOUNT)
44882 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44883 data_page);
44884@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44885 dev_name, data_page);
44886 dput_out:
44887 path_put(&path);
44888+
44889+ gr_log_mount(dev_name, dir_name, retval);
44890+
44891 return retval;
44892 }
44893
44894@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44895 if (error)
44896 goto out2;
44897
44898+ if (gr_handle_chroot_pivot()) {
44899+ error = -EPERM;
44900+ goto out2;
44901+ }
44902+
44903 get_fs_root(current->fs, &root);
44904 error = lock_mount(&old);
44905 if (error)
44906diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44907index 3db6b82..a57597e 100644
44908--- a/fs/nfs/blocklayout/blocklayout.c
44909+++ b/fs/nfs/blocklayout/blocklayout.c
44910@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44911 */
44912 struct parallel_io {
44913 struct kref refcnt;
44914- struct rpc_call_ops call_ops;
44915+ rpc_call_ops_no_const call_ops;
44916 void (*pnfs_callback) (void *data);
44917 void *data;
44918 };
44919diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44920index 50a15fa..ca113f9 100644
44921--- a/fs/nfs/inode.c
44922+++ b/fs/nfs/inode.c
44923@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44924 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44925 nfsi->attrtimeo_timestamp = jiffies;
44926
44927- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44928+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44929 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44930 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44931 else
44932@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44933 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44934 }
44935
44936-static atomic_long_t nfs_attr_generation_counter;
44937+static atomic_long_unchecked_t nfs_attr_generation_counter;
44938
44939 static unsigned long nfs_read_attr_generation_counter(void)
44940 {
44941- return atomic_long_read(&nfs_attr_generation_counter);
44942+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44943 }
44944
44945 unsigned long nfs_inc_attr_generation_counter(void)
44946 {
44947- return atomic_long_inc_return(&nfs_attr_generation_counter);
44948+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44949 }
44950
44951 void nfs_fattr_init(struct nfs_fattr *fattr)
44952diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44953index 7a2e442..8e544cc 100644
44954--- a/fs/nfsd/vfs.c
44955+++ b/fs/nfsd/vfs.c
44956@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44957 } else {
44958 oldfs = get_fs();
44959 set_fs(KERNEL_DS);
44960- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44961+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44962 set_fs(oldfs);
44963 }
44964
44965@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44966
44967 /* Write the data. */
44968 oldfs = get_fs(); set_fs(KERNEL_DS);
44969- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44970+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44971 set_fs(oldfs);
44972 if (host_err < 0)
44973 goto out_nfserr;
44974@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44975 */
44976
44977 oldfs = get_fs(); set_fs(KERNEL_DS);
44978- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44979+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44980 set_fs(oldfs);
44981
44982 if (host_err < 0)
44983diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44984index 9fde1c0..14e8827 100644
44985--- a/fs/notify/fanotify/fanotify_user.c
44986+++ b/fs/notify/fanotify/fanotify_user.c
44987@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44988 goto out_close_fd;
44989
44990 ret = -EFAULT;
44991- if (copy_to_user(buf, &fanotify_event_metadata,
44992+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44993+ copy_to_user(buf, &fanotify_event_metadata,
44994 fanotify_event_metadata.event_len))
44995 goto out_kill_access_response;
44996
44997diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44998index ee18815..7aa5d01 100644
44999--- a/fs/notify/notification.c
45000+++ b/fs/notify/notification.c
45001@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45002 * get set to 0 so it will never get 'freed'
45003 */
45004 static struct fsnotify_event *q_overflow_event;
45005-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45006+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45007
45008 /**
45009 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45010@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45011 */
45012 u32 fsnotify_get_cookie(void)
45013 {
45014- return atomic_inc_return(&fsnotify_sync_cookie);
45015+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45016 }
45017 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45018
45019diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45020index 99e3610..02c1068 100644
45021--- a/fs/ntfs/dir.c
45022+++ b/fs/ntfs/dir.c
45023@@ -1329,7 +1329,7 @@ find_next_index_buffer:
45024 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45025 ~(s64)(ndir->itype.index.block_size - 1)));
45026 /* Bounds checks. */
45027- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45028+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45029 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45030 "inode 0x%lx or driver bug.", vdir->i_ino);
45031 goto err_out;
45032diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45033index c587e2d..3641eaa 100644
45034--- a/fs/ntfs/file.c
45035+++ b/fs/ntfs/file.c
45036@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45037 #endif /* NTFS_RW */
45038 };
45039
45040-const struct file_operations ntfs_empty_file_ops = {};
45041+const struct file_operations ntfs_empty_file_ops __read_only;
45042
45043-const struct inode_operations ntfs_empty_inode_ops = {};
45044+const struct inode_operations ntfs_empty_inode_ops __read_only;
45045diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45046index 210c352..a174f83 100644
45047--- a/fs/ocfs2/localalloc.c
45048+++ b/fs/ocfs2/localalloc.c
45049@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45050 goto bail;
45051 }
45052
45053- atomic_inc(&osb->alloc_stats.moves);
45054+ atomic_inc_unchecked(&osb->alloc_stats.moves);
45055
45056 bail:
45057 if (handle)
45058diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45059index d355e6e..578d905 100644
45060--- a/fs/ocfs2/ocfs2.h
45061+++ b/fs/ocfs2/ocfs2.h
45062@@ -235,11 +235,11 @@ enum ocfs2_vol_state
45063
45064 struct ocfs2_alloc_stats
45065 {
45066- atomic_t moves;
45067- atomic_t local_data;
45068- atomic_t bitmap_data;
45069- atomic_t bg_allocs;
45070- atomic_t bg_extends;
45071+ atomic_unchecked_t moves;
45072+ atomic_unchecked_t local_data;
45073+ atomic_unchecked_t bitmap_data;
45074+ atomic_unchecked_t bg_allocs;
45075+ atomic_unchecked_t bg_extends;
45076 };
45077
45078 enum ocfs2_local_alloc_state
45079diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45080index ba5d97e..c77db25 100644
45081--- a/fs/ocfs2/suballoc.c
45082+++ b/fs/ocfs2/suballoc.c
45083@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45084 mlog_errno(status);
45085 goto bail;
45086 }
45087- atomic_inc(&osb->alloc_stats.bg_extends);
45088+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45089
45090 /* You should never ask for this much metadata */
45091 BUG_ON(bits_wanted >
45092@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45093 mlog_errno(status);
45094 goto bail;
45095 }
45096- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45097+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45098
45099 *suballoc_loc = res.sr_bg_blkno;
45100 *suballoc_bit_start = res.sr_bit_offset;
45101@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45102 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45103 res->sr_bits);
45104
45105- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45106+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45107
45108 BUG_ON(res->sr_bits != 1);
45109
45110@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45111 mlog_errno(status);
45112 goto bail;
45113 }
45114- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45115+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45116
45117 BUG_ON(res.sr_bits != 1);
45118
45119@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45120 cluster_start,
45121 num_clusters);
45122 if (!status)
45123- atomic_inc(&osb->alloc_stats.local_data);
45124+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
45125 } else {
45126 if (min_clusters > (osb->bitmap_cpg - 1)) {
45127 /* The only paths asking for contiguousness
45128@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45129 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45130 res.sr_bg_blkno,
45131 res.sr_bit_offset);
45132- atomic_inc(&osb->alloc_stats.bitmap_data);
45133+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45134 *num_clusters = res.sr_bits;
45135 }
45136 }
45137diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45138index 4994f8b..eaab8eb 100644
45139--- a/fs/ocfs2/super.c
45140+++ b/fs/ocfs2/super.c
45141@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45142 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45143 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45144 "Stats",
45145- atomic_read(&osb->alloc_stats.bitmap_data),
45146- atomic_read(&osb->alloc_stats.local_data),
45147- atomic_read(&osb->alloc_stats.bg_allocs),
45148- atomic_read(&osb->alloc_stats.moves),
45149- atomic_read(&osb->alloc_stats.bg_extends));
45150+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45151+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45152+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45153+ atomic_read_unchecked(&osb->alloc_stats.moves),
45154+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45155
45156 out += snprintf(buf + out, len - out,
45157 "%10s => State: %u Descriptor: %llu Size: %u bits "
45158@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45159 spin_lock_init(&osb->osb_xattr_lock);
45160 ocfs2_init_steal_slots(osb);
45161
45162- atomic_set(&osb->alloc_stats.moves, 0);
45163- atomic_set(&osb->alloc_stats.local_data, 0);
45164- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45165- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45166- atomic_set(&osb->alloc_stats.bg_extends, 0);
45167+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45168+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45169+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45170+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45171+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45172
45173 /* Copy the blockcheck stats from the superblock probe */
45174 osb->osb_ecc_stats = *stats;
45175diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45176index 5d22872..523db20 100644
45177--- a/fs/ocfs2/symlink.c
45178+++ b/fs/ocfs2/symlink.c
45179@@ -142,7 +142,7 @@ bail:
45180
45181 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45182 {
45183- char *link = nd_get_link(nd);
45184+ const char *link = nd_get_link(nd);
45185 if (!IS_ERR(link))
45186 kfree(link);
45187 }
45188diff --git a/fs/open.c b/fs/open.c
45189index 22c41b5..695cb17 100644
45190--- a/fs/open.c
45191+++ b/fs/open.c
45192@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45193 error = locks_verify_truncate(inode, NULL, length);
45194 if (!error)
45195 error = security_path_truncate(&path);
45196+
45197+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45198+ error = -EACCES;
45199+
45200 if (!error)
45201 error = do_truncate(path.dentry, length, 0, NULL);
45202
45203@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45204 if (__mnt_is_readonly(path.mnt))
45205 res = -EROFS;
45206
45207+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45208+ res = -EACCES;
45209+
45210 out_path_release:
45211 path_put(&path);
45212 out:
45213@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45214 if (error)
45215 goto dput_and_out;
45216
45217+ gr_log_chdir(path.dentry, path.mnt);
45218+
45219 set_fs_pwd(current->fs, &path);
45220
45221 dput_and_out:
45222@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45223 goto out_putf;
45224
45225 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45226+
45227+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45228+ error = -EPERM;
45229+
45230+ if (!error)
45231+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45232+
45233 if (!error)
45234 set_fs_pwd(current->fs, &file->f_path);
45235 out_putf:
45236@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45237 if (error)
45238 goto dput_and_out;
45239
45240+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45241+ goto dput_and_out;
45242+
45243 set_fs_root(current->fs, &path);
45244+
45245+ gr_handle_chroot_chdir(&path);
45246+
45247 error = 0;
45248 dput_and_out:
45249 path_put(&path);
45250@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45251 if (error)
45252 return error;
45253 mutex_lock(&inode->i_mutex);
45254+
45255+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45256+ error = -EACCES;
45257+ goto out_unlock;
45258+ }
45259+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45260+ error = -EACCES;
45261+ goto out_unlock;
45262+ }
45263+
45264 error = security_path_chmod(path->dentry, path->mnt, mode);
45265 if (error)
45266 goto out_unlock;
45267@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45268 int error;
45269 struct iattr newattrs;
45270
45271+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45272+ return -EACCES;
45273+
45274 newattrs.ia_valid = ATTR_CTIME;
45275 if (user != (uid_t) -1) {
45276 newattrs.ia_valid |= ATTR_UID;
45277diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45278index 6296b40..417c00f 100644
45279--- a/fs/partitions/efi.c
45280+++ b/fs/partitions/efi.c
45281@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45282 if (!gpt)
45283 return NULL;
45284
45285+ if (!le32_to_cpu(gpt->num_partition_entries))
45286+ return NULL;
45287+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45288+ if (!pte)
45289+ return NULL;
45290+
45291 count = le32_to_cpu(gpt->num_partition_entries) *
45292 le32_to_cpu(gpt->sizeof_partition_entry);
45293- if (!count)
45294- return NULL;
45295- pte = kzalloc(count, GFP_KERNEL);
45296- if (!pte)
45297- return NULL;
45298-
45299 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45300 (u8 *) pte,
45301 count) < count) {
45302diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45303index bd8ae78..539d250 100644
45304--- a/fs/partitions/ldm.c
45305+++ b/fs/partitions/ldm.c
45306@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45307 goto found;
45308 }
45309
45310- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45311+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45312 if (!f) {
45313 ldm_crit ("Out of memory.");
45314 return false;
45315diff --git a/fs/pipe.c b/fs/pipe.c
45316index 4065f07..68c0706 100644
45317--- a/fs/pipe.c
45318+++ b/fs/pipe.c
45319@@ -420,9 +420,9 @@ redo:
45320 }
45321 if (bufs) /* More to do? */
45322 continue;
45323- if (!pipe->writers)
45324+ if (!atomic_read(&pipe->writers))
45325 break;
45326- if (!pipe->waiting_writers) {
45327+ if (!atomic_read(&pipe->waiting_writers)) {
45328 /* syscall merging: Usually we must not sleep
45329 * if O_NONBLOCK is set, or if we got some data.
45330 * But if a writer sleeps in kernel space, then
45331@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45332 mutex_lock(&inode->i_mutex);
45333 pipe = inode->i_pipe;
45334
45335- if (!pipe->readers) {
45336+ if (!atomic_read(&pipe->readers)) {
45337 send_sig(SIGPIPE, current, 0);
45338 ret = -EPIPE;
45339 goto out;
45340@@ -530,7 +530,7 @@ redo1:
45341 for (;;) {
45342 int bufs;
45343
45344- if (!pipe->readers) {
45345+ if (!atomic_read(&pipe->readers)) {
45346 send_sig(SIGPIPE, current, 0);
45347 if (!ret)
45348 ret = -EPIPE;
45349@@ -616,9 +616,9 @@ redo2:
45350 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45351 do_wakeup = 0;
45352 }
45353- pipe->waiting_writers++;
45354+ atomic_inc(&pipe->waiting_writers);
45355 pipe_wait(pipe);
45356- pipe->waiting_writers--;
45357+ atomic_dec(&pipe->waiting_writers);
45358 }
45359 out:
45360 mutex_unlock(&inode->i_mutex);
45361@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45362 mask = 0;
45363 if (filp->f_mode & FMODE_READ) {
45364 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45365- if (!pipe->writers && filp->f_version != pipe->w_counter)
45366+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45367 mask |= POLLHUP;
45368 }
45369
45370@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45371 * Most Unices do not set POLLERR for FIFOs but on Linux they
45372 * behave exactly like pipes for poll().
45373 */
45374- if (!pipe->readers)
45375+ if (!atomic_read(&pipe->readers))
45376 mask |= POLLERR;
45377 }
45378
45379@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45380
45381 mutex_lock(&inode->i_mutex);
45382 pipe = inode->i_pipe;
45383- pipe->readers -= decr;
45384- pipe->writers -= decw;
45385+ atomic_sub(decr, &pipe->readers);
45386+ atomic_sub(decw, &pipe->writers);
45387
45388- if (!pipe->readers && !pipe->writers) {
45389+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45390 free_pipe_info(inode);
45391 } else {
45392 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45393@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45394
45395 if (inode->i_pipe) {
45396 ret = 0;
45397- inode->i_pipe->readers++;
45398+ atomic_inc(&inode->i_pipe->readers);
45399 }
45400
45401 mutex_unlock(&inode->i_mutex);
45402@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45403
45404 if (inode->i_pipe) {
45405 ret = 0;
45406- inode->i_pipe->writers++;
45407+ atomic_inc(&inode->i_pipe->writers);
45408 }
45409
45410 mutex_unlock(&inode->i_mutex);
45411@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45412 if (inode->i_pipe) {
45413 ret = 0;
45414 if (filp->f_mode & FMODE_READ)
45415- inode->i_pipe->readers++;
45416+ atomic_inc(&inode->i_pipe->readers);
45417 if (filp->f_mode & FMODE_WRITE)
45418- inode->i_pipe->writers++;
45419+ atomic_inc(&inode->i_pipe->writers);
45420 }
45421
45422 mutex_unlock(&inode->i_mutex);
45423@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45424 inode->i_pipe = NULL;
45425 }
45426
45427-static struct vfsmount *pipe_mnt __read_mostly;
45428+struct vfsmount *pipe_mnt __read_mostly;
45429
45430 /*
45431 * pipefs_dname() is called from d_path().
45432@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45433 goto fail_iput;
45434 inode->i_pipe = pipe;
45435
45436- pipe->readers = pipe->writers = 1;
45437+ atomic_set(&pipe->readers, 1);
45438+ atomic_set(&pipe->writers, 1);
45439 inode->i_fop = &rdwr_pipefifo_fops;
45440
45441 /*
45442diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45443index 15af622..0e9f4467 100644
45444--- a/fs/proc/Kconfig
45445+++ b/fs/proc/Kconfig
45446@@ -30,12 +30,12 @@ config PROC_FS
45447
45448 config PROC_KCORE
45449 bool "/proc/kcore support" if !ARM
45450- depends on PROC_FS && MMU
45451+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45452
45453 config PROC_VMCORE
45454 bool "/proc/vmcore support"
45455- depends on PROC_FS && CRASH_DUMP
45456- default y
45457+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45458+ default n
45459 help
45460 Exports the dump image of crashed kernel in ELF format.
45461
45462@@ -59,8 +59,8 @@ config PROC_SYSCTL
45463 limited in memory.
45464
45465 config PROC_PAGE_MONITOR
45466- default y
45467- depends on PROC_FS && MMU
45468+ default n
45469+ depends on PROC_FS && MMU && !GRKERNSEC
45470 bool "Enable /proc page monitoring" if EXPERT
45471 help
45472 Various /proc files exist to monitor process memory utilization:
45473diff --git a/fs/proc/array.c b/fs/proc/array.c
45474index 3a1dafd..d41fc37 100644
45475--- a/fs/proc/array.c
45476+++ b/fs/proc/array.c
45477@@ -60,6 +60,7 @@
45478 #include <linux/tty.h>
45479 #include <linux/string.h>
45480 #include <linux/mman.h>
45481+#include <linux/grsecurity.h>
45482 #include <linux/proc_fs.h>
45483 #include <linux/ioport.h>
45484 #include <linux/uaccess.h>
45485@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45486 seq_putc(m, '\n');
45487 }
45488
45489+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45490+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45491+{
45492+ if (p->mm)
45493+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45494+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45495+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45496+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45497+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45498+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45499+ else
45500+ seq_printf(m, "PaX:\t-----\n");
45501+}
45502+#endif
45503+
45504 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45505 struct pid *pid, struct task_struct *task)
45506 {
45507@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45508 task_cpus_allowed(m, task);
45509 cpuset_task_status_allowed(m, task);
45510 task_context_switch_counts(m, task);
45511+
45512+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45513+ task_pax(m, task);
45514+#endif
45515+
45516+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45517+ task_grsec_rbac(m, task);
45518+#endif
45519+
45520 return 0;
45521 }
45522
45523+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45524+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45525+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45526+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45527+#endif
45528+
45529 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45530 struct pid *pid, struct task_struct *task, int whole)
45531 {
45532@@ -449,6 +480,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45533 gtime = task->gtime;
45534 }
45535
45536+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45537+ if (PAX_RAND_FLAGS(mm)) {
45538+ eip = 0;
45539+ esp = 0;
45540+ wchan = 0;
45541+ }
45542+#endif
45543+#ifdef CONFIG_GRKERNSEC_HIDESYM
45544+ wchan = 0;
45545+ eip =0;
45546+ esp =0;
45547+#endif
45548+
45549 /* scale priority and nice values from timeslices to -20..20 */
45550 /* to make it look like a "normal" Unix priority/nice value */
45551 priority = task_prio(task);
45552@@ -489,9 +533,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45553 vsize,
45554 mm ? get_mm_rss(mm) : 0,
45555 rsslim,
45556+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45557+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45558+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45559+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45560+#else
45561 mm ? (permitted ? mm->start_code : 1) : 0,
45562 mm ? (permitted ? mm->end_code : 1) : 0,
45563 (permitted && mm) ? mm->start_stack : 0,
45564+#endif
45565 esp,
45566 eip,
45567 /* The signal information here is obsolete.
45568@@ -544,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45569
45570 return 0;
45571 }
45572+
45573+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45574+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45575+{
45576+ u32 curr_ip = 0;
45577+ unsigned long flags;
45578+
45579+ if (lock_task_sighand(task, &flags)) {
45580+ curr_ip = task->signal->curr_ip;
45581+ unlock_task_sighand(task, &flags);
45582+ }
45583+
45584+ return sprintf(buffer, "%pI4\n", &curr_ip);
45585+}
45586+#endif
45587diff --git a/fs/proc/base.c b/fs/proc/base.c
45588index 1fc1dca..813fd0b 100644
45589--- a/fs/proc/base.c
45590+++ b/fs/proc/base.c
45591@@ -107,6 +107,22 @@ struct pid_entry {
45592 union proc_op op;
45593 };
45594
45595+struct getdents_callback {
45596+ struct linux_dirent __user * current_dir;
45597+ struct linux_dirent __user * previous;
45598+ struct file * file;
45599+ int count;
45600+ int error;
45601+};
45602+
45603+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45604+ loff_t offset, u64 ino, unsigned int d_type)
45605+{
45606+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45607+ buf->error = -EINVAL;
45608+ return 0;
45609+}
45610+
45611 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45612 .name = (NAME), \
45613 .len = sizeof(NAME) - 1, \
45614@@ -204,10 +220,12 @@ static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45615 return ERR_PTR(err);
45616
45617 mm = get_task_mm(task);
45618- if (mm && mm != current->mm &&
45619- !ptrace_may_access(task, mode)) {
45620- mmput(mm);
45621- mm = ERR_PTR(-EACCES);
45622+ if (mm) {
45623+ if ((mm != current->mm && !ptrace_may_access(task, mode)) ||
45624+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)))) {
45625+ mmput(mm);
45626+ mm = ERR_PTR(-EACCES);
45627+ }
45628 }
45629 mutex_unlock(&task->signal->cred_guard_mutex);
45630
45631@@ -229,6 +247,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45632 if (!mm->arg_end)
45633 goto out_mm; /* Shh! No looking before we're done */
45634
45635+ if (gr_acl_handle_procpidmem(task))
45636+ goto out_mm;
45637+
45638 len = mm->arg_end - mm->arg_start;
45639
45640 if (len > PAGE_SIZE)
45641@@ -256,12 +277,28 @@ out:
45642 return res;
45643 }
45644
45645+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45646+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45647+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45648+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45649+#endif
45650+
45651 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45652 {
45653 struct mm_struct *mm = mm_for_maps(task);
45654 int res = PTR_ERR(mm);
45655 if (mm && !IS_ERR(mm)) {
45656 unsigned int nwords = 0;
45657+
45658+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45659+ /* allow if we're currently ptracing this task */
45660+ if (PAX_RAND_FLAGS(mm) &&
45661+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45662+ mmput(mm);
45663+ return 0;
45664+ }
45665+#endif
45666+
45667 do {
45668 nwords += 2;
45669 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45670@@ -275,7 +312,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45671 }
45672
45673
45674-#ifdef CONFIG_KALLSYMS
45675+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45676 /*
45677 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45678 * Returns the resolved symbol. If that fails, simply return the address.
45679@@ -314,7 +351,7 @@ static void unlock_trace(struct task_struct *task)
45680 mutex_unlock(&task->signal->cred_guard_mutex);
45681 }
45682
45683-#ifdef CONFIG_STACKTRACE
45684+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45685
45686 #define MAX_STACK_TRACE_DEPTH 64
45687
45688@@ -505,7 +542,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45689 return count;
45690 }
45691
45692-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45693+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45694 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45695 {
45696 long nr;
45697@@ -534,7 +571,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45698 /************************************************************************/
45699
45700 /* permission checks */
45701-static int proc_fd_access_allowed(struct inode *inode)
45702+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45703 {
45704 struct task_struct *task;
45705 int allowed = 0;
45706@@ -544,7 +581,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45707 */
45708 task = get_proc_task(inode);
45709 if (task) {
45710- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45711+ if (log)
45712+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45713+ else
45714+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45715 put_task_struct(task);
45716 }
45717 return allowed;
45718@@ -826,6 +866,10 @@ static ssize_t mem_read(struct file * file, char __user * buf,
45719 return ret;
45720 }
45721
45722+#define mem_write NULL
45723+
45724+#ifndef mem_write
45725+/* They were right the first time */
45726 static ssize_t mem_write(struct file * file, const char __user *buf,
45727 size_t count, loff_t *ppos)
45728 {
45729@@ -866,6 +910,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
45730 free_page((unsigned long) page);
45731 return copied;
45732 }
45733+#endif
45734
45735 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45736 {
45737@@ -911,6 +956,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45738 if (!task)
45739 goto out_no_task;
45740
45741+ if (gr_acl_handle_procpidmem(task))
45742+ goto out;
45743+
45744 ret = -ENOMEM;
45745 page = (char *)__get_free_page(GFP_TEMPORARY);
45746 if (!page)
45747@@ -1533,7 +1581,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45748 path_put(&nd->path);
45749
45750 /* Are we allowed to snoop on the tasks file descriptors? */
45751- if (!proc_fd_access_allowed(inode))
45752+ if (!proc_fd_access_allowed(inode,0))
45753 goto out;
45754
45755 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45756@@ -1572,8 +1620,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45757 struct path path;
45758
45759 /* Are we allowed to snoop on the tasks file descriptors? */
45760- if (!proc_fd_access_allowed(inode))
45761- goto out;
45762+ /* logging this is needed for learning on chromium to work properly,
45763+ but we don't want to flood the logs from 'ps' which does a readlink
45764+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45765+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45766+ */
45767+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45768+ if (!proc_fd_access_allowed(inode,0))
45769+ goto out;
45770+ } else {
45771+ if (!proc_fd_access_allowed(inode,1))
45772+ goto out;
45773+ }
45774
45775 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45776 if (error)
45777@@ -1638,7 +1696,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45778 rcu_read_lock();
45779 cred = __task_cred(task);
45780 inode->i_uid = cred->euid;
45781+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45782+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45783+#else
45784 inode->i_gid = cred->egid;
45785+#endif
45786 rcu_read_unlock();
45787 }
45788 security_task_to_inode(task, inode);
45789@@ -1656,6 +1718,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45790 struct inode *inode = dentry->d_inode;
45791 struct task_struct *task;
45792 const struct cred *cred;
45793+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45794+ const struct cred *tmpcred = current_cred();
45795+#endif
45796
45797 generic_fillattr(inode, stat);
45798
45799@@ -1663,13 +1728,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45800 stat->uid = 0;
45801 stat->gid = 0;
45802 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45803+
45804+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45805+ rcu_read_unlock();
45806+ return -ENOENT;
45807+ }
45808+
45809 if (task) {
45810+ cred = __task_cred(task);
45811+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45812+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45813+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45814+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45815+#endif
45816+ ) {
45817+#endif
45818 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45819+#ifdef CONFIG_GRKERNSEC_PROC_USER
45820+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45821+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45822+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45823+#endif
45824 task_dumpable(task)) {
45825- cred = __task_cred(task);
45826 stat->uid = cred->euid;
45827+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45828+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45829+#else
45830 stat->gid = cred->egid;
45831+#endif
45832 }
45833+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45834+ } else {
45835+ rcu_read_unlock();
45836+ return -ENOENT;
45837+ }
45838+#endif
45839 }
45840 rcu_read_unlock();
45841 return 0;
45842@@ -1706,11 +1799,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45843
45844 if (task) {
45845 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45846+#ifdef CONFIG_GRKERNSEC_PROC_USER
45847+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45848+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45849+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45850+#endif
45851 task_dumpable(task)) {
45852 rcu_read_lock();
45853 cred = __task_cred(task);
45854 inode->i_uid = cred->euid;
45855+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45856+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45857+#else
45858 inode->i_gid = cred->egid;
45859+#endif
45860 rcu_read_unlock();
45861 } else {
45862 inode->i_uid = 0;
45863@@ -1828,7 +1930,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45864 int fd = proc_fd(inode);
45865
45866 if (task) {
45867- files = get_files_struct(task);
45868+ if (!gr_acl_handle_procpidmem(task))
45869+ files = get_files_struct(task);
45870 put_task_struct(task);
45871 }
45872 if (files) {
45873@@ -2096,11 +2199,21 @@ static const struct file_operations proc_fd_operations = {
45874 */
45875 static int proc_fd_permission(struct inode *inode, int mask)
45876 {
45877+ struct task_struct *task;
45878 int rv = generic_permission(inode, mask);
45879- if (rv == 0)
45880- return 0;
45881+
45882 if (task_pid(current) == proc_pid(inode))
45883 rv = 0;
45884+
45885+ task = get_proc_task(inode);
45886+ if (task == NULL)
45887+ return rv;
45888+
45889+ if (gr_acl_handle_procpidmem(task))
45890+ rv = -EACCES;
45891+
45892+ put_task_struct(task);
45893+
45894 return rv;
45895 }
45896
45897@@ -2210,6 +2323,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45898 if (!task)
45899 goto out_no_task;
45900
45901+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45902+ goto out;
45903+
45904 /*
45905 * Yes, it does not scale. And it should not. Don't add
45906 * new entries into /proc/<tgid>/ without very good reasons.
45907@@ -2254,6 +2370,9 @@ static int proc_pident_readdir(struct file *filp,
45908 if (!task)
45909 goto out_no_task;
45910
45911+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45912+ goto out;
45913+
45914 ret = 0;
45915 i = filp->f_pos;
45916 switch (i) {
45917@@ -2524,7 +2643,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45918 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45919 void *cookie)
45920 {
45921- char *s = nd_get_link(nd);
45922+ const char *s = nd_get_link(nd);
45923 if (!IS_ERR(s))
45924 __putname(s);
45925 }
45926@@ -2722,7 +2841,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45927 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45928 #endif
45929 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45930-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45931+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45932 INF("syscall", S_IRUGO, proc_pid_syscall),
45933 #endif
45934 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45935@@ -2747,10 +2866,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45936 #ifdef CONFIG_SECURITY
45937 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45938 #endif
45939-#ifdef CONFIG_KALLSYMS
45940+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45941 INF("wchan", S_IRUGO, proc_pid_wchan),
45942 #endif
45943-#ifdef CONFIG_STACKTRACE
45944+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45945 ONE("stack", S_IRUGO, proc_pid_stack),
45946 #endif
45947 #ifdef CONFIG_SCHEDSTATS
45948@@ -2784,6 +2903,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45949 #ifdef CONFIG_HARDWALL
45950 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45951 #endif
45952+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45953+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45954+#endif
45955 };
45956
45957 static int proc_tgid_base_readdir(struct file * filp,
45958@@ -2909,7 +3031,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45959 if (!inode)
45960 goto out;
45961
45962+#ifdef CONFIG_GRKERNSEC_PROC_USER
45963+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45964+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45965+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45966+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45967+#else
45968 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45969+#endif
45970 inode->i_op = &proc_tgid_base_inode_operations;
45971 inode->i_fop = &proc_tgid_base_operations;
45972 inode->i_flags|=S_IMMUTABLE;
45973@@ -2951,7 +3080,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45974 if (!task)
45975 goto out;
45976
45977+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45978+ goto out_put_task;
45979+
45980 result = proc_pid_instantiate(dir, dentry, task, NULL);
45981+out_put_task:
45982 put_task_struct(task);
45983 out:
45984 return result;
45985@@ -3016,6 +3149,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45986 {
45987 unsigned int nr;
45988 struct task_struct *reaper;
45989+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45990+ const struct cred *tmpcred = current_cred();
45991+ const struct cred *itercred;
45992+#endif
45993+ filldir_t __filldir = filldir;
45994 struct tgid_iter iter;
45995 struct pid_namespace *ns;
45996
45997@@ -3039,8 +3177,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45998 for (iter = next_tgid(ns, iter);
45999 iter.task;
46000 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46001+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46002+ rcu_read_lock();
46003+ itercred = __task_cred(iter.task);
46004+#endif
46005+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46006+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46007+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46008+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46009+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46010+#endif
46011+ )
46012+#endif
46013+ )
46014+ __filldir = &gr_fake_filldir;
46015+ else
46016+ __filldir = filldir;
46017+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46018+ rcu_read_unlock();
46019+#endif
46020 filp->f_pos = iter.tgid + TGID_OFFSET;
46021- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46022+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46023 put_task_struct(iter.task);
46024 goto out;
46025 }
46026@@ -3068,7 +3225,7 @@ static const struct pid_entry tid_base_stuff[] = {
46027 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46028 #endif
46029 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46030-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46031+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46032 INF("syscall", S_IRUGO, proc_pid_syscall),
46033 #endif
46034 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46035@@ -3092,10 +3249,10 @@ static const struct pid_entry tid_base_stuff[] = {
46036 #ifdef CONFIG_SECURITY
46037 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46038 #endif
46039-#ifdef CONFIG_KALLSYMS
46040+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46041 INF("wchan", S_IRUGO, proc_pid_wchan),
46042 #endif
46043-#ifdef CONFIG_STACKTRACE
46044+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46045 ONE("stack", S_IRUGO, proc_pid_stack),
46046 #endif
46047 #ifdef CONFIG_SCHEDSTATS
46048diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46049index 82676e3..5f8518a 100644
46050--- a/fs/proc/cmdline.c
46051+++ b/fs/proc/cmdline.c
46052@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46053
46054 static int __init proc_cmdline_init(void)
46055 {
46056+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46057+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46058+#else
46059 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46060+#endif
46061 return 0;
46062 }
46063 module_init(proc_cmdline_init);
46064diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46065index b143471..bb105e5 100644
46066--- a/fs/proc/devices.c
46067+++ b/fs/proc/devices.c
46068@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46069
46070 static int __init proc_devices_init(void)
46071 {
46072+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46073+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46074+#else
46075 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46076+#endif
46077 return 0;
46078 }
46079 module_init(proc_devices_init);
46080diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46081index 7737c54..7172574 100644
46082--- a/fs/proc/inode.c
46083+++ b/fs/proc/inode.c
46084@@ -18,12 +18,18 @@
46085 #include <linux/module.h>
46086 #include <linux/sysctl.h>
46087 #include <linux/slab.h>
46088+#include <linux/grsecurity.h>
46089
46090 #include <asm/system.h>
46091 #include <asm/uaccess.h>
46092
46093 #include "internal.h"
46094
46095+#ifdef CONFIG_PROC_SYSCTL
46096+extern const struct inode_operations proc_sys_inode_operations;
46097+extern const struct inode_operations proc_sys_dir_operations;
46098+#endif
46099+
46100 static void proc_evict_inode(struct inode *inode)
46101 {
46102 struct proc_dir_entry *de;
46103@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46104 ns_ops = PROC_I(inode)->ns_ops;
46105 if (ns_ops && ns_ops->put)
46106 ns_ops->put(PROC_I(inode)->ns);
46107+
46108+#ifdef CONFIG_PROC_SYSCTL
46109+ if (inode->i_op == &proc_sys_inode_operations ||
46110+ inode->i_op == &proc_sys_dir_operations)
46111+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46112+#endif
46113+
46114 }
46115
46116 static struct kmem_cache * proc_inode_cachep;
46117@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46118 if (de->mode) {
46119 inode->i_mode = de->mode;
46120 inode->i_uid = de->uid;
46121+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46122+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46123+#else
46124 inode->i_gid = de->gid;
46125+#endif
46126 }
46127 if (de->size)
46128 inode->i_size = de->size;
46129diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46130index 7838e5c..ff92cbc 100644
46131--- a/fs/proc/internal.h
46132+++ b/fs/proc/internal.h
46133@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46134 struct pid *pid, struct task_struct *task);
46135 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46136 struct pid *pid, struct task_struct *task);
46137+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46138+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46139+#endif
46140 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46141
46142 extern const struct file_operations proc_maps_operations;
46143diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46144index d245cb2..f4e8498 100644
46145--- a/fs/proc/kcore.c
46146+++ b/fs/proc/kcore.c
46147@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46148 * the addresses in the elf_phdr on our list.
46149 */
46150 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46151- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46152+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46153+ if (tsz > buflen)
46154 tsz = buflen;
46155-
46156+
46157 while (buflen) {
46158 struct kcore_list *m;
46159
46160@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46161 kfree(elf_buf);
46162 } else {
46163 if (kern_addr_valid(start)) {
46164- unsigned long n;
46165+ char *elf_buf;
46166+ mm_segment_t oldfs;
46167
46168- n = copy_to_user(buffer, (char *)start, tsz);
46169- /*
46170- * We cannot distingush between fault on source
46171- * and fault on destination. When this happens
46172- * we clear too and hope it will trigger the
46173- * EFAULT again.
46174- */
46175- if (n) {
46176- if (clear_user(buffer + tsz - n,
46177- n))
46178+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46179+ if (!elf_buf)
46180+ return -ENOMEM;
46181+ oldfs = get_fs();
46182+ set_fs(KERNEL_DS);
46183+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46184+ set_fs(oldfs);
46185+ if (copy_to_user(buffer, elf_buf, tsz)) {
46186+ kfree(elf_buf);
46187 return -EFAULT;
46188+ }
46189 }
46190+ set_fs(oldfs);
46191+ kfree(elf_buf);
46192 } else {
46193 if (clear_user(buffer, tsz))
46194 return -EFAULT;
46195@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46196
46197 static int open_kcore(struct inode *inode, struct file *filp)
46198 {
46199+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46200+ return -EPERM;
46201+#endif
46202 if (!capable(CAP_SYS_RAWIO))
46203 return -EPERM;
46204 if (kcore_need_update)
46205diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46206index 80e4645..53e5fcf 100644
46207--- a/fs/proc/meminfo.c
46208+++ b/fs/proc/meminfo.c
46209@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46210 vmi.used >> 10,
46211 vmi.largest_chunk >> 10
46212 #ifdef CONFIG_MEMORY_FAILURE
46213- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46214+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46215 #endif
46216 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46217 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46218diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46219index b1822dd..df622cb 100644
46220--- a/fs/proc/nommu.c
46221+++ b/fs/proc/nommu.c
46222@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46223 if (len < 1)
46224 len = 1;
46225 seq_printf(m, "%*c", len, ' ');
46226- seq_path(m, &file->f_path, "");
46227+ seq_path(m, &file->f_path, "\n\\");
46228 }
46229
46230 seq_putc(m, '\n');
46231diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46232index f738024..876984a 100644
46233--- a/fs/proc/proc_net.c
46234+++ b/fs/proc/proc_net.c
46235@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46236 struct task_struct *task;
46237 struct nsproxy *ns;
46238 struct net *net = NULL;
46239+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46240+ const struct cred *cred = current_cred();
46241+#endif
46242+
46243+#ifdef CONFIG_GRKERNSEC_PROC_USER
46244+ if (cred->fsuid)
46245+ return net;
46246+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46247+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46248+ return net;
46249+#endif
46250
46251 rcu_read_lock();
46252 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46253diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46254index a6b6217..1e0579d 100644
46255--- a/fs/proc/proc_sysctl.c
46256+++ b/fs/proc/proc_sysctl.c
46257@@ -9,11 +9,13 @@
46258 #include <linux/namei.h>
46259 #include "internal.h"
46260
46261+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46262+
46263 static const struct dentry_operations proc_sys_dentry_operations;
46264 static const struct file_operations proc_sys_file_operations;
46265-static const struct inode_operations proc_sys_inode_operations;
46266+const struct inode_operations proc_sys_inode_operations;
46267 static const struct file_operations proc_sys_dir_file_operations;
46268-static const struct inode_operations proc_sys_dir_operations;
46269+const struct inode_operations proc_sys_dir_operations;
46270
46271 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46272 {
46273@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46274
46275 err = NULL;
46276 d_set_d_op(dentry, &proc_sys_dentry_operations);
46277+
46278+ gr_handle_proc_create(dentry, inode);
46279+
46280 d_add(dentry, inode);
46281
46282+ if (gr_handle_sysctl(p, MAY_EXEC))
46283+ err = ERR_PTR(-ENOENT);
46284+
46285 out:
46286 sysctl_head_finish(head);
46287 return err;
46288@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46289 if (!table->proc_handler)
46290 goto out;
46291
46292+#ifdef CONFIG_GRKERNSEC
46293+ error = -EPERM;
46294+ if (write && !capable(CAP_SYS_ADMIN))
46295+ goto out;
46296+#endif
46297+
46298 /* careful: calling conventions are nasty here */
46299 res = count;
46300 error = table->proc_handler(table, write, buf, &res, ppos);
46301@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46302 return -ENOMEM;
46303 } else {
46304 d_set_d_op(child, &proc_sys_dentry_operations);
46305+
46306+ gr_handle_proc_create(child, inode);
46307+
46308 d_add(child, inode);
46309 }
46310 } else {
46311@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46312 if (*pos < file->f_pos)
46313 continue;
46314
46315+ if (gr_handle_sysctl(table, 0))
46316+ continue;
46317+
46318 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46319 if (res)
46320 return res;
46321@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46322 if (IS_ERR(head))
46323 return PTR_ERR(head);
46324
46325+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46326+ return -ENOENT;
46327+
46328 generic_fillattr(inode, stat);
46329 if (table)
46330 stat->mode = (stat->mode & S_IFMT) | table->mode;
46331@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46332 .llseek = generic_file_llseek,
46333 };
46334
46335-static const struct inode_operations proc_sys_inode_operations = {
46336+const struct inode_operations proc_sys_inode_operations = {
46337 .permission = proc_sys_permission,
46338 .setattr = proc_sys_setattr,
46339 .getattr = proc_sys_getattr,
46340 };
46341
46342-static const struct inode_operations proc_sys_dir_operations = {
46343+const struct inode_operations proc_sys_dir_operations = {
46344 .lookup = proc_sys_lookup,
46345 .permission = proc_sys_permission,
46346 .setattr = proc_sys_setattr,
46347diff --git a/fs/proc/root.c b/fs/proc/root.c
46348index 03102d9..4ae347e 100644
46349--- a/fs/proc/root.c
46350+++ b/fs/proc/root.c
46351@@ -121,7 +121,15 @@ void __init proc_root_init(void)
46352 #ifdef CONFIG_PROC_DEVICETREE
46353 proc_device_tree_init();
46354 #endif
46355+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46356+#ifdef CONFIG_GRKERNSEC_PROC_USER
46357+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46358+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46359+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46360+#endif
46361+#else
46362 proc_mkdir("bus", NULL);
46363+#endif
46364 proc_sys_init();
46365 }
46366
46367diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46368index 7dcd2a2..d1d9cb6 100644
46369--- a/fs/proc/task_mmu.c
46370+++ b/fs/proc/task_mmu.c
46371@@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46372 "VmExe:\t%8lu kB\n"
46373 "VmLib:\t%8lu kB\n"
46374 "VmPTE:\t%8lu kB\n"
46375- "VmSwap:\t%8lu kB\n",
46376- hiwater_vm << (PAGE_SHIFT-10),
46377+ "VmSwap:\t%8lu kB\n"
46378+
46379+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46380+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46381+#endif
46382+
46383+ ,hiwater_vm << (PAGE_SHIFT-10),
46384 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46385 mm->locked_vm << (PAGE_SHIFT-10),
46386 mm->pinned_vm << (PAGE_SHIFT-10),
46387@@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46388 data << (PAGE_SHIFT-10),
46389 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46390 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46391- swap << (PAGE_SHIFT-10));
46392+ swap << (PAGE_SHIFT-10)
46393+
46394+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46395+ , mm->context.user_cs_base, mm->context.user_cs_limit
46396+#endif
46397+
46398+ );
46399 }
46400
46401 unsigned long task_vsize(struct mm_struct *mm)
46402@@ -209,6 +220,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46403 return ret;
46404 }
46405
46406+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46407+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46408+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46409+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46410+#endif
46411+
46412 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46413 {
46414 struct mm_struct *mm = vma->vm_mm;
46415@@ -227,13 +244,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46416 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46417 }
46418
46419- /* We don't show the stack guard page in /proc/maps */
46420+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46421+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46422+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46423+#else
46424 start = vma->vm_start;
46425- if (stack_guard_page_start(vma, start))
46426- start += PAGE_SIZE;
46427 end = vma->vm_end;
46428- if (stack_guard_page_end(vma, end))
46429- end -= PAGE_SIZE;
46430+#endif
46431
46432 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46433 start,
46434@@ -242,7 +259,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46435 flags & VM_WRITE ? 'w' : '-',
46436 flags & VM_EXEC ? 'x' : '-',
46437 flags & VM_MAYSHARE ? 's' : 'p',
46438+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46439+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46440+#else
46441 pgoff,
46442+#endif
46443 MAJOR(dev), MINOR(dev), ino, &len);
46444
46445 /*
46446@@ -251,7 +272,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46447 */
46448 if (file) {
46449 pad_len_spaces(m, len);
46450- seq_path(m, &file->f_path, "\n");
46451+ seq_path(m, &file->f_path, "\n\\");
46452 } else {
46453 const char *name = arch_vma_name(vma);
46454 if (!name) {
46455@@ -259,8 +280,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46456 if (vma->vm_start <= mm->brk &&
46457 vma->vm_end >= mm->start_brk) {
46458 name = "[heap]";
46459- } else if (vma->vm_start <= mm->start_stack &&
46460- vma->vm_end >= mm->start_stack) {
46461+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46462+ (vma->vm_start <= mm->start_stack &&
46463+ vma->vm_end >= mm->start_stack)) {
46464 name = "[stack]";
46465 }
46466 } else {
46467@@ -435,11 +457,16 @@ static int show_smap(struct seq_file *m, void *v)
46468 };
46469
46470 memset(&mss, 0, sizeof mss);
46471- mss.vma = vma;
46472- /* mmap_sem is held in m_start */
46473- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46474- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46475-
46476+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46477+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46478+#endif
46479+ mss.vma = vma;
46480+ /* mmap_sem is held in m_start */
46481+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46482+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46483+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46484+ }
46485+#endif
46486 show_map_vma(m, vma);
46487
46488 seq_printf(m,
46489@@ -457,7 +484,11 @@ static int show_smap(struct seq_file *m, void *v)
46490 "KernelPageSize: %8lu kB\n"
46491 "MMUPageSize: %8lu kB\n"
46492 "Locked: %8lu kB\n",
46493+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46494+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46495+#else
46496 (vma->vm_end - vma->vm_start) >> 10,
46497+#endif
46498 mss.resident >> 10,
46499 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46500 mss.shared_clean >> 10,
46501@@ -1036,7 +1067,7 @@ static int show_numa_map(struct seq_file *m, void *v)
46502
46503 if (file) {
46504 seq_printf(m, " file=");
46505- seq_path(m, &file->f_path, "\n\t= ");
46506+ seq_path(m, &file->f_path, "\n\t\\= ");
46507 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46508 seq_printf(m, " heap");
46509 } else if (vma->vm_start <= mm->start_stack &&
46510diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46511index 980de54..2a4db5f 100644
46512--- a/fs/proc/task_nommu.c
46513+++ b/fs/proc/task_nommu.c
46514@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46515 else
46516 bytes += kobjsize(mm);
46517
46518- if (current->fs && current->fs->users > 1)
46519+ if (current->fs && atomic_read(&current->fs->users) > 1)
46520 sbytes += kobjsize(current->fs);
46521 else
46522 bytes += kobjsize(current->fs);
46523@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46524
46525 if (file) {
46526 pad_len_spaces(m, len);
46527- seq_path(m, &file->f_path, "");
46528+ seq_path(m, &file->f_path, "\n\\");
46529 } else if (mm) {
46530 if (vma->vm_start <= mm->start_stack &&
46531 vma->vm_end >= mm->start_stack) {
46532diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46533index d67908b..d13f6a6 100644
46534--- a/fs/quota/netlink.c
46535+++ b/fs/quota/netlink.c
46536@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46537 void quota_send_warning(short type, unsigned int id, dev_t dev,
46538 const char warntype)
46539 {
46540- static atomic_t seq;
46541+ static atomic_unchecked_t seq;
46542 struct sk_buff *skb;
46543 void *msg_head;
46544 int ret;
46545@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46546 "VFS: Not enough memory to send quota warning.\n");
46547 return;
46548 }
46549- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46550+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46551 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46552 if (!msg_head) {
46553 printk(KERN_ERR
46554diff --git a/fs/readdir.c b/fs/readdir.c
46555index 356f715..c918d38 100644
46556--- a/fs/readdir.c
46557+++ b/fs/readdir.c
46558@@ -17,6 +17,7 @@
46559 #include <linux/security.h>
46560 #include <linux/syscalls.h>
46561 #include <linux/unistd.h>
46562+#include <linux/namei.h>
46563
46564 #include <asm/uaccess.h>
46565
46566@@ -67,6 +68,7 @@ struct old_linux_dirent {
46567
46568 struct readdir_callback {
46569 struct old_linux_dirent __user * dirent;
46570+ struct file * file;
46571 int result;
46572 };
46573
46574@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46575 buf->result = -EOVERFLOW;
46576 return -EOVERFLOW;
46577 }
46578+
46579+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46580+ return 0;
46581+
46582 buf->result++;
46583 dirent = buf->dirent;
46584 if (!access_ok(VERIFY_WRITE, dirent,
46585@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46586
46587 buf.result = 0;
46588 buf.dirent = dirent;
46589+ buf.file = file;
46590
46591 error = vfs_readdir(file, fillonedir, &buf);
46592 if (buf.result)
46593@@ -142,6 +149,7 @@ struct linux_dirent {
46594 struct getdents_callback {
46595 struct linux_dirent __user * current_dir;
46596 struct linux_dirent __user * previous;
46597+ struct file * file;
46598 int count;
46599 int error;
46600 };
46601@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46602 buf->error = -EOVERFLOW;
46603 return -EOVERFLOW;
46604 }
46605+
46606+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46607+ return 0;
46608+
46609 dirent = buf->previous;
46610 if (dirent) {
46611 if (__put_user(offset, &dirent->d_off))
46612@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46613 buf.previous = NULL;
46614 buf.count = count;
46615 buf.error = 0;
46616+ buf.file = file;
46617
46618 error = vfs_readdir(file, filldir, &buf);
46619 if (error >= 0)
46620@@ -229,6 +242,7 @@ out:
46621 struct getdents_callback64 {
46622 struct linux_dirent64 __user * current_dir;
46623 struct linux_dirent64 __user * previous;
46624+ struct file *file;
46625 int count;
46626 int error;
46627 };
46628@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46629 buf->error = -EINVAL; /* only used if we fail.. */
46630 if (reclen > buf->count)
46631 return -EINVAL;
46632+
46633+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46634+ return 0;
46635+
46636 dirent = buf->previous;
46637 if (dirent) {
46638 if (__put_user(offset, &dirent->d_off))
46639@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46640
46641 buf.current_dir = dirent;
46642 buf.previous = NULL;
46643+ buf.file = file;
46644 buf.count = count;
46645 buf.error = 0;
46646
46647@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46648 error = buf.error;
46649 lastdirent = buf.previous;
46650 if (lastdirent) {
46651- typeof(lastdirent->d_off) d_off = file->f_pos;
46652+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46653 if (__put_user(d_off, &lastdirent->d_off))
46654 error = -EFAULT;
46655 else
46656diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46657index 60c0804..d814f98 100644
46658--- a/fs/reiserfs/do_balan.c
46659+++ b/fs/reiserfs/do_balan.c
46660@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46661 return;
46662 }
46663
46664- atomic_inc(&(fs_generation(tb->tb_sb)));
46665+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46666 do_balance_starts(tb);
46667
46668 /* balance leaf returns 0 except if combining L R and S into
46669diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46670index 7a99811..a7c96c4 100644
46671--- a/fs/reiserfs/procfs.c
46672+++ b/fs/reiserfs/procfs.c
46673@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46674 "SMALL_TAILS " : "NO_TAILS ",
46675 replay_only(sb) ? "REPLAY_ONLY " : "",
46676 convert_reiserfs(sb) ? "CONV " : "",
46677- atomic_read(&r->s_generation_counter),
46678+ atomic_read_unchecked(&r->s_generation_counter),
46679 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46680 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46681 SF(s_good_search_by_key_reada), SF(s_bmaps),
46682diff --git a/fs/select.c b/fs/select.c
46683index d33418f..2a5345e 100644
46684--- a/fs/select.c
46685+++ b/fs/select.c
46686@@ -20,6 +20,7 @@
46687 #include <linux/module.h>
46688 #include <linux/slab.h>
46689 #include <linux/poll.h>
46690+#include <linux/security.h>
46691 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46692 #include <linux/file.h>
46693 #include <linux/fdtable.h>
46694@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46695 struct poll_list *walk = head;
46696 unsigned long todo = nfds;
46697
46698+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46699 if (nfds > rlimit(RLIMIT_NOFILE))
46700 return -EINVAL;
46701
46702diff --git a/fs/seq_file.c b/fs/seq_file.c
46703index dba43c3..a99fb63 100644
46704--- a/fs/seq_file.c
46705+++ b/fs/seq_file.c
46706@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46707 return 0;
46708 }
46709 if (!m->buf) {
46710- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46711+ m->size = PAGE_SIZE;
46712+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46713 if (!m->buf)
46714 return -ENOMEM;
46715 }
46716@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46717 Eoverflow:
46718 m->op->stop(m, p);
46719 kfree(m->buf);
46720- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46721+ m->size <<= 1;
46722+ m->buf = kmalloc(m->size, GFP_KERNEL);
46723 return !m->buf ? -ENOMEM : -EAGAIN;
46724 }
46725
46726@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46727 m->version = file->f_version;
46728 /* grab buffer if we didn't have one */
46729 if (!m->buf) {
46730- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46731+ m->size = PAGE_SIZE;
46732+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46733 if (!m->buf)
46734 goto Enomem;
46735 }
46736@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46737 goto Fill;
46738 m->op->stop(m, p);
46739 kfree(m->buf);
46740- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46741+ m->size <<= 1;
46742+ m->buf = kmalloc(m->size, GFP_KERNEL);
46743 if (!m->buf)
46744 goto Enomem;
46745 m->count = 0;
46746@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
46747 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46748 void *data)
46749 {
46750- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46751+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46752 int res = -ENOMEM;
46753
46754 if (op) {
46755diff --git a/fs/splice.c b/fs/splice.c
46756index fa2defa..8601650 100644
46757--- a/fs/splice.c
46758+++ b/fs/splice.c
46759@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46760 pipe_lock(pipe);
46761
46762 for (;;) {
46763- if (!pipe->readers) {
46764+ if (!atomic_read(&pipe->readers)) {
46765 send_sig(SIGPIPE, current, 0);
46766 if (!ret)
46767 ret = -EPIPE;
46768@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46769 do_wakeup = 0;
46770 }
46771
46772- pipe->waiting_writers++;
46773+ atomic_inc(&pipe->waiting_writers);
46774 pipe_wait(pipe);
46775- pipe->waiting_writers--;
46776+ atomic_dec(&pipe->waiting_writers);
46777 }
46778
46779 pipe_unlock(pipe);
46780@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46781 old_fs = get_fs();
46782 set_fs(get_ds());
46783 /* The cast to a user pointer is valid due to the set_fs() */
46784- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46785+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46786 set_fs(old_fs);
46787
46788 return res;
46789@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46790 old_fs = get_fs();
46791 set_fs(get_ds());
46792 /* The cast to a user pointer is valid due to the set_fs() */
46793- res = vfs_write(file, (const char __user *)buf, count, &pos);
46794+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46795 set_fs(old_fs);
46796
46797 return res;
46798@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46799 goto err;
46800
46801 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46802- vec[i].iov_base = (void __user *) page_address(page);
46803+ vec[i].iov_base = (void __force_user *) page_address(page);
46804 vec[i].iov_len = this_len;
46805 spd.pages[i] = page;
46806 spd.nr_pages++;
46807@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46808 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46809 {
46810 while (!pipe->nrbufs) {
46811- if (!pipe->writers)
46812+ if (!atomic_read(&pipe->writers))
46813 return 0;
46814
46815- if (!pipe->waiting_writers && sd->num_spliced)
46816+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46817 return 0;
46818
46819 if (sd->flags & SPLICE_F_NONBLOCK)
46820@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46821 * out of the pipe right after the splice_to_pipe(). So set
46822 * PIPE_READERS appropriately.
46823 */
46824- pipe->readers = 1;
46825+ atomic_set(&pipe->readers, 1);
46826
46827 current->splice_pipe = pipe;
46828 }
46829@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46830 ret = -ERESTARTSYS;
46831 break;
46832 }
46833- if (!pipe->writers)
46834+ if (!atomic_read(&pipe->writers))
46835 break;
46836- if (!pipe->waiting_writers) {
46837+ if (!atomic_read(&pipe->waiting_writers)) {
46838 if (flags & SPLICE_F_NONBLOCK) {
46839 ret = -EAGAIN;
46840 break;
46841@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46842 pipe_lock(pipe);
46843
46844 while (pipe->nrbufs >= pipe->buffers) {
46845- if (!pipe->readers) {
46846+ if (!atomic_read(&pipe->readers)) {
46847 send_sig(SIGPIPE, current, 0);
46848 ret = -EPIPE;
46849 break;
46850@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46851 ret = -ERESTARTSYS;
46852 break;
46853 }
46854- pipe->waiting_writers++;
46855+ atomic_inc(&pipe->waiting_writers);
46856 pipe_wait(pipe);
46857- pipe->waiting_writers--;
46858+ atomic_dec(&pipe->waiting_writers);
46859 }
46860
46861 pipe_unlock(pipe);
46862@@ -1819,14 +1819,14 @@ retry:
46863 pipe_double_lock(ipipe, opipe);
46864
46865 do {
46866- if (!opipe->readers) {
46867+ if (!atomic_read(&opipe->readers)) {
46868 send_sig(SIGPIPE, current, 0);
46869 if (!ret)
46870 ret = -EPIPE;
46871 break;
46872 }
46873
46874- if (!ipipe->nrbufs && !ipipe->writers)
46875+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46876 break;
46877
46878 /*
46879@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46880 pipe_double_lock(ipipe, opipe);
46881
46882 do {
46883- if (!opipe->readers) {
46884+ if (!atomic_read(&opipe->readers)) {
46885 send_sig(SIGPIPE, current, 0);
46886 if (!ret)
46887 ret = -EPIPE;
46888@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46889 * return EAGAIN if we have the potential of some data in the
46890 * future, otherwise just return 0
46891 */
46892- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46893+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46894 ret = -EAGAIN;
46895
46896 pipe_unlock(ipipe);
46897diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46898index d4e6080b..0e58b99 100644
46899--- a/fs/sysfs/file.c
46900+++ b/fs/sysfs/file.c
46901@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46902
46903 struct sysfs_open_dirent {
46904 atomic_t refcnt;
46905- atomic_t event;
46906+ atomic_unchecked_t event;
46907 wait_queue_head_t poll;
46908 struct list_head buffers; /* goes through sysfs_buffer.list */
46909 };
46910@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46911 if (!sysfs_get_active(attr_sd))
46912 return -ENODEV;
46913
46914- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46915+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46916 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46917
46918 sysfs_put_active(attr_sd);
46919@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46920 return -ENOMEM;
46921
46922 atomic_set(&new_od->refcnt, 0);
46923- atomic_set(&new_od->event, 1);
46924+ atomic_set_unchecked(&new_od->event, 1);
46925 init_waitqueue_head(&new_od->poll);
46926 INIT_LIST_HEAD(&new_od->buffers);
46927 goto retry;
46928@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46929
46930 sysfs_put_active(attr_sd);
46931
46932- if (buffer->event != atomic_read(&od->event))
46933+ if (buffer->event != atomic_read_unchecked(&od->event))
46934 goto trigger;
46935
46936 return DEFAULT_POLLMASK;
46937@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46938
46939 od = sd->s_attr.open;
46940 if (od) {
46941- atomic_inc(&od->event);
46942+ atomic_inc_unchecked(&od->event);
46943 wake_up_interruptible(&od->poll);
46944 }
46945
46946diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
46947index e34f0d9..740ea7b 100644
46948--- a/fs/sysfs/mount.c
46949+++ b/fs/sysfs/mount.c
46950@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46951 .s_name = "",
46952 .s_count = ATOMIC_INIT(1),
46953 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46954+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46955+ .s_mode = S_IFDIR | S_IRWXU,
46956+#else
46957 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46958+#endif
46959 .s_ino = 1,
46960 };
46961
46962diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46963index a7ac78f..02158e1 100644
46964--- a/fs/sysfs/symlink.c
46965+++ b/fs/sysfs/symlink.c
46966@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46967
46968 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46969 {
46970- char *page = nd_get_link(nd);
46971+ const char *page = nd_get_link(nd);
46972 if (!IS_ERR(page))
46973 free_page((unsigned long)page);
46974 }
46975diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46976index c175b4d..8f36a16 100644
46977--- a/fs/udf/misc.c
46978+++ b/fs/udf/misc.c
46979@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46980
46981 u8 udf_tag_checksum(const struct tag *t)
46982 {
46983- u8 *data = (u8 *)t;
46984+ const u8 *data = (const u8 *)t;
46985 u8 checksum = 0;
46986 int i;
46987 for (i = 0; i < sizeof(struct tag); ++i)
46988diff --git a/fs/utimes.c b/fs/utimes.c
46989index ba653f3..06ea4b1 100644
46990--- a/fs/utimes.c
46991+++ b/fs/utimes.c
46992@@ -1,6 +1,7 @@
46993 #include <linux/compiler.h>
46994 #include <linux/file.h>
46995 #include <linux/fs.h>
46996+#include <linux/security.h>
46997 #include <linux/linkage.h>
46998 #include <linux/mount.h>
46999 #include <linux/namei.h>
47000@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47001 goto mnt_drop_write_and_out;
47002 }
47003 }
47004+
47005+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47006+ error = -EACCES;
47007+ goto mnt_drop_write_and_out;
47008+ }
47009+
47010 mutex_lock(&inode->i_mutex);
47011 error = notify_change(path->dentry, &newattrs);
47012 mutex_unlock(&inode->i_mutex);
47013diff --git a/fs/xattr.c b/fs/xattr.c
47014index 67583de..c5aad14 100644
47015--- a/fs/xattr.c
47016+++ b/fs/xattr.c
47017@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47018 * Extended attribute SET operations
47019 */
47020 static long
47021-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47022+setxattr(struct path *path, const char __user *name, const void __user *value,
47023 size_t size, int flags)
47024 {
47025 int error;
47026@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47027 return PTR_ERR(kvalue);
47028 }
47029
47030- error = vfs_setxattr(d, kname, kvalue, size, flags);
47031+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47032+ error = -EACCES;
47033+ goto out;
47034+ }
47035+
47036+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47037+out:
47038 kfree(kvalue);
47039 return error;
47040 }
47041@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47042 return error;
47043 error = mnt_want_write(path.mnt);
47044 if (!error) {
47045- error = setxattr(path.dentry, name, value, size, flags);
47046+ error = setxattr(&path, name, value, size, flags);
47047 mnt_drop_write(path.mnt);
47048 }
47049 path_put(&path);
47050@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47051 return error;
47052 error = mnt_want_write(path.mnt);
47053 if (!error) {
47054- error = setxattr(path.dentry, name, value, size, flags);
47055+ error = setxattr(&path, name, value, size, flags);
47056 mnt_drop_write(path.mnt);
47057 }
47058 path_put(&path);
47059@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47060 const void __user *,value, size_t, size, int, flags)
47061 {
47062 struct file *f;
47063- struct dentry *dentry;
47064 int error = -EBADF;
47065
47066 f = fget(fd);
47067 if (!f)
47068 return error;
47069- dentry = f->f_path.dentry;
47070- audit_inode(NULL, dentry);
47071+ audit_inode(NULL, f->f_path.dentry);
47072 error = mnt_want_write_file(f);
47073 if (!error) {
47074- error = setxattr(dentry, name, value, size, flags);
47075+ error = setxattr(&f->f_path, name, value, size, flags);
47076 mnt_drop_write(f->f_path.mnt);
47077 }
47078 fput(f);
47079diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47080index 8d5a506..7f62712 100644
47081--- a/fs/xattr_acl.c
47082+++ b/fs/xattr_acl.c
47083@@ -17,8 +17,8 @@
47084 struct posix_acl *
47085 posix_acl_from_xattr(const void *value, size_t size)
47086 {
47087- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47088- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47089+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47090+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47091 int count;
47092 struct posix_acl *acl;
47093 struct posix_acl_entry *acl_e;
47094diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47095index d0ab788..827999b 100644
47096--- a/fs/xfs/xfs_bmap.c
47097+++ b/fs/xfs/xfs_bmap.c
47098@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47099 int nmap,
47100 int ret_nmap);
47101 #else
47102-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47103+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47104 #endif /* DEBUG */
47105
47106 STATIC int
47107diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47108index 79d05e8..e3e5861 100644
47109--- a/fs/xfs/xfs_dir2_sf.c
47110+++ b/fs/xfs/xfs_dir2_sf.c
47111@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47112 }
47113
47114 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47115- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47116+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47117+ char name[sfep->namelen];
47118+ memcpy(name, sfep->name, sfep->namelen);
47119+ if (filldir(dirent, name, sfep->namelen,
47120+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47121+ *offset = off & 0x7fffffff;
47122+ return 0;
47123+ }
47124+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47125 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47126 *offset = off & 0x7fffffff;
47127 return 0;
47128diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47129index d99a905..9f88202 100644
47130--- a/fs/xfs/xfs_ioctl.c
47131+++ b/fs/xfs/xfs_ioctl.c
47132@@ -128,7 +128,7 @@ xfs_find_handle(
47133 }
47134
47135 error = -EFAULT;
47136- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47137+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47138 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47139 goto out_put;
47140
47141diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47142index 23ce927..e274cc1 100644
47143--- a/fs/xfs/xfs_iops.c
47144+++ b/fs/xfs/xfs_iops.c
47145@@ -447,7 +447,7 @@ xfs_vn_put_link(
47146 struct nameidata *nd,
47147 void *p)
47148 {
47149- char *s = nd_get_link(nd);
47150+ const char *s = nd_get_link(nd);
47151
47152 if (!IS_ERR(s))
47153 kfree(s);
47154diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
47155index ce9268a..ee98d0b 100644
47156--- a/fs/xfs/xfs_vnodeops.c
47157+++ b/fs/xfs/xfs_vnodeops.c
47158@@ -131,7 +131,8 @@ xfs_readlink(
47159 __func__, (unsigned long long) ip->i_ino,
47160 (long long) pathlen);
47161 ASSERT(0);
47162- return XFS_ERROR(EFSCORRUPTED);
47163+ error = XFS_ERROR(EFSCORRUPTED);
47164+ goto out;
47165 }
47166
47167
47168diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47169new file mode 100644
47170index 0000000..ab77366
47171--- /dev/null
47172+++ b/grsecurity/Kconfig
47173@@ -0,0 +1,1065 @@
47174+#
47175+# grsecurity configuration
47176+#
47177+
47178+menu "Grsecurity"
47179+
47180+config GRKERNSEC
47181+ bool "Grsecurity"
47182+ select CRYPTO
47183+ select CRYPTO_SHA256
47184+ help
47185+ If you say Y here, you will be able to configure many features
47186+ that will enhance the security of your system. It is highly
47187+ recommended that you say Y here and read through the help
47188+ for each option so that you fully understand the features and
47189+ can evaluate their usefulness for your machine.
47190+
47191+choice
47192+ prompt "Security Level"
47193+ depends on GRKERNSEC
47194+ default GRKERNSEC_CUSTOM
47195+
47196+config GRKERNSEC_LOW
47197+ bool "Low"
47198+ select GRKERNSEC_LINK
47199+ select GRKERNSEC_FIFO
47200+ select GRKERNSEC_RANDNET
47201+ select GRKERNSEC_DMESG
47202+ select GRKERNSEC_CHROOT
47203+ select GRKERNSEC_CHROOT_CHDIR
47204+
47205+ help
47206+ If you choose this option, several of the grsecurity options will
47207+ be enabled that will give you greater protection against a number
47208+ of attacks, while assuring that none of your software will have any
47209+ conflicts with the additional security measures. If you run a lot
47210+ of unusual software, or you are having problems with the higher
47211+ security levels, you should say Y here. With this option, the
47212+ following features are enabled:
47213+
47214+ - Linking restrictions
47215+ - FIFO restrictions
47216+ - Restricted dmesg
47217+ - Enforced chdir("/") on chroot
47218+ - Runtime module disabling
47219+
47220+config GRKERNSEC_MEDIUM
47221+ bool "Medium"
47222+ select PAX
47223+ select PAX_EI_PAX
47224+ select PAX_PT_PAX_FLAGS
47225+ select PAX_HAVE_ACL_FLAGS
47226+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47227+ select GRKERNSEC_CHROOT
47228+ select GRKERNSEC_CHROOT_SYSCTL
47229+ select GRKERNSEC_LINK
47230+ select GRKERNSEC_FIFO
47231+ select GRKERNSEC_DMESG
47232+ select GRKERNSEC_RANDNET
47233+ select GRKERNSEC_FORKFAIL
47234+ select GRKERNSEC_TIME
47235+ select GRKERNSEC_SIGNAL
47236+ select GRKERNSEC_CHROOT
47237+ select GRKERNSEC_CHROOT_UNIX
47238+ select GRKERNSEC_CHROOT_MOUNT
47239+ select GRKERNSEC_CHROOT_PIVOT
47240+ select GRKERNSEC_CHROOT_DOUBLE
47241+ select GRKERNSEC_CHROOT_CHDIR
47242+ select GRKERNSEC_CHROOT_MKNOD
47243+ select GRKERNSEC_PROC
47244+ select GRKERNSEC_PROC_USERGROUP
47245+ select PAX_RANDUSTACK
47246+ select PAX_ASLR
47247+ select PAX_RANDMMAP
47248+ select PAX_REFCOUNT if (X86 || SPARC64)
47249+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47250+
47251+ help
47252+ If you say Y here, several features in addition to those included
47253+ in the low additional security level will be enabled. These
47254+ features provide even more security to your system, though in rare
47255+ cases they may be incompatible with very old or poorly written
47256+ software. If you enable this option, make sure that your auth
47257+ service (identd) is running as gid 1001. With this option,
47258+ the following features (in addition to those provided in the
47259+ low additional security level) will be enabled:
47260+
47261+ - Failed fork logging
47262+ - Time change logging
47263+ - Signal logging
47264+ - Deny mounts in chroot
47265+ - Deny double chrooting
47266+ - Deny sysctl writes in chroot
47267+ - Deny mknod in chroot
47268+ - Deny access to abstract AF_UNIX sockets out of chroot
47269+ - Deny pivot_root in chroot
47270+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47271+ - /proc restrictions with special GID set to 10 (usually wheel)
47272+ - Address Space Layout Randomization (ASLR)
47273+ - Prevent exploitation of most refcount overflows
47274+ - Bounds checking of copying between the kernel and userland
47275+
47276+config GRKERNSEC_HIGH
47277+ bool "High"
47278+ select GRKERNSEC_LINK
47279+ select GRKERNSEC_FIFO
47280+ select GRKERNSEC_DMESG
47281+ select GRKERNSEC_FORKFAIL
47282+ select GRKERNSEC_TIME
47283+ select GRKERNSEC_SIGNAL
47284+ select GRKERNSEC_CHROOT
47285+ select GRKERNSEC_CHROOT_SHMAT
47286+ select GRKERNSEC_CHROOT_UNIX
47287+ select GRKERNSEC_CHROOT_MOUNT
47288+ select GRKERNSEC_CHROOT_FCHDIR
47289+ select GRKERNSEC_CHROOT_PIVOT
47290+ select GRKERNSEC_CHROOT_DOUBLE
47291+ select GRKERNSEC_CHROOT_CHDIR
47292+ select GRKERNSEC_CHROOT_MKNOD
47293+ select GRKERNSEC_CHROOT_CAPS
47294+ select GRKERNSEC_CHROOT_SYSCTL
47295+ select GRKERNSEC_CHROOT_FINDTASK
47296+ select GRKERNSEC_SYSFS_RESTRICT
47297+ select GRKERNSEC_PROC
47298+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47299+ select GRKERNSEC_HIDESYM
47300+ select GRKERNSEC_BRUTE
47301+ select GRKERNSEC_PROC_USERGROUP
47302+ select GRKERNSEC_KMEM
47303+ select GRKERNSEC_RESLOG
47304+ select GRKERNSEC_RANDNET
47305+ select GRKERNSEC_PROC_ADD
47306+ select GRKERNSEC_CHROOT_CHMOD
47307+ select GRKERNSEC_CHROOT_NICE
47308+ select GRKERNSEC_SETXID
47309+ select GRKERNSEC_AUDIT_MOUNT
47310+ select GRKERNSEC_MODHARDEN if (MODULES)
47311+ select GRKERNSEC_HARDEN_PTRACE
47312+ select GRKERNSEC_PTRACE_READEXEC
47313+ select GRKERNSEC_VM86 if (X86_32)
47314+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47315+ select PAX
47316+ select PAX_RANDUSTACK
47317+ select PAX_ASLR
47318+ select PAX_RANDMMAP
47319+ select PAX_NOEXEC
47320+ select PAX_MPROTECT
47321+ select PAX_EI_PAX
47322+ select PAX_PT_PAX_FLAGS
47323+ select PAX_HAVE_ACL_FLAGS
47324+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47325+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
47326+ select PAX_RANDKSTACK if (X86_TSC && X86)
47327+ select PAX_SEGMEXEC if (X86_32)
47328+ select PAX_PAGEEXEC
47329+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47330+ select PAX_EMUTRAMP if (PARISC)
47331+ select PAX_EMUSIGRT if (PARISC)
47332+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47333+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47334+ select PAX_REFCOUNT if (X86 || SPARC64)
47335+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47336+ help
47337+ If you say Y here, many of the features of grsecurity will be
47338+ enabled, which will protect you against many kinds of attacks
47339+ against your system. The heightened security comes at a cost
47340+ of an increased chance of incompatibilities with rare software
47341+ on your machine. Since this security level enables PaX, you should
47342+ view <http://pax.grsecurity.net> and read about the PaX
47343+ project. While you are there, download chpax and run it on
47344+ binaries that cause problems with PaX. Also remember that
47345+ since the /proc restrictions are enabled, you must run your
47346+ identd as gid 1001. This security level enables the following
47347+ features in addition to those listed in the low and medium
47348+ security levels:
47349+
47350+ - Additional /proc restrictions
47351+ - Chmod restrictions in chroot
47352+ - No signals, ptrace, or viewing of processes outside of chroot
47353+ - Capability restrictions in chroot
47354+ - Deny fchdir out of chroot
47355+ - Priority restrictions in chroot
47356+ - Segmentation-based implementation of PaX
47357+ - Mprotect restrictions
47358+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47359+ - Kernel stack randomization
47360+ - Mount/unmount/remount logging
47361+ - Kernel symbol hiding
47362+ - Hardening of module auto-loading
47363+ - Ptrace restrictions
47364+ - Restricted vm86 mode
47365+ - Restricted sysfs/debugfs
47366+ - Active kernel exploit response
47367+
47368+config GRKERNSEC_CUSTOM
47369+ bool "Custom"
47370+ help
47371+ If you say Y here, you will be able to configure every grsecurity
47372+ option, which allows you to enable many more features that aren't
47373+ covered in the basic security levels. These additional features
47374+ include TPE, socket restrictions, and the sysctl system for
47375+ grsecurity. It is advised that you read through the help for
47376+ each option to determine its usefulness in your situation.
47377+
47378+endchoice
47379+
47380+menu "Address Space Protection"
47381+depends on GRKERNSEC
47382+
47383+config GRKERNSEC_KMEM
47384+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47385+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47386+ help
47387+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47388+ be written to or read from to modify or leak the contents of the running
47389+ kernel. /dev/port will also not be allowed to be opened. If you have module
47390+ support disabled, enabling this will close up four ways that are
47391+ currently used to insert malicious code into the running kernel.
47392+ Even with all these features enabled, we still highly recommend that
47393+ you use the RBAC system, as it is still possible for an attacker to
47394+ modify the running kernel through privileged I/O granted by ioperm/iopl.
47395+ If you are not using XFree86, you may be able to stop this additional
47396+ case by enabling the 'Disable privileged I/O' option. Though nothing
47397+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47398+ but only to video memory, which is the only writing we allow in this
47399+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
47400+ mapping will not be allowed to be mprotected with PROT_WRITE later.
47401+ It is highly recommended that you say Y here if you meet all the
47402+ conditions above.
47403+
47404+config GRKERNSEC_VM86
47405+ bool "Restrict VM86 mode"
47406+ depends on X86_32
47407+
47408+ help
47409+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47410+ make use of a special execution mode on 32bit x86 processors called
47411+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47412+ video cards and will still work with this option enabled. The purpose
47413+ of the option is to prevent exploitation of emulation errors in
47414+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47415+ Nearly all users should be able to enable this option.
47416+
47417+config GRKERNSEC_IO
47418+ bool "Disable privileged I/O"
47419+ depends on X86
47420+ select RTC_CLASS
47421+ select RTC_INTF_DEV
47422+ select RTC_DRV_CMOS
47423+
47424+ help
47425+ If you say Y here, all ioperm and iopl calls will return an error.
47426+ Ioperm and iopl can be used to modify the running kernel.
47427+ Unfortunately, some programs need this access to operate properly,
47428+ the most notable of which are XFree86 and hwclock. hwclock can be
47429+ remedied by having RTC support in the kernel, so real-time
47430+ clock support is enabled if this option is enabled, to ensure
47431+ that hwclock operates correctly. XFree86 still will not
47432+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47433+ IF YOU USE XFree86. If you use XFree86 and you still want to
47434+ protect your kernel against modification, use the RBAC system.
47435+
47436+config GRKERNSEC_PROC_MEMMAP
47437+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47438+ default y if (PAX_NOEXEC || PAX_ASLR)
47439+ depends on PAX_NOEXEC || PAX_ASLR
47440+ help
47441+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47442+ give no information about the addresses of its mappings if
47443+ PaX features that rely on random addresses are enabled on the task.
47444+ If you use PaX it is greatly recommended that you say Y here as it
47445+ closes up a hole that makes the full ASLR useless for suid
47446+ binaries.
47447+
47448+config GRKERNSEC_BRUTE
47449+ bool "Deter exploit bruteforcing"
47450+ help
47451+ If you say Y here, attempts to bruteforce exploits against forking
47452+ daemons such as apache or sshd, as well as against suid/sgid binaries
47453+ will be deterred. When a child of a forking daemon is killed by PaX
47454+ or crashes due to an illegal instruction or other suspicious signal,
47455+ the parent process will be delayed 30 seconds upon every subsequent
47456+ fork until the administrator is able to assess the situation and
47457+ restart the daemon.
47458+ In the suid/sgid case, the attempt is logged, the user has all their
47459+ processes terminated, and they are prevented from executing any further
47460+ processes for 15 minutes.
47461+ It is recommended that you also enable signal logging in the auditing
47462+ section so that logs are generated when a process triggers a suspicious
47463+ signal.
47464+ If the sysctl option is enabled, a sysctl option with name
47465+ "deter_bruteforce" is created.
47466+
47467+
47468+config GRKERNSEC_MODHARDEN
47469+ bool "Harden module auto-loading"
47470+ depends on MODULES
47471+ help
47472+ If you say Y here, module auto-loading in response to use of some
47473+ feature implemented by an unloaded module will be restricted to
47474+ root users. Enabling this option helps defend against attacks
47475+ by unprivileged users who abuse the auto-loading behavior to
47476+ cause a vulnerable module to load that is then exploited.
47477+
47478+ If this option prevents a legitimate use of auto-loading for a
47479+ non-root user, the administrator can execute modprobe manually
47480+ with the exact name of the module mentioned in the alert log.
47481+ Alternatively, the administrator can add the module to the list
47482+ of modules loaded at boot by modifying init scripts.
47483+
47484+ Modification of init scripts will most likely be needed on
47485+ Ubuntu servers with encrypted home directory support enabled,
47486+ as the first non-root user logging in will cause the ecb(aes),
47487+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47488+
47489+config GRKERNSEC_HIDESYM
47490+ bool "Hide kernel symbols"
47491+ help
47492+ If you say Y here, getting information on loaded modules, and
47493+ displaying all kernel symbols through a syscall will be restricted
47494+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47495+ /proc/kallsyms will be restricted to the root user. The RBAC
47496+ system can hide that entry even from root.
47497+
47498+ This option also prevents leaking of kernel addresses through
47499+ several /proc entries.
47500+
47501+ Note that this option is only effective provided the following
47502+ conditions are met:
47503+ 1) The kernel using grsecurity is not precompiled by some distribution
47504+ 2) You have also enabled GRKERNSEC_DMESG
47505+ 3) You are using the RBAC system and hiding other files such as your
47506+ kernel image and System.map. Alternatively, enabling this option
47507+ causes the permissions on /boot, /lib/modules, and the kernel
47508+ source directory to change at compile time to prevent
47509+ reading by non-root users.
47510+ If the above conditions are met, this option will aid in providing a
47511+ useful protection against local kernel exploitation of overflows
47512+ and arbitrary read/write vulnerabilities.
47513+
47514+config GRKERNSEC_KERN_LOCKOUT
47515+ bool "Active kernel exploit response"
47516+ depends on X86 || ARM || PPC || SPARC
47517+ help
47518+ If you say Y here, when a PaX alert is triggered due to suspicious
47519+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47520+ or an OOPs occurs due to bad memory accesses, instead of just
47521+ terminating the offending process (and potentially allowing
47522+ a subsequent exploit from the same user), we will take one of two
47523+ actions:
47524+ If the user was root, we will panic the system
47525+ If the user was non-root, we will log the attempt, terminate
47526+ all processes owned by the user, then prevent them from creating
47527+ any new processes until the system is restarted
47528+ This deters repeated kernel exploitation/bruteforcing attempts
47529+ and is useful for later forensics.
47530+
47531+endmenu
47532+menu "Role Based Access Control Options"
47533+depends on GRKERNSEC
47534+
47535+config GRKERNSEC_RBAC_DEBUG
47536+ bool
47537+
47538+config GRKERNSEC_NO_RBAC
47539+ bool "Disable RBAC system"
47540+ help
47541+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47542+ preventing the RBAC system from being enabled. You should only say Y
47543+ here if you have no intention of using the RBAC system, so as to prevent
47544+ an attacker with root access from misusing the RBAC system to hide files
47545+ and processes when loadable module support and /dev/[k]mem have been
47546+ locked down.
47547+
47548+config GRKERNSEC_ACL_HIDEKERN
47549+ bool "Hide kernel processes"
47550+ help
47551+ If you say Y here, all kernel threads will be hidden to all
47552+ processes but those whose subject has the "view hidden processes"
47553+ flag.
47554+
47555+config GRKERNSEC_ACL_MAXTRIES
47556+ int "Maximum tries before password lockout"
47557+ default 3
47558+ help
47559+ This option enforces the maximum number of times a user can attempt
47560+ to authorize themselves with the grsecurity RBAC system before being
47561+ denied the ability to attempt authorization again for a specified time.
47562+ The lower the number, the harder it will be to brute-force a password.
47563+
47564+config GRKERNSEC_ACL_TIMEOUT
47565+ int "Time to wait after max password tries, in seconds"
47566+ default 30
47567+ help
47568+ This option specifies the time the user must wait after attempting to
47569+ authorize to the RBAC system with the maximum number of invalid
47570+ passwords. The higher the number, the harder it will be to brute-force
47571+ a password.
47572+
47573+endmenu
47574+menu "Filesystem Protections"
47575+depends on GRKERNSEC
47576+
47577+config GRKERNSEC_PROC
47578+ bool "Proc restrictions"
47579+ help
47580+ If you say Y here, the permissions of the /proc filesystem
47581+ will be altered to enhance system security and privacy. You MUST
47582+ choose either a user only restriction or a user and group restriction.
47583+ Depending upon the option you choose, you can either restrict users to
47584+ see only the processes they themselves run, or choose a group that can
47585+ view all processes and files normally restricted to root if you choose
47586+ the "restrict to user only" option. NOTE: If you're running identd as
47587+ a non-root user, you will have to run it as the group you specify here.
47588+
47589+config GRKERNSEC_PROC_USER
47590+ bool "Restrict /proc to user only"
47591+ depends on GRKERNSEC_PROC
47592+ help
47593+ If you say Y here, non-root users will only be able to view their own
47594+ processes, and will be restricted from viewing network-related information,
47595+ and viewing kernel symbol and module information.
47596+
47597+config GRKERNSEC_PROC_USERGROUP
47598+ bool "Allow special group"
47599+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47600+ help
47601+ If you say Y here, you will be able to select a group that will be
47602+ able to view all processes and network-related information. If you've
47603+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47604+ remain hidden. This option is useful if you want to run identd as
47605+ a non-root user.
47606+
47607+config GRKERNSEC_PROC_GID
47608+ int "GID for special group"
47609+ depends on GRKERNSEC_PROC_USERGROUP
47610+ default 1001
47611+
47612+config GRKERNSEC_PROC_ADD
47613+ bool "Additional restrictions"
47614+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47615+ help
47616+ If you say Y here, additional restrictions will be placed on
47617+ /proc that keep normal users from viewing device information and
47618+ slabinfo information that could be useful for exploits.
47619+
47620+config GRKERNSEC_LINK
47621+ bool "Linking restrictions"
47622+ help
47623+ If you say Y here, /tmp race exploits will be prevented, since users
47624+ will no longer be able to follow symlinks owned by other users in
47625+ world-writable +t directories (e.g. /tmp), unless the owner of the
47626+ symlink is the owner of the directory. Users will also not be
47627+ able to hardlink to files they do not own. If the sysctl option is
47628+ enabled, a sysctl option with name "linking_restrictions" is created.
47629+
47630+config GRKERNSEC_FIFO
47631+ bool "FIFO restrictions"
47632+ help
47633+ If you say Y here, users will not be able to write to FIFOs they don't
47634+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47635+ the FIFO is the same owner of the directory it's held in. If the sysctl
47636+ option is enabled, a sysctl option with name "fifo_restrictions" is
47637+ created.
47638+
47639+config GRKERNSEC_SYSFS_RESTRICT
47640+ bool "Sysfs/debugfs restriction"
47641+ depends on SYSFS
47642+ help
47643+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47644+ any filesystem normally mounted under it (e.g. debugfs) will only
47645+ be accessible by root. These filesystems generally provide access
47646+ to hardware and debug information that isn't appropriate for unprivileged
47647+ users of the system. Sysfs and debugfs have also become a large source
47648+ of new vulnerabilities, ranging from infoleaks to local compromise.
47649+ There has been very little oversight with an eye toward security involved
47650+ in adding new exporters of information to these filesystems, so their
47651+ use is discouraged.
47652+ This option is equivalent to a chmod 0700 of the mount paths.
47653+
47654+config GRKERNSEC_ROFS
47655+ bool "Runtime read-only mount protection"
47656+ help
47657+ If you say Y here, a sysctl option with name "romount_protect" will
47658+ be created. By setting this option to 1 at runtime, filesystems
47659+ will be protected in the following ways:
47660+ * No new writable mounts will be allowed
47661+ * Existing read-only mounts won't be able to be remounted read/write
47662+ * Write operations will be denied on all block devices
47663+ This option acts independently of grsec_lock: once it is set to 1,
47664+ it cannot be turned off. Therefore, please be mindful of the resulting
47665+ behavior if this option is enabled in an init script on a read-only
47666+ filesystem. This feature is mainly intended for secure embedded systems.
47667+
47668+config GRKERNSEC_CHROOT
47669+ bool "Chroot jail restrictions"
47670+ help
47671+ If you say Y here, you will be able to choose several options that will
47672+ make breaking out of a chrooted jail much more difficult. If you
47673+ encounter no software incompatibilities with the following options, it
47674+ is recommended that you enable each one.
47675+
47676+config GRKERNSEC_CHROOT_MOUNT
47677+ bool "Deny mounts"
47678+ depends on GRKERNSEC_CHROOT
47679+ help
47680+ If you say Y here, processes inside a chroot will not be able to
47681+ mount or remount filesystems. If the sysctl option is enabled, a
47682+ sysctl option with name "chroot_deny_mount" is created.
47683+
47684+config GRKERNSEC_CHROOT_DOUBLE
47685+ bool "Deny double-chroots"
47686+ depends on GRKERNSEC_CHROOT
47687+ help
47688+ If you say Y here, processes inside a chroot will not be able to chroot
47689+ again outside the chroot. This is a widely used method of breaking
47690+ out of a chroot jail and should not be allowed. If the sysctl
47691+ option is enabled, a sysctl option with name
47692+ "chroot_deny_chroot" is created.
47693+
47694+config GRKERNSEC_CHROOT_PIVOT
47695+ bool "Deny pivot_root in chroot"
47696+ depends on GRKERNSEC_CHROOT
47697+ help
47698+ If you say Y here, processes inside a chroot will not be able to use
47699+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47700+ works similar to chroot in that it changes the root filesystem. This
47701+ function could be misused in a chrooted process to attempt to break out
47702+ of the chroot, and therefore should not be allowed. If the sysctl
47703+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47704+ created.
47705+
47706+config GRKERNSEC_CHROOT_CHDIR
47707+ bool "Enforce chdir(\"/\") on all chroots"
47708+ depends on GRKERNSEC_CHROOT
47709+ help
47710+ If you say Y here, the current working directory of all newly-chrooted
47711+ applications will be set to the root directory of the chroot.
47712+ The man page on chroot(2) states:
47713+ Note that this call does not change the current working
47714+ directory, so that `.' can be outside the tree rooted at
47715+ `/'. In particular, the super-user can escape from a
47716+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47717+
47718+ It is recommended that you say Y here, since it's not known to break
47719+ any software. If the sysctl option is enabled, a sysctl option with
47720+ name "chroot_enforce_chdir" is created.
47721+
47722+config GRKERNSEC_CHROOT_CHMOD
47723+ bool "Deny (f)chmod +s"
47724+ depends on GRKERNSEC_CHROOT
47725+ help
47726+ If you say Y here, processes inside a chroot will not be able to chmod
47727+ or fchmod files to make them have suid or sgid bits. This protects
47728+ against another published method of breaking a chroot. If the sysctl
47729+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47730+ created.
47731+
47732+config GRKERNSEC_CHROOT_FCHDIR
47733+ bool "Deny fchdir out of chroot"
47734+ depends on GRKERNSEC_CHROOT
47735+ help
47736+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47737+ to a file descriptor of the chrooting process that points to a directory
47738+ outside the filesystem will be stopped. If the sysctl option
47739+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47740+
47741+config GRKERNSEC_CHROOT_MKNOD
47742+ bool "Deny mknod"
47743+ depends on GRKERNSEC_CHROOT
47744+ help
47745+ If you say Y here, processes inside a chroot will not be allowed to
47746+ mknod. The problem with using mknod inside a chroot is that it
47747+ would allow an attacker to create a device entry that is the same
47748+ as one on the physical root of your system, which could be anything
47749+ from the console device to a device for your hard drive (which
47750+ they could then use to wipe the drive or steal data). It is recommended
47751+ that you say Y here, unless you run into software incompatibilities.
47752+ If the sysctl option is enabled, a sysctl option with name
47753+ "chroot_deny_mknod" is created.
47754+
47755+config GRKERNSEC_CHROOT_SHMAT
47756+ bool "Deny shmat() out of chroot"
47757+ depends on GRKERNSEC_CHROOT
47758+ help
47759+ If you say Y here, processes inside a chroot will not be able to attach
47760+ to shared memory segments that were created outside of the chroot jail.
47761+ It is recommended that you say Y here. If the sysctl option is enabled,
47762+ a sysctl option with name "chroot_deny_shmat" is created.
47763+
47764+config GRKERNSEC_CHROOT_UNIX
47765+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47766+ depends on GRKERNSEC_CHROOT
47767+ help
47768+ If you say Y here, processes inside a chroot will not be able to
47769+ connect to abstract (meaning not belonging to a filesystem) Unix
47770+ domain sockets that were bound outside of a chroot. It is recommended
47771+ that you say Y here. If the sysctl option is enabled, a sysctl option
47772+ with name "chroot_deny_unix" is created.
47773+
47774+config GRKERNSEC_CHROOT_FINDTASK
47775+ bool "Protect outside processes"
47776+ depends on GRKERNSEC_CHROOT
47777+ help
47778+ If you say Y here, processes inside a chroot will not be able to
47779+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47780+ getsid, or view any process outside of the chroot. If the sysctl
47781+ option is enabled, a sysctl option with name "chroot_findtask" is
47782+ created.
47783+
47784+config GRKERNSEC_CHROOT_NICE
47785+ bool "Restrict priority changes"
47786+ depends on GRKERNSEC_CHROOT
47787+ help
47788+ If you say Y here, processes inside a chroot will not be able to raise
47789+ the priority of processes in the chroot, or alter the priority of
47790+ processes outside the chroot. This provides more security than simply
47791+ removing CAP_SYS_NICE from the process' capability set. If the
47792+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47793+ is created.
47794+
47795+config GRKERNSEC_CHROOT_SYSCTL
47796+ bool "Deny sysctl writes"
47797+ depends on GRKERNSEC_CHROOT
47798+ help
47799+ If you say Y here, an attacker in a chroot will not be able to
47800+ write to sysctl entries, either by sysctl(2) or through a /proc
47801+ interface. It is strongly recommended that you say Y here. If the
47802+ sysctl option is enabled, a sysctl option with name
47803+ "chroot_deny_sysctl" is created.
47804+
47805+config GRKERNSEC_CHROOT_CAPS
47806+ bool "Capability restrictions"
47807+ depends on GRKERNSEC_CHROOT
47808+ help
47809+ If you say Y here, the capabilities on all processes within a
47810+ chroot jail will be lowered to stop module insertion, raw i/o,
47811+ system and net admin tasks, rebooting the system, modifying immutable
47812+ files, modifying IPC owned by another, and changing the system time.
47813+ This is left an option because it can break some apps. Disable this
47814+ if your chrooted apps are having problems performing those kinds of
47815+ tasks. If the sysctl option is enabled, a sysctl option with
47816+ name "chroot_caps" is created.
47817+
47818+endmenu
47819+menu "Kernel Auditing"
47820+depends on GRKERNSEC
47821+
47822+config GRKERNSEC_AUDIT_GROUP
47823+ bool "Single group for auditing"
47824+ help
47825+ If you say Y here, the exec, chdir, and (un)mount logging features
47826+ will only operate on a group you specify. This option is recommended
47827+ if you only want to watch certain users instead of having a large
47828+ amount of logs from the entire system. If the sysctl option is enabled,
47829+ a sysctl option with name "audit_group" is created.
47830+
47831+config GRKERNSEC_AUDIT_GID
47832+ int "GID for auditing"
47833+ depends on GRKERNSEC_AUDIT_GROUP
47834+ default 1007
47835+
47836+config GRKERNSEC_EXECLOG
47837+ bool "Exec logging"
47838+ help
47839+ If you say Y here, all execve() calls will be logged (since the
47840+ other exec*() calls are frontends to execve(), all execution
47841+ will be logged). Useful for shell-servers that like to keep track
47842+ of their users. If the sysctl option is enabled, a sysctl option with
47843+ name "exec_logging" is created.
47844+ WARNING: This option when enabled will produce a LOT of logs, especially
47845+ on an active system.
47846+
47847+config GRKERNSEC_RESLOG
47848+ bool "Resource logging"
47849+ help
47850+ If you say Y here, all attempts to overstep resource limits will
47851+ be logged with the resource name, the requested size, and the current
47852+ limit. It is highly recommended that you say Y here. If the sysctl
47853+ option is enabled, a sysctl option with name "resource_logging" is
47854+ created. If the RBAC system is enabled, the sysctl value is ignored.
47855+
47856+config GRKERNSEC_CHROOT_EXECLOG
47857+ bool "Log execs within chroot"
47858+ help
47859+ If you say Y here, all executions inside a chroot jail will be logged
47860+ to syslog. This can cause a large amount of logs if certain
47861+ applications (eg. djb's daemontools) are installed on the system, and
47862+ is therefore left as an option. If the sysctl option is enabled, a
47863+ sysctl option with name "chroot_execlog" is created.
47864+
47865+config GRKERNSEC_AUDIT_PTRACE
47866+ bool "Ptrace logging"
47867+ help
47868+ If you say Y here, all attempts to attach to a process via ptrace
47869+ will be logged. If the sysctl option is enabled, a sysctl option
47870+ with name "audit_ptrace" is created.
47871+
47872+config GRKERNSEC_AUDIT_CHDIR
47873+ bool "Chdir logging"
47874+ help
47875+ If you say Y here, all chdir() calls will be logged. If the sysctl
47876+ option is enabled, a sysctl option with name "audit_chdir" is created.
47877+
47878+config GRKERNSEC_AUDIT_MOUNT
47879+ bool "(Un)Mount logging"
47880+ help
47881+ If you say Y here, all mounts and unmounts will be logged. If the
47882+ sysctl option is enabled, a sysctl option with name "audit_mount" is
47883+ created.
47884+
47885+config GRKERNSEC_SIGNAL
47886+ bool "Signal logging"
47887+ help
47888+ If you say Y here, certain important signals will be logged, such as
47889+ SIGSEGV, which will as a result inform you of when an error in a program
47890+ occurred, which in some cases could mean a possible exploit attempt.
47891+ If the sysctl option is enabled, a sysctl option with name
47892+ "signal_logging" is created.
47893+
47894+config GRKERNSEC_FORKFAIL
47895+ bool "Fork failure logging"
47896+ help
47897+ If you say Y here, all failed fork() attempts will be logged.
47898+ This could suggest a fork bomb, or someone attempting to overstep
47899+ their process limit. If the sysctl option is enabled, a sysctl option
47900+ with name "forkfail_logging" is created.
47901+
47902+config GRKERNSEC_TIME
47903+ bool "Time change logging"
47904+ help
47905+ If you say Y here, any changes of the system clock will be logged.
47906+ If the sysctl option is enabled, a sysctl option with name
47907+ "timechange_logging" is created.
47908+
47909+config GRKERNSEC_PROC_IPADDR
47910+ bool "/proc/<pid>/ipaddr support"
47911+ help
47912+ If you say Y here, a new entry will be added to each /proc/<pid>
47913+ directory that contains the IP address of the person using the task.
47914+ The IP is carried across local TCP and AF_UNIX stream sockets.
47915+ This information can be useful for IDS/IPSes to perform remote response
47916+ to a local attack. The entry is readable by only the owner of the
47917+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47918+ the RBAC system), and thus does not create privacy concerns.
47919+
47920+config GRKERNSEC_RWXMAP_LOG
47921+ bool 'Denied RWX mmap/mprotect logging'
47922+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47923+ help
47924+ If you say Y here, calls to mmap() and mprotect() with explicit
47925+ usage of PROT_WRITE and PROT_EXEC together will be logged when
47926+ denied by the PAX_MPROTECT feature. If the sysctl option is
47927+ enabled, a sysctl option with name "rwxmap_logging" is created.
47928+
47929+config GRKERNSEC_AUDIT_TEXTREL
47930+ bool 'ELF text relocations logging (READ HELP)'
47931+ depends on PAX_MPROTECT
47932+ help
47933+ If you say Y here, text relocations will be logged with the filename
47934+ of the offending library or binary. The purpose of the feature is
47935+ to help Linux distribution developers get rid of libraries and
47936+ binaries that need text relocations which hinder the future progress
47937+ of PaX. Only Linux distribution developers should say Y here, and
47938+ never on a production machine, as this option creates an information
47939+ leak that could aid an attacker in defeating the randomization of
47940+ a single memory region. If the sysctl option is enabled, a sysctl
47941+ option with name "audit_textrel" is created.
47942+
47943+endmenu
47944+
47945+menu "Executable Protections"
47946+depends on GRKERNSEC
47947+
47948+config GRKERNSEC_DMESG
47949+ bool "Dmesg(8) restriction"
47950+ help
47951+ If you say Y here, non-root users will not be able to use dmesg(8)
47952+ to view up to the last 4kb of messages in the kernel's log buffer.
47953+ The kernel's log buffer often contains kernel addresses and other
47954+ identifying information useful to an attacker in fingerprinting a
47955+ system for a targeted exploit.
47956+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
47957+ created.
47958+
47959+config GRKERNSEC_HARDEN_PTRACE
47960+ bool "Deter ptrace-based process snooping"
47961+ help
47962+ If you say Y here, TTY sniffers and other malicious monitoring
47963+ programs implemented through ptrace will be defeated. If you
47964+ have been using the RBAC system, this option has already been
47965+ enabled for several years for all users, with the ability to make
47966+ fine-grained exceptions.
47967+
47968+ This option only affects the ability of non-root users to ptrace
47969+ processes that are not a descendant of the ptracing process.
47970+ This means that strace ./binary and gdb ./binary will still work,
47971+ but attaching to arbitrary processes will not. If the sysctl
47972+ option is enabled, a sysctl option with name "harden_ptrace" is
47973+ created.
47974+
47975+config GRKERNSEC_PTRACE_READEXEC
47976+ bool "Require read access to ptrace sensitive binaries"
47977+ help
47978+ If you say Y here, unprivileged users will not be able to ptrace unreadable
47979+ binaries. This option is useful in environments that
47980+ remove the read bits (e.g. file mode 4711) from suid binaries to
47981+ prevent infoleaking of their contents. This option adds
47982+ consistency to the use of that file mode, as the binary could normally
47983+ be read out when run without privileges while ptracing.
47984+
47985+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47986+ is created.
47987+
47988+config GRKERNSEC_SETXID
47989+ bool "Enforce consistent multithreaded privileges"
47990+ help
47991+ If you say Y here, a change from a root uid to a non-root uid
47992+ in a multithreaded application will cause the resulting uids,
47993+ gids, supplementary groups, and capabilities in that thread
47994+ to be propagated to the other threads of the process. In most
47995+ cases this is unnecessary, as glibc will emulate this behavior
47996+ on behalf of the application. Other libcs do not act in the
47997+ same way, allowing the other threads of the process to continue
47998+ running with root privileges. If the sysctl option is enabled,
47999+ a sysctl option with name "consistent_setxid" is created.
48000+
48001+config GRKERNSEC_TPE
48002+ bool "Trusted Path Execution (TPE)"
48003+ help
48004+ If you say Y here, you will be able to choose a gid to add to the
48005+ supplementary groups of users you want to mark as "untrusted."
48006+ These users will not be able to execute any files that are not in
48007+ root-owned directories writable only by root. If the sysctl option
48008+ is enabled, a sysctl option with name "tpe" is created.
48009+
48010+config GRKERNSEC_TPE_ALL
48011+ bool "Partially restrict all non-root users"
48012+ depends on GRKERNSEC_TPE
48013+ help
48014+ If you say Y here, all non-root users will be covered under
48015+ a weaker TPE restriction. This is separate from, and in addition to,
48016+ the main TPE options that you have selected elsewhere. Thus, if a
48017+ "trusted" GID is chosen, this restriction applies to even that GID.
48018+ Under this restriction, all non-root users will only be allowed to
48019+ execute files in directories they own that are not group or
48020+ world-writable, or in directories owned by root and writable only by
48021+ root. If the sysctl option is enabled, a sysctl option with name
48022+ "tpe_restrict_all" is created.
48023+
48024+config GRKERNSEC_TPE_INVERT
48025+ bool "Invert GID option"
48026+ depends on GRKERNSEC_TPE
48027+ help
48028+ If you say Y here, the group you specify in the TPE configuration will
48029+ decide what group TPE restrictions will be *disabled* for. This
48030+ option is useful if you want TPE restrictions to be applied to most
48031+ users on the system. If the sysctl option is enabled, a sysctl option
48032+ with name "tpe_invert" is created. Unlike other sysctl options, this
48033+ entry will default to on for backward-compatibility.
48034+
48035+config GRKERNSEC_TPE_GID
48036+ int "GID for untrusted users"
48037+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48038+ default 1005
48039+ help
48040+ Setting this GID determines what group TPE restrictions will be
48041+ *enabled* for. If the sysctl option is enabled, a sysctl option
48042+ with name "tpe_gid" is created.
48043+
48044+config GRKERNSEC_TPE_GID
48045+ int "GID for trusted users"
48046+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48047+ default 1005
48048+ help
48049+ Setting this GID determines what group TPE restrictions will be
48050+ *disabled* for. If the sysctl option is enabled, a sysctl option
48051+ with name "tpe_gid" is created.
48052+
48053+endmenu
48054+menu "Network Protections"
48055+depends on GRKERNSEC
48056+
48057+config GRKERNSEC_RANDNET
48058+ bool "Larger entropy pools"
48059+ help
48060+ If you say Y here, the entropy pools used for many features of Linux
48061+ and grsecurity will be doubled in size. Since several grsecurity
48062+ features use additional randomness, it is recommended that you say Y
48063+ here. Saying Y here has a similar effect as modifying
48064+ /proc/sys/kernel/random/poolsize.
48065+
48066+config GRKERNSEC_BLACKHOLE
48067+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48068+ depends on NET
48069+ help
48070+ If you say Y here, neither TCP resets nor ICMP
48071+ destination-unreachable packets will be sent in response to packets
48072+ sent to ports for which no associated listening process exists.
48073+ This feature supports both IPV4 and IPV6 and exempts the
48074+ loopback interface from blackholing. Enabling this feature
48075+ makes a host more resilient to DoS attacks and reduces network
48076+ visibility against scanners.
48077+
48078+ The blackhole feature as-implemented is equivalent to the FreeBSD
48079+ blackhole feature, as it prevents RST responses to all packets, not
48080+ just SYNs. Under most application behavior this causes no
48081+ problems, but applications (like haproxy) may not close certain
48082+ connections in a way that cleanly terminates them on the remote
48083+ end, leaving the remote host in LAST_ACK state. Because of this
48084+ side-effect and to prevent intentional LAST_ACK DoSes, this
48085+ feature also adds automatic mitigation against such attacks.
48086+ The mitigation drastically reduces the amount of time a socket
48087+ can spend in LAST_ACK state. If you're using haproxy and not
48088+ all servers it connects to have this option enabled, consider
48089+ disabling this feature on the haproxy host.
48090+
48091+ If the sysctl option is enabled, two sysctl options with names
48092+ "ip_blackhole" and "lastack_retries" will be created.
48093+ While "ip_blackhole" takes the standard zero/non-zero on/off
48094+ toggle, "lastack_retries" uses the same kinds of values as
48095+ "tcp_retries1" and "tcp_retries2". The default value of 4
48096+ prevents a socket from lasting more than 45 seconds in LAST_ACK
48097+ state.
48098+
48099+config GRKERNSEC_SOCKET
48100+ bool "Socket restrictions"
48101+ depends on NET
48102+ help
48103+ If you say Y here, you will be able to choose from several options.
48104+ If you assign a GID on your system and add it to the supplementary
48105+ groups of users you want to restrict socket access to, this patch
48106+ will perform up to three things, based on the option(s) you choose.
48107+
48108+config GRKERNSEC_SOCKET_ALL
48109+ bool "Deny any sockets to group"
48110+ depends on GRKERNSEC_SOCKET
48111+ help
48112+ If you say Y here, you will be able to choose a GID of whose users will
48113+ be unable to connect to other hosts from your machine or run server
48114+ applications from your machine. If the sysctl option is enabled, a
48115+ sysctl option with name "socket_all" is created.
48116+
48117+config GRKERNSEC_SOCKET_ALL_GID
48118+ int "GID to deny all sockets for"
48119+ depends on GRKERNSEC_SOCKET_ALL
48120+ default 1004
48121+ help
48122+ Here you can choose the GID to disable socket access for. Remember to
48123+ add the users you want socket access disabled for to the GID
48124+ specified here. If the sysctl option is enabled, a sysctl option
48125+ with name "socket_all_gid" is created.
48126+
48127+config GRKERNSEC_SOCKET_CLIENT
48128+ bool "Deny client sockets to group"
48129+ depends on GRKERNSEC_SOCKET
48130+ help
48131+ If you say Y here, you will be able to choose a GID of whose users will
48132+ be unable to connect to other hosts from your machine, but will be
48133+ able to run servers. If this option is enabled, all users in the group
48134+ you specify will have to use passive mode when initiating ftp transfers
48135+ from the shell on your machine. If the sysctl option is enabled, a
48136+ sysctl option with name "socket_client" is created.
48137+
48138+config GRKERNSEC_SOCKET_CLIENT_GID
48139+ int "GID to deny client sockets for"
48140+ depends on GRKERNSEC_SOCKET_CLIENT
48141+ default 1003
48142+ help
48143+ Here you can choose the GID to disable client socket access for.
48144+ Remember to add the users you want client socket access disabled for to
48145+ the GID specified here. If the sysctl option is enabled, a sysctl
48146+ option with name "socket_client_gid" is created.
48147+
48148+config GRKERNSEC_SOCKET_SERVER
48149+ bool "Deny server sockets to group"
48150+ depends on GRKERNSEC_SOCKET
48151+ help
48152+ If you say Y here, you will be able to choose a GID of whose users will
48153+ be unable to run server applications from your machine. If the sysctl
48154+ option is enabled, a sysctl option with name "socket_server" is created.
48155+
48156+config GRKERNSEC_SOCKET_SERVER_GID
48157+ int "GID to deny server sockets for"
48158+ depends on GRKERNSEC_SOCKET_SERVER
48159+ default 1002
48160+ help
48161+ Here you can choose the GID to disable server socket access for.
48162+ Remember to add the users you want server socket access disabled for to
48163+ the GID specified here. If the sysctl option is enabled, a sysctl
48164+ option with name "socket_server_gid" is created.
48165+
48166+endmenu
48167+menu "Sysctl support"
48168+depends on GRKERNSEC && SYSCTL
48169+
48170+config GRKERNSEC_SYSCTL
48171+ bool "Sysctl support"
48172+ help
48173+ If you say Y here, you will be able to change the options that
48174+ grsecurity runs with at bootup, without having to recompile your
48175+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48176+ to enable (1) or disable (0) various features. All the sysctl entries
48177+ are mutable until the "grsec_lock" entry is set to a non-zero value.
48178+ All features enabled in the kernel configuration are disabled at boot
48179+ if you do not say Y to the "Turn on features by default" option.
48180+ All options should be set at startup, and the grsec_lock entry should
48181+ be set to a non-zero value after all the options are set.
48182+ *THIS IS EXTREMELY IMPORTANT*
48183+
48184+config GRKERNSEC_SYSCTL_DISTRO
48185+ bool "Extra sysctl support for distro makers (READ HELP)"
48186+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48187+ help
48188+ If you say Y here, additional sysctl options will be created
48189+ for features that affect processes running as root. Therefore,
48190+ it is critical when using this option that the grsec_lock entry be
48191+ enabled after boot. Only distros with prebuilt kernel packages
48192+ with this option enabled that can ensure grsec_lock is enabled
48193+ after boot should use this option.
48194+ *Failure to set grsec_lock after boot makes all grsec features
48195+ this option covers useless*
48196+
48197+ Currently this option creates the following sysctl entries:
48198+ "Disable Privileged I/O": "disable_priv_io"
48199+
48200+config GRKERNSEC_SYSCTL_ON
48201+ bool "Turn on features by default"
48202+ depends on GRKERNSEC_SYSCTL
48203+ help
48204+ If you say Y here, instead of having all features enabled in the
48205+ kernel configuration disabled at boot time, the features will be
48206+ enabled at boot time. It is recommended you say Y here unless
48207+ there is some reason you would want all sysctl-tunable features to
48208+ be disabled by default. As mentioned elsewhere, it is important
48209+ to enable the grsec_lock entry once you have finished modifying
48210+ the sysctl entries.
48211+
48212+endmenu
48213+menu "Logging Options"
48214+depends on GRKERNSEC
48215+
48216+config GRKERNSEC_FLOODTIME
48217+ int "Seconds in between log messages (minimum)"
48218+ default 10
48219+ help
48220+ This option allows you to enforce the number of seconds between
48221+ grsecurity log messages. The default should be suitable for most
48222+ people, however, if you choose to change it, choose a value small enough
48223+ to allow informative logs to be produced, but large enough to
48224+ prevent flooding.
48225+
48226+config GRKERNSEC_FLOODBURST
48227+ int "Number of messages in a burst (maximum)"
48228+ default 6
48229+ help
48230+ This option allows you to choose the maximum number of messages allowed
48231+ within the flood time interval you chose in a separate option. The
48232+ default should be suitable for most people, however if you find that
48233+ many of your logs are being interpreted as flooding, you may want to
48234+ raise this value.
48235+
48236+endmenu
48237+
48238+endmenu
48239diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48240new file mode 100644
48241index 0000000..be9ae3a
48242--- /dev/null
48243+++ b/grsecurity/Makefile
48244@@ -0,0 +1,36 @@
48245+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48246+# during 2001-2009 it has been completely redesigned by Brad Spengler
48247+# into an RBAC system
48248+#
48249+# All code in this directory and various hooks inserted throughout the kernel
48250+# are copyright Brad Spengler - Open Source Security, Inc., and released
48251+# under the GPL v2 or higher
48252+
48253+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48254+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
48255+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48256+
48257+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48258+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48259+ gracl_learn.o grsec_log.o
48260+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48261+
48262+ifdef CONFIG_NET
48263+obj-y += grsec_sock.o
48264+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48265+endif
48266+
48267+ifndef CONFIG_GRKERNSEC
48268+obj-y += grsec_disabled.o
48269+endif
48270+
48271+ifdef CONFIG_GRKERNSEC_HIDESYM
48272+extra-y := grsec_hidesym.o
48273+$(obj)/grsec_hidesym.o:
48274+ @-chmod -f 500 /boot
48275+ @-chmod -f 500 /lib/modules
48276+ @-chmod -f 500 /lib64/modules
48277+ @-chmod -f 500 /lib32/modules
48278+ @-chmod -f 700 .
48279+ @echo ' grsec: protected kernel image paths'
48280+endif
48281diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48282new file mode 100644
48283index 0000000..d3b423d
48284--- /dev/null
48285+++ b/grsecurity/gracl.c
48286@@ -0,0 +1,4155 @@
48287+#include <linux/kernel.h>
48288+#include <linux/module.h>
48289+#include <linux/sched.h>
48290+#include <linux/mm.h>
48291+#include <linux/file.h>
48292+#include <linux/fs.h>
48293+#include <linux/namei.h>
48294+#include <linux/mount.h>
48295+#include <linux/tty.h>
48296+#include <linux/proc_fs.h>
48297+#include <linux/lglock.h>
48298+#include <linux/slab.h>
48299+#include <linux/vmalloc.h>
48300+#include <linux/types.h>
48301+#include <linux/sysctl.h>
48302+#include <linux/netdevice.h>
48303+#include <linux/ptrace.h>
48304+#include <linux/gracl.h>
48305+#include <linux/gralloc.h>
48306+#include <linux/security.h>
48307+#include <linux/grinternal.h>
48308+#include <linux/pid_namespace.h>
48309+#include <linux/fdtable.h>
48310+#include <linux/percpu.h>
48311+
48312+#include <asm/uaccess.h>
48313+#include <asm/errno.h>
48314+#include <asm/mman.h>
48315+
48316+static struct acl_role_db acl_role_set;
48317+static struct name_db name_set;
48318+static struct inodev_db inodev_set;
48319+
48320+/* for keeping track of userspace pointers used for subjects, so we
48321+ can share references in the kernel as well
48322+*/
48323+
48324+static struct path real_root;
48325+
48326+static struct acl_subj_map_db subj_map_set;
48327+
48328+static struct acl_role_label *default_role;
48329+
48330+static struct acl_role_label *role_list;
48331+
48332+static u16 acl_sp_role_value;
48333+
48334+extern char *gr_shared_page[4];
48335+static DEFINE_MUTEX(gr_dev_mutex);
48336+DEFINE_RWLOCK(gr_inode_lock);
48337+
48338+struct gr_arg *gr_usermode;
48339+
48340+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48341+
48342+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48343+extern void gr_clear_learn_entries(void);
48344+
48345+#ifdef CONFIG_GRKERNSEC_RESLOG
48346+extern void gr_log_resource(const struct task_struct *task,
48347+ const int res, const unsigned long wanted, const int gt);
48348+#endif
48349+
48350+unsigned char *gr_system_salt;
48351+unsigned char *gr_system_sum;
48352+
48353+static struct sprole_pw **acl_special_roles = NULL;
48354+static __u16 num_sprole_pws = 0;
48355+
48356+static struct acl_role_label *kernel_role = NULL;
48357+
48358+static unsigned int gr_auth_attempts = 0;
48359+static unsigned long gr_auth_expires = 0UL;
48360+
48361+#ifdef CONFIG_NET
48362+extern struct vfsmount *sock_mnt;
48363+#endif
48364+
48365+extern struct vfsmount *pipe_mnt;
48366+extern struct vfsmount *shm_mnt;
48367+#ifdef CONFIG_HUGETLBFS
48368+extern struct vfsmount *hugetlbfs_vfsmount;
48369+#endif
48370+
48371+static struct acl_object_label *fakefs_obj_rw;
48372+static struct acl_object_label *fakefs_obj_rwx;
48373+
48374+extern int gr_init_uidset(void);
48375+extern void gr_free_uidset(void);
48376+extern void gr_remove_uid(uid_t uid);
48377+extern int gr_find_uid(uid_t uid);
48378+
48379+DECLARE_BRLOCK(vfsmount_lock);
48380+
48381+__inline__ int
48382+gr_acl_is_enabled(void)
48383+{
48384+ return (gr_status & GR_READY);
48385+}
48386+
48387+#ifdef CONFIG_BTRFS_FS
48388+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48389+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48390+#endif
48391+
48392+static inline dev_t __get_dev(const struct dentry *dentry)
48393+{
48394+#ifdef CONFIG_BTRFS_FS
48395+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48396+ return get_btrfs_dev_from_inode(dentry->d_inode);
48397+ else
48398+#endif
48399+ return dentry->d_inode->i_sb->s_dev;
48400+}
48401+
48402+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48403+{
48404+ return __get_dev(dentry);
48405+}
48406+
48407+static char gr_task_roletype_to_char(struct task_struct *task)
48408+{
48409+ switch (task->role->roletype &
48410+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48411+ GR_ROLE_SPECIAL)) {
48412+ case GR_ROLE_DEFAULT:
48413+ return 'D';
48414+ case GR_ROLE_USER:
48415+ return 'U';
48416+ case GR_ROLE_GROUP:
48417+ return 'G';
48418+ case GR_ROLE_SPECIAL:
48419+ return 'S';
48420+ }
48421+
48422+ return 'X';
48423+}
48424+
48425+char gr_roletype_to_char(void)
48426+{
48427+ return gr_task_roletype_to_char(current);
48428+}
48429+
48430+__inline__ int
48431+gr_acl_tpe_check(void)
48432+{
48433+ if (unlikely(!(gr_status & GR_READY)))
48434+ return 0;
48435+ if (current->role->roletype & GR_ROLE_TPE)
48436+ return 1;
48437+ else
48438+ return 0;
48439+}
48440+
48441+int
48442+gr_handle_rawio(const struct inode *inode)
48443+{
48444+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48445+ if (inode && S_ISBLK(inode->i_mode) &&
48446+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48447+ !capable(CAP_SYS_RAWIO))
48448+ return 1;
48449+#endif
48450+ return 0;
48451+}
48452+
48453+static int
48454+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48455+{
48456+ if (likely(lena != lenb))
48457+ return 0;
48458+
48459+ return !memcmp(a, b, lena);
48460+}
48461+
48462+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48463+{
48464+ *buflen -= namelen;
48465+ if (*buflen < 0)
48466+ return -ENAMETOOLONG;
48467+ *buffer -= namelen;
48468+ memcpy(*buffer, str, namelen);
48469+ return 0;
48470+}
48471+
48472+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48473+{
48474+ return prepend(buffer, buflen, name->name, name->len);
48475+}
48476+
48477+static int prepend_path(const struct path *path, struct path *root,
48478+ char **buffer, int *buflen)
48479+{
48480+ struct dentry *dentry = path->dentry;
48481+ struct vfsmount *vfsmnt = path->mnt;
48482+ bool slash = false;
48483+ int error = 0;
48484+
48485+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48486+ struct dentry * parent;
48487+
48488+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48489+ /* Global root? */
48490+ if (vfsmnt->mnt_parent == vfsmnt) {
48491+ goto out;
48492+ }
48493+ dentry = vfsmnt->mnt_mountpoint;
48494+ vfsmnt = vfsmnt->mnt_parent;
48495+ continue;
48496+ }
48497+ parent = dentry->d_parent;
48498+ prefetch(parent);
48499+ spin_lock(&dentry->d_lock);
48500+ error = prepend_name(buffer, buflen, &dentry->d_name);
48501+ spin_unlock(&dentry->d_lock);
48502+ if (!error)
48503+ error = prepend(buffer, buflen, "/", 1);
48504+ if (error)
48505+ break;
48506+
48507+ slash = true;
48508+ dentry = parent;
48509+ }
48510+
48511+out:
48512+ if (!error && !slash)
48513+ error = prepend(buffer, buflen, "/", 1);
48514+
48515+ return error;
48516+}
48517+
48518+/* this must be called with vfsmount_lock and rename_lock held */
48519+
48520+static char *__our_d_path(const struct path *path, struct path *root,
48521+ char *buf, int buflen)
48522+{
48523+ char *res = buf + buflen;
48524+ int error;
48525+
48526+ prepend(&res, &buflen, "\0", 1);
48527+ error = prepend_path(path, root, &res, &buflen);
48528+ if (error)
48529+ return ERR_PTR(error);
48530+
48531+ return res;
48532+}
48533+
48534+static char *
48535+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48536+{
48537+ char *retval;
48538+
48539+ retval = __our_d_path(path, root, buf, buflen);
48540+ if (unlikely(IS_ERR(retval)))
48541+ retval = strcpy(buf, "<path too long>");
48542+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48543+ retval[1] = '\0';
48544+
48545+ return retval;
48546+}
48547+
48548+static char *
48549+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48550+ char *buf, int buflen)
48551+{
48552+ struct path path;
48553+ char *res;
48554+
48555+ path.dentry = (struct dentry *)dentry;
48556+ path.mnt = (struct vfsmount *)vfsmnt;
48557+
48558+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48559+ by the RBAC system */
48560+ res = gen_full_path(&path, &real_root, buf, buflen);
48561+
48562+ return res;
48563+}
48564+
48565+static char *
48566+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48567+ char *buf, int buflen)
48568+{
48569+ char *res;
48570+ struct path path;
48571+ struct path root;
48572+ struct task_struct *reaper = &init_task;
48573+
48574+ path.dentry = (struct dentry *)dentry;
48575+ path.mnt = (struct vfsmount *)vfsmnt;
48576+
48577+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48578+ get_fs_root(reaper->fs, &root);
48579+
48580+ write_seqlock(&rename_lock);
48581+ br_read_lock(vfsmount_lock);
48582+ res = gen_full_path(&path, &root, buf, buflen);
48583+ br_read_unlock(vfsmount_lock);
48584+ write_sequnlock(&rename_lock);
48585+
48586+ path_put(&root);
48587+ return res;
48588+}
48589+
48590+static char *
48591+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48592+{
48593+ char *ret;
48594+ write_seqlock(&rename_lock);
48595+ br_read_lock(vfsmount_lock);
48596+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48597+ PAGE_SIZE);
48598+ br_read_unlock(vfsmount_lock);
48599+ write_sequnlock(&rename_lock);
48600+ return ret;
48601+}
48602+
48603+static char *
48604+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48605+{
48606+ char *ret;
48607+ char *buf;
48608+ int buflen;
48609+
48610+ write_seqlock(&rename_lock);
48611+ br_read_lock(vfsmount_lock);
48612+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48613+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48614+ buflen = (int)(ret - buf);
48615+ if (buflen >= 5)
48616+ prepend(&ret, &buflen, "/proc", 5);
48617+ else
48618+ ret = strcpy(buf, "<path too long>");
48619+ br_read_unlock(vfsmount_lock);
48620+ write_sequnlock(&rename_lock);
48621+ return ret;
48622+}
48623+
48624+char *
48625+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48626+{
48627+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48628+ PAGE_SIZE);
48629+}
48630+
48631+char *
48632+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48633+{
48634+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48635+ PAGE_SIZE);
48636+}
48637+
48638+char *
48639+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48640+{
48641+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48642+ PAGE_SIZE);
48643+}
48644+
48645+char *
48646+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48647+{
48648+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48649+ PAGE_SIZE);
48650+}
48651+
48652+char *
48653+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48654+{
48655+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48656+ PAGE_SIZE);
48657+}
48658+
48659+__inline__ __u32
48660+to_gr_audit(const __u32 reqmode)
48661+{
48662+ /* masks off auditable permission flags, then shifts them to create
48663+ auditing flags, and adds the special case of append auditing if
48664+ we're requesting write */
48665+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48666+}
48667+
48668+struct acl_subject_label *
48669+lookup_subject_map(const struct acl_subject_label *userp)
48670+{
48671+ unsigned int index = shash(userp, subj_map_set.s_size);
48672+ struct subject_map *match;
48673+
48674+ match = subj_map_set.s_hash[index];
48675+
48676+ while (match && match->user != userp)
48677+ match = match->next;
48678+
48679+ if (match != NULL)
48680+ return match->kernel;
48681+ else
48682+ return NULL;
48683+}
48684+
48685+static void
48686+insert_subj_map_entry(struct subject_map *subjmap)
48687+{
48688+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48689+ struct subject_map **curr;
48690+
48691+ subjmap->prev = NULL;
48692+
48693+ curr = &subj_map_set.s_hash[index];
48694+ if (*curr != NULL)
48695+ (*curr)->prev = subjmap;
48696+
48697+ subjmap->next = *curr;
48698+ *curr = subjmap;
48699+
48700+ return;
48701+}
48702+
48703+static struct acl_role_label *
48704+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48705+ const gid_t gid)
48706+{
48707+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48708+ struct acl_role_label *match;
48709+ struct role_allowed_ip *ipp;
48710+ unsigned int x;
48711+ u32 curr_ip = task->signal->curr_ip;
48712+
48713+ task->signal->saved_ip = curr_ip;
48714+
48715+ match = acl_role_set.r_hash[index];
48716+
48717+ while (match) {
48718+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48719+ for (x = 0; x < match->domain_child_num; x++) {
48720+ if (match->domain_children[x] == uid)
48721+ goto found;
48722+ }
48723+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48724+ break;
48725+ match = match->next;
48726+ }
48727+found:
48728+ if (match == NULL) {
48729+ try_group:
48730+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48731+ match = acl_role_set.r_hash[index];
48732+
48733+ while (match) {
48734+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48735+ for (x = 0; x < match->domain_child_num; x++) {
48736+ if (match->domain_children[x] == gid)
48737+ goto found2;
48738+ }
48739+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48740+ break;
48741+ match = match->next;
48742+ }
48743+found2:
48744+ if (match == NULL)
48745+ match = default_role;
48746+ if (match->allowed_ips == NULL)
48747+ return match;
48748+ else {
48749+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48750+ if (likely
48751+ ((ntohl(curr_ip) & ipp->netmask) ==
48752+ (ntohl(ipp->addr) & ipp->netmask)))
48753+ return match;
48754+ }
48755+ match = default_role;
48756+ }
48757+ } else if (match->allowed_ips == NULL) {
48758+ return match;
48759+ } else {
48760+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48761+ if (likely
48762+ ((ntohl(curr_ip) & ipp->netmask) ==
48763+ (ntohl(ipp->addr) & ipp->netmask)))
48764+ return match;
48765+ }
48766+ goto try_group;
48767+ }
48768+
48769+ return match;
48770+}
48771+
48772+struct acl_subject_label *
48773+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48774+ const struct acl_role_label *role)
48775+{
48776+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48777+ struct acl_subject_label *match;
48778+
48779+ match = role->subj_hash[index];
48780+
48781+ while (match && (match->inode != ino || match->device != dev ||
48782+ (match->mode & GR_DELETED))) {
48783+ match = match->next;
48784+ }
48785+
48786+ if (match && !(match->mode & GR_DELETED))
48787+ return match;
48788+ else
48789+ return NULL;
48790+}
48791+
48792+struct acl_subject_label *
48793+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48794+ const struct acl_role_label *role)
48795+{
48796+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48797+ struct acl_subject_label *match;
48798+
48799+ match = role->subj_hash[index];
48800+
48801+ while (match && (match->inode != ino || match->device != dev ||
48802+ !(match->mode & GR_DELETED))) {
48803+ match = match->next;
48804+ }
48805+
48806+ if (match && (match->mode & GR_DELETED))
48807+ return match;
48808+ else
48809+ return NULL;
48810+}
48811+
48812+static struct acl_object_label *
48813+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48814+ const struct acl_subject_label *subj)
48815+{
48816+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48817+ struct acl_object_label *match;
48818+
48819+ match = subj->obj_hash[index];
48820+
48821+ while (match && (match->inode != ino || match->device != dev ||
48822+ (match->mode & GR_DELETED))) {
48823+ match = match->next;
48824+ }
48825+
48826+ if (match && !(match->mode & GR_DELETED))
48827+ return match;
48828+ else
48829+ return NULL;
48830+}
48831+
48832+static struct acl_object_label *
48833+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48834+ const struct acl_subject_label *subj)
48835+{
48836+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48837+ struct acl_object_label *match;
48838+
48839+ match = subj->obj_hash[index];
48840+
48841+ while (match && (match->inode != ino || match->device != dev ||
48842+ !(match->mode & GR_DELETED))) {
48843+ match = match->next;
48844+ }
48845+
48846+ if (match && (match->mode & GR_DELETED))
48847+ return match;
48848+
48849+ match = subj->obj_hash[index];
48850+
48851+ while (match && (match->inode != ino || match->device != dev ||
48852+ (match->mode & GR_DELETED))) {
48853+ match = match->next;
48854+ }
48855+
48856+ if (match && !(match->mode & GR_DELETED))
48857+ return match;
48858+ else
48859+ return NULL;
48860+}
48861+
48862+static struct name_entry *
48863+lookup_name_entry(const char *name)
48864+{
48865+ unsigned int len = strlen(name);
48866+ unsigned int key = full_name_hash(name, len);
48867+ unsigned int index = key % name_set.n_size;
48868+ struct name_entry *match;
48869+
48870+ match = name_set.n_hash[index];
48871+
48872+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48873+ match = match->next;
48874+
48875+ return match;
48876+}
48877+
48878+static struct name_entry *
48879+lookup_name_entry_create(const char *name)
48880+{
48881+ unsigned int len = strlen(name);
48882+ unsigned int key = full_name_hash(name, len);
48883+ unsigned int index = key % name_set.n_size;
48884+ struct name_entry *match;
48885+
48886+ match = name_set.n_hash[index];
48887+
48888+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48889+ !match->deleted))
48890+ match = match->next;
48891+
48892+ if (match && match->deleted)
48893+ return match;
48894+
48895+ match = name_set.n_hash[index];
48896+
48897+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48898+ match->deleted))
48899+ match = match->next;
48900+
48901+ if (match && !match->deleted)
48902+ return match;
48903+ else
48904+ return NULL;
48905+}
48906+
48907+static struct inodev_entry *
48908+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48909+{
48910+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48911+ struct inodev_entry *match;
48912+
48913+ match = inodev_set.i_hash[index];
48914+
48915+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48916+ match = match->next;
48917+
48918+ return match;
48919+}
48920+
48921+static void
48922+insert_inodev_entry(struct inodev_entry *entry)
48923+{
48924+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48925+ inodev_set.i_size);
48926+ struct inodev_entry **curr;
48927+
48928+ entry->prev = NULL;
48929+
48930+ curr = &inodev_set.i_hash[index];
48931+ if (*curr != NULL)
48932+ (*curr)->prev = entry;
48933+
48934+ entry->next = *curr;
48935+ *curr = entry;
48936+
48937+ return;
48938+}
48939+
48940+static void
48941+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48942+{
48943+ unsigned int index =
48944+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48945+ struct acl_role_label **curr;
48946+ struct acl_role_label *tmp;
48947+
48948+ curr = &acl_role_set.r_hash[index];
48949+
48950+ /* if role was already inserted due to domains and already has
48951+ a role in the same bucket as it attached, then we need to
48952+ combine these two buckets
48953+ */
48954+ if (role->next) {
48955+ tmp = role->next;
48956+ while (tmp->next)
48957+ tmp = tmp->next;
48958+ tmp->next = *curr;
48959+ } else
48960+ role->next = *curr;
48961+ *curr = role;
48962+
48963+ return;
48964+}
48965+
48966+static void
48967+insert_acl_role_label(struct acl_role_label *role)
48968+{
48969+ int i;
48970+
48971+ if (role_list == NULL) {
48972+ role_list = role;
48973+ role->prev = NULL;
48974+ } else {
48975+ role->prev = role_list;
48976+ role_list = role;
48977+ }
48978+
48979+ /* used for hash chains */
48980+ role->next = NULL;
48981+
48982+ if (role->roletype & GR_ROLE_DOMAIN) {
48983+ for (i = 0; i < role->domain_child_num; i++)
48984+ __insert_acl_role_label(role, role->domain_children[i]);
48985+ } else
48986+ __insert_acl_role_label(role, role->uidgid);
48987+}
48988+
48989+static int
48990+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48991+{
48992+ struct name_entry **curr, *nentry;
48993+ struct inodev_entry *ientry;
48994+ unsigned int len = strlen(name);
48995+ unsigned int key = full_name_hash(name, len);
48996+ unsigned int index = key % name_set.n_size;
48997+
48998+ curr = &name_set.n_hash[index];
48999+
49000+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49001+ curr = &((*curr)->next);
49002+
49003+ if (*curr != NULL)
49004+ return 1;
49005+
49006+ nentry = acl_alloc(sizeof (struct name_entry));
49007+ if (nentry == NULL)
49008+ return 0;
49009+ ientry = acl_alloc(sizeof (struct inodev_entry));
49010+ if (ientry == NULL)
49011+ return 0;
49012+ ientry->nentry = nentry;
49013+
49014+ nentry->key = key;
49015+ nentry->name = name;
49016+ nentry->inode = inode;
49017+ nentry->device = device;
49018+ nentry->len = len;
49019+ nentry->deleted = deleted;
49020+
49021+ nentry->prev = NULL;
49022+ curr = &name_set.n_hash[index];
49023+ if (*curr != NULL)
49024+ (*curr)->prev = nentry;
49025+ nentry->next = *curr;
49026+ *curr = nentry;
49027+
49028+ /* insert us into the table searchable by inode/dev */
49029+ insert_inodev_entry(ientry);
49030+
49031+ return 1;
49032+}
49033+
49034+static void
49035+insert_acl_obj_label(struct acl_object_label *obj,
49036+ struct acl_subject_label *subj)
49037+{
49038+ unsigned int index =
49039+ fhash(obj->inode, obj->device, subj->obj_hash_size);
49040+ struct acl_object_label **curr;
49041+
49042+
49043+ obj->prev = NULL;
49044+
49045+ curr = &subj->obj_hash[index];
49046+ if (*curr != NULL)
49047+ (*curr)->prev = obj;
49048+
49049+ obj->next = *curr;
49050+ *curr = obj;
49051+
49052+ return;
49053+}
49054+
49055+static void
49056+insert_acl_subj_label(struct acl_subject_label *obj,
49057+ struct acl_role_label *role)
49058+{
49059+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49060+ struct acl_subject_label **curr;
49061+
49062+ obj->prev = NULL;
49063+
49064+ curr = &role->subj_hash[index];
49065+ if (*curr != NULL)
49066+ (*curr)->prev = obj;
49067+
49068+ obj->next = *curr;
49069+ *curr = obj;
49070+
49071+ return;
49072+}
49073+
49074+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49075+
49076+static void *
49077+create_table(__u32 * len, int elementsize)
49078+{
49079+ unsigned int table_sizes[] = {
49080+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49081+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49082+ 4194301, 8388593, 16777213, 33554393, 67108859
49083+ };
49084+ void *newtable = NULL;
49085+ unsigned int pwr = 0;
49086+
49087+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49088+ table_sizes[pwr] <= *len)
49089+ pwr++;
49090+
49091+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49092+ return newtable;
49093+
49094+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49095+ newtable =
49096+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49097+ else
49098+ newtable = vmalloc(table_sizes[pwr] * elementsize);
49099+
49100+ *len = table_sizes[pwr];
49101+
49102+ return newtable;
49103+}
49104+
49105+static int
49106+init_variables(const struct gr_arg *arg)
49107+{
49108+ struct task_struct *reaper = &init_task;
49109+ unsigned int stacksize;
49110+
49111+ subj_map_set.s_size = arg->role_db.num_subjects;
49112+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49113+ name_set.n_size = arg->role_db.num_objects;
49114+ inodev_set.i_size = arg->role_db.num_objects;
49115+
49116+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
49117+ !name_set.n_size || !inodev_set.i_size)
49118+ return 1;
49119+
49120+ if (!gr_init_uidset())
49121+ return 1;
49122+
49123+ /* set up the stack that holds allocation info */
49124+
49125+ stacksize = arg->role_db.num_pointers + 5;
49126+
49127+ if (!acl_alloc_stack_init(stacksize))
49128+ return 1;
49129+
49130+ /* grab reference for the real root dentry and vfsmount */
49131+ get_fs_root(reaper->fs, &real_root);
49132+
49133+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49134+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49135+#endif
49136+
49137+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49138+ if (fakefs_obj_rw == NULL)
49139+ return 1;
49140+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49141+
49142+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49143+ if (fakefs_obj_rwx == NULL)
49144+ return 1;
49145+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49146+
49147+ subj_map_set.s_hash =
49148+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49149+ acl_role_set.r_hash =
49150+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49151+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49152+ inodev_set.i_hash =
49153+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49154+
49155+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49156+ !name_set.n_hash || !inodev_set.i_hash)
49157+ return 1;
49158+
49159+ memset(subj_map_set.s_hash, 0,
49160+ sizeof(struct subject_map *) * subj_map_set.s_size);
49161+ memset(acl_role_set.r_hash, 0,
49162+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
49163+ memset(name_set.n_hash, 0,
49164+ sizeof (struct name_entry *) * name_set.n_size);
49165+ memset(inodev_set.i_hash, 0,
49166+ sizeof (struct inodev_entry *) * inodev_set.i_size);
49167+
49168+ return 0;
49169+}
49170+
49171+/* free information not needed after startup
49172+ currently contains user->kernel pointer mappings for subjects
49173+*/
49174+
49175+static void
49176+free_init_variables(void)
49177+{
49178+ __u32 i;
49179+
49180+ if (subj_map_set.s_hash) {
49181+ for (i = 0; i < subj_map_set.s_size; i++) {
49182+ if (subj_map_set.s_hash[i]) {
49183+ kfree(subj_map_set.s_hash[i]);
49184+ subj_map_set.s_hash[i] = NULL;
49185+ }
49186+ }
49187+
49188+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49189+ PAGE_SIZE)
49190+ kfree(subj_map_set.s_hash);
49191+ else
49192+ vfree(subj_map_set.s_hash);
49193+ }
49194+
49195+ return;
49196+}
49197+
49198+static void
49199+free_variables(void)
49200+{
49201+ struct acl_subject_label *s;
49202+ struct acl_role_label *r;
49203+ struct task_struct *task, *task2;
49204+ unsigned int x;
49205+
49206+ gr_clear_learn_entries();
49207+
49208+ read_lock(&tasklist_lock);
49209+ do_each_thread(task2, task) {
49210+ task->acl_sp_role = 0;
49211+ task->acl_role_id = 0;
49212+ task->acl = NULL;
49213+ task->role = NULL;
49214+ } while_each_thread(task2, task);
49215+ read_unlock(&tasklist_lock);
49216+
49217+ /* release the reference to the real root dentry and vfsmount */
49218+ path_put(&real_root);
49219+
49220+ /* free all object hash tables */
49221+
49222+ FOR_EACH_ROLE_START(r)
49223+ if (r->subj_hash == NULL)
49224+ goto next_role;
49225+ FOR_EACH_SUBJECT_START(r, s, x)
49226+ if (s->obj_hash == NULL)
49227+ break;
49228+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49229+ kfree(s->obj_hash);
49230+ else
49231+ vfree(s->obj_hash);
49232+ FOR_EACH_SUBJECT_END(s, x)
49233+ FOR_EACH_NESTED_SUBJECT_START(r, s)
49234+ if (s->obj_hash == NULL)
49235+ break;
49236+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49237+ kfree(s->obj_hash);
49238+ else
49239+ vfree(s->obj_hash);
49240+ FOR_EACH_NESTED_SUBJECT_END(s)
49241+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49242+ kfree(r->subj_hash);
49243+ else
49244+ vfree(r->subj_hash);
49245+ r->subj_hash = NULL;
49246+next_role:
49247+ FOR_EACH_ROLE_END(r)
49248+
49249+ acl_free_all();
49250+
49251+ if (acl_role_set.r_hash) {
49252+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49253+ PAGE_SIZE)
49254+ kfree(acl_role_set.r_hash);
49255+ else
49256+ vfree(acl_role_set.r_hash);
49257+ }
49258+ if (name_set.n_hash) {
49259+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
49260+ PAGE_SIZE)
49261+ kfree(name_set.n_hash);
49262+ else
49263+ vfree(name_set.n_hash);
49264+ }
49265+
49266+ if (inodev_set.i_hash) {
49267+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49268+ PAGE_SIZE)
49269+ kfree(inodev_set.i_hash);
49270+ else
49271+ vfree(inodev_set.i_hash);
49272+ }
49273+
49274+ gr_free_uidset();
49275+
49276+ memset(&name_set, 0, sizeof (struct name_db));
49277+ memset(&inodev_set, 0, sizeof (struct inodev_db));
49278+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49279+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49280+
49281+ default_role = NULL;
49282+ role_list = NULL;
49283+
49284+ return;
49285+}
49286+
49287+static __u32
49288+count_user_objs(struct acl_object_label *userp)
49289+{
49290+ struct acl_object_label o_tmp;
49291+ __u32 num = 0;
49292+
49293+ while (userp) {
49294+ if (copy_from_user(&o_tmp, userp,
49295+ sizeof (struct acl_object_label)))
49296+ break;
49297+
49298+ userp = o_tmp.prev;
49299+ num++;
49300+ }
49301+
49302+ return num;
49303+}
49304+
49305+static struct acl_subject_label *
49306+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49307+
49308+static int
49309+copy_user_glob(struct acl_object_label *obj)
49310+{
49311+ struct acl_object_label *g_tmp, **guser;
49312+ unsigned int len;
49313+ char *tmp;
49314+
49315+ if (obj->globbed == NULL)
49316+ return 0;
49317+
49318+ guser = &obj->globbed;
49319+ while (*guser) {
49320+ g_tmp = (struct acl_object_label *)
49321+ acl_alloc(sizeof (struct acl_object_label));
49322+ if (g_tmp == NULL)
49323+ return -ENOMEM;
49324+
49325+ if (copy_from_user(g_tmp, *guser,
49326+ sizeof (struct acl_object_label)))
49327+ return -EFAULT;
49328+
49329+ len = strnlen_user(g_tmp->filename, PATH_MAX);
49330+
49331+ if (!len || len >= PATH_MAX)
49332+ return -EINVAL;
49333+
49334+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49335+ return -ENOMEM;
49336+
49337+ if (copy_from_user(tmp, g_tmp->filename, len))
49338+ return -EFAULT;
49339+ tmp[len-1] = '\0';
49340+ g_tmp->filename = tmp;
49341+
49342+ *guser = g_tmp;
49343+ guser = &(g_tmp->next);
49344+ }
49345+
49346+ return 0;
49347+}
49348+
49349+static int
49350+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49351+ struct acl_role_label *role)
49352+{
49353+ struct acl_object_label *o_tmp;
49354+ unsigned int len;
49355+ int ret;
49356+ char *tmp;
49357+
49358+ while (userp) {
49359+ if ((o_tmp = (struct acl_object_label *)
49360+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
49361+ return -ENOMEM;
49362+
49363+ if (copy_from_user(o_tmp, userp,
49364+ sizeof (struct acl_object_label)))
49365+ return -EFAULT;
49366+
49367+ userp = o_tmp->prev;
49368+
49369+ len = strnlen_user(o_tmp->filename, PATH_MAX);
49370+
49371+ if (!len || len >= PATH_MAX)
49372+ return -EINVAL;
49373+
49374+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49375+ return -ENOMEM;
49376+
49377+ if (copy_from_user(tmp, o_tmp->filename, len))
49378+ return -EFAULT;
49379+ tmp[len-1] = '\0';
49380+ o_tmp->filename = tmp;
49381+
49382+ insert_acl_obj_label(o_tmp, subj);
49383+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49384+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49385+ return -ENOMEM;
49386+
49387+ ret = copy_user_glob(o_tmp);
49388+ if (ret)
49389+ return ret;
49390+
49391+ if (o_tmp->nested) {
49392+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49393+ if (IS_ERR(o_tmp->nested))
49394+ return PTR_ERR(o_tmp->nested);
49395+
49396+ /* insert into nested subject list */
49397+ o_tmp->nested->next = role->hash->first;
49398+ role->hash->first = o_tmp->nested;
49399+ }
49400+ }
49401+
49402+ return 0;
49403+}
49404+
49405+static __u32
49406+count_user_subjs(struct acl_subject_label *userp)
49407+{
49408+ struct acl_subject_label s_tmp;
49409+ __u32 num = 0;
49410+
49411+ while (userp) {
49412+ if (copy_from_user(&s_tmp, userp,
49413+ sizeof (struct acl_subject_label)))
49414+ break;
49415+
49416+ userp = s_tmp.prev;
49417+ /* do not count nested subjects against this count, since
49418+ they are not included in the hash table, but are
49419+ attached to objects. We have already counted
49420+ the subjects in userspace for the allocation
49421+ stack
49422+ */
49423+ if (!(s_tmp.mode & GR_NESTED))
49424+ num++;
49425+ }
49426+
49427+ return num;
49428+}
49429+
49430+static int
49431+copy_user_allowedips(struct acl_role_label *rolep)
49432+{
49433+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49434+
49435+ ruserip = rolep->allowed_ips;
49436+
49437+ while (ruserip) {
49438+ rlast = rtmp;
49439+
49440+ if ((rtmp = (struct role_allowed_ip *)
49441+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49442+ return -ENOMEM;
49443+
49444+ if (copy_from_user(rtmp, ruserip,
49445+ sizeof (struct role_allowed_ip)))
49446+ return -EFAULT;
49447+
49448+ ruserip = rtmp->prev;
49449+
49450+ if (!rlast) {
49451+ rtmp->prev = NULL;
49452+ rolep->allowed_ips = rtmp;
49453+ } else {
49454+ rlast->next = rtmp;
49455+ rtmp->prev = rlast;
49456+ }
49457+
49458+ if (!ruserip)
49459+ rtmp->next = NULL;
49460+ }
49461+
49462+ return 0;
49463+}
49464+
49465+static int
49466+copy_user_transitions(struct acl_role_label *rolep)
49467+{
49468+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49469+
49470+ unsigned int len;
49471+ char *tmp;
49472+
49473+ rusertp = rolep->transitions;
49474+
49475+ while (rusertp) {
49476+ rlast = rtmp;
49477+
49478+ if ((rtmp = (struct role_transition *)
49479+ acl_alloc(sizeof (struct role_transition))) == NULL)
49480+ return -ENOMEM;
49481+
49482+ if (copy_from_user(rtmp, rusertp,
49483+ sizeof (struct role_transition)))
49484+ return -EFAULT;
49485+
49486+ rusertp = rtmp->prev;
49487+
49488+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49489+
49490+ if (!len || len >= GR_SPROLE_LEN)
49491+ return -EINVAL;
49492+
49493+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49494+ return -ENOMEM;
49495+
49496+ if (copy_from_user(tmp, rtmp->rolename, len))
49497+ return -EFAULT;
49498+ tmp[len-1] = '\0';
49499+ rtmp->rolename = tmp;
49500+
49501+ if (!rlast) {
49502+ rtmp->prev = NULL;
49503+ rolep->transitions = rtmp;
49504+ } else {
49505+ rlast->next = rtmp;
49506+ rtmp->prev = rlast;
49507+ }
49508+
49509+ if (!rusertp)
49510+ rtmp->next = NULL;
49511+ }
49512+
49513+ return 0;
49514+}
49515+
49516+static struct acl_subject_label *
49517+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49518+{
49519+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49520+ unsigned int len;
49521+ char *tmp;
49522+ __u32 num_objs;
49523+ struct acl_ip_label **i_tmp, *i_utmp2;
49524+ struct gr_hash_struct ghash;
49525+ struct subject_map *subjmap;
49526+ unsigned int i_num;
49527+ int err;
49528+
49529+ s_tmp = lookup_subject_map(userp);
49530+
49531+ /* we've already copied this subject into the kernel, just return
49532+ the reference to it, and don't copy it over again
49533+ */
49534+ if (s_tmp)
49535+ return(s_tmp);
49536+
49537+ if ((s_tmp = (struct acl_subject_label *)
49538+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49539+ return ERR_PTR(-ENOMEM);
49540+
49541+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49542+ if (subjmap == NULL)
49543+ return ERR_PTR(-ENOMEM);
49544+
49545+ subjmap->user = userp;
49546+ subjmap->kernel = s_tmp;
49547+ insert_subj_map_entry(subjmap);
49548+
49549+ if (copy_from_user(s_tmp, userp,
49550+ sizeof (struct acl_subject_label)))
49551+ return ERR_PTR(-EFAULT);
49552+
49553+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49554+
49555+ if (!len || len >= PATH_MAX)
49556+ return ERR_PTR(-EINVAL);
49557+
49558+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49559+ return ERR_PTR(-ENOMEM);
49560+
49561+ if (copy_from_user(tmp, s_tmp->filename, len))
49562+ return ERR_PTR(-EFAULT);
49563+ tmp[len-1] = '\0';
49564+ s_tmp->filename = tmp;
49565+
49566+ if (!strcmp(s_tmp->filename, "/"))
49567+ role->root_label = s_tmp;
49568+
49569+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49570+ return ERR_PTR(-EFAULT);
49571+
49572+ /* copy user and group transition tables */
49573+
49574+ if (s_tmp->user_trans_num) {
49575+ uid_t *uidlist;
49576+
49577+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49578+ if (uidlist == NULL)
49579+ return ERR_PTR(-ENOMEM);
49580+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49581+ return ERR_PTR(-EFAULT);
49582+
49583+ s_tmp->user_transitions = uidlist;
49584+ }
49585+
49586+ if (s_tmp->group_trans_num) {
49587+ gid_t *gidlist;
49588+
49589+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49590+ if (gidlist == NULL)
49591+ return ERR_PTR(-ENOMEM);
49592+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49593+ return ERR_PTR(-EFAULT);
49594+
49595+ s_tmp->group_transitions = gidlist;
49596+ }
49597+
49598+ /* set up object hash table */
49599+ num_objs = count_user_objs(ghash.first);
49600+
49601+ s_tmp->obj_hash_size = num_objs;
49602+ s_tmp->obj_hash =
49603+ (struct acl_object_label **)
49604+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49605+
49606+ if (!s_tmp->obj_hash)
49607+ return ERR_PTR(-ENOMEM);
49608+
49609+ memset(s_tmp->obj_hash, 0,
49610+ s_tmp->obj_hash_size *
49611+ sizeof (struct acl_object_label *));
49612+
49613+ /* add in objects */
49614+ err = copy_user_objs(ghash.first, s_tmp, role);
49615+
49616+ if (err)
49617+ return ERR_PTR(err);
49618+
49619+ /* set pointer for parent subject */
49620+ if (s_tmp->parent_subject) {
49621+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49622+
49623+ if (IS_ERR(s_tmp2))
49624+ return s_tmp2;
49625+
49626+ s_tmp->parent_subject = s_tmp2;
49627+ }
49628+
49629+ /* add in ip acls */
49630+
49631+ if (!s_tmp->ip_num) {
49632+ s_tmp->ips = NULL;
49633+ goto insert;
49634+ }
49635+
49636+ i_tmp =
49637+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49638+ sizeof (struct acl_ip_label *));
49639+
49640+ if (!i_tmp)
49641+ return ERR_PTR(-ENOMEM);
49642+
49643+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49644+ *(i_tmp + i_num) =
49645+ (struct acl_ip_label *)
49646+ acl_alloc(sizeof (struct acl_ip_label));
49647+ if (!*(i_tmp + i_num))
49648+ return ERR_PTR(-ENOMEM);
49649+
49650+ if (copy_from_user
49651+ (&i_utmp2, s_tmp->ips + i_num,
49652+ sizeof (struct acl_ip_label *)))
49653+ return ERR_PTR(-EFAULT);
49654+
49655+ if (copy_from_user
49656+ (*(i_tmp + i_num), i_utmp2,
49657+ sizeof (struct acl_ip_label)))
49658+ return ERR_PTR(-EFAULT);
49659+
49660+ if ((*(i_tmp + i_num))->iface == NULL)
49661+ continue;
49662+
49663+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49664+ if (!len || len >= IFNAMSIZ)
49665+ return ERR_PTR(-EINVAL);
49666+ tmp = acl_alloc(len);
49667+ if (tmp == NULL)
49668+ return ERR_PTR(-ENOMEM);
49669+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49670+ return ERR_PTR(-EFAULT);
49671+ (*(i_tmp + i_num))->iface = tmp;
49672+ }
49673+
49674+ s_tmp->ips = i_tmp;
49675+
49676+insert:
49677+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49678+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49679+ return ERR_PTR(-ENOMEM);
49680+
49681+ return s_tmp;
49682+}
49683+
49684+static int
49685+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49686+{
49687+ struct acl_subject_label s_pre;
49688+ struct acl_subject_label * ret;
49689+ int err;
49690+
49691+ while (userp) {
49692+ if (copy_from_user(&s_pre, userp,
49693+ sizeof (struct acl_subject_label)))
49694+ return -EFAULT;
49695+
49696+ /* do not add nested subjects here, add
49697+ while parsing objects
49698+ */
49699+
49700+ if (s_pre.mode & GR_NESTED) {
49701+ userp = s_pre.prev;
49702+ continue;
49703+ }
49704+
49705+ ret = do_copy_user_subj(userp, role);
49706+
49707+ err = PTR_ERR(ret);
49708+ if (IS_ERR(ret))
49709+ return err;
49710+
49711+ insert_acl_subj_label(ret, role);
49712+
49713+ userp = s_pre.prev;
49714+ }
49715+
49716+ return 0;
49717+}
49718+
49719+static int
49720+copy_user_acl(struct gr_arg *arg)
49721+{
49722+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49723+ struct sprole_pw *sptmp;
49724+ struct gr_hash_struct *ghash;
49725+ uid_t *domainlist;
49726+ unsigned int r_num;
49727+ unsigned int len;
49728+ char *tmp;
49729+ int err = 0;
49730+ __u16 i;
49731+ __u32 num_subjs;
49732+
49733+ /* we need a default and kernel role */
49734+ if (arg->role_db.num_roles < 2)
49735+ return -EINVAL;
49736+
49737+ /* copy special role authentication info from userspace */
49738+
49739+ num_sprole_pws = arg->num_sprole_pws;
49740+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49741+
49742+ if (!acl_special_roles) {
49743+ err = -ENOMEM;
49744+ goto cleanup;
49745+ }
49746+
49747+ for (i = 0; i < num_sprole_pws; i++) {
49748+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49749+ if (!sptmp) {
49750+ err = -ENOMEM;
49751+ goto cleanup;
49752+ }
49753+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49754+ sizeof (struct sprole_pw))) {
49755+ err = -EFAULT;
49756+ goto cleanup;
49757+ }
49758+
49759+ len =
49760+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49761+
49762+ if (!len || len >= GR_SPROLE_LEN) {
49763+ err = -EINVAL;
49764+ goto cleanup;
49765+ }
49766+
49767+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49768+ err = -ENOMEM;
49769+ goto cleanup;
49770+ }
49771+
49772+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49773+ err = -EFAULT;
49774+ goto cleanup;
49775+ }
49776+ tmp[len-1] = '\0';
49777+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49778+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49779+#endif
49780+ sptmp->rolename = tmp;
49781+ acl_special_roles[i] = sptmp;
49782+ }
49783+
49784+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49785+
49786+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49787+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49788+
49789+ if (!r_tmp) {
49790+ err = -ENOMEM;
49791+ goto cleanup;
49792+ }
49793+
49794+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49795+ sizeof (struct acl_role_label *))) {
49796+ err = -EFAULT;
49797+ goto cleanup;
49798+ }
49799+
49800+ if (copy_from_user(r_tmp, r_utmp2,
49801+ sizeof (struct acl_role_label))) {
49802+ err = -EFAULT;
49803+ goto cleanup;
49804+ }
49805+
49806+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49807+
49808+ if (!len || len >= PATH_MAX) {
49809+ err = -EINVAL;
49810+ goto cleanup;
49811+ }
49812+
49813+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49814+ err = -ENOMEM;
49815+ goto cleanup;
49816+ }
49817+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49818+ err = -EFAULT;
49819+ goto cleanup;
49820+ }
49821+ tmp[len-1] = '\0';
49822+ r_tmp->rolename = tmp;
49823+
49824+ if (!strcmp(r_tmp->rolename, "default")
49825+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49826+ default_role = r_tmp;
49827+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49828+ kernel_role = r_tmp;
49829+ }
49830+
49831+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49832+ err = -ENOMEM;
49833+ goto cleanup;
49834+ }
49835+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49836+ err = -EFAULT;
49837+ goto cleanup;
49838+ }
49839+
49840+ r_tmp->hash = ghash;
49841+
49842+ num_subjs = count_user_subjs(r_tmp->hash->first);
49843+
49844+ r_tmp->subj_hash_size = num_subjs;
49845+ r_tmp->subj_hash =
49846+ (struct acl_subject_label **)
49847+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49848+
49849+ if (!r_tmp->subj_hash) {
49850+ err = -ENOMEM;
49851+ goto cleanup;
49852+ }
49853+
49854+ err = copy_user_allowedips(r_tmp);
49855+ if (err)
49856+ goto cleanup;
49857+
49858+ /* copy domain info */
49859+ if (r_tmp->domain_children != NULL) {
49860+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49861+ if (domainlist == NULL) {
49862+ err = -ENOMEM;
49863+ goto cleanup;
49864+ }
49865+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49866+ err = -EFAULT;
49867+ goto cleanup;
49868+ }
49869+ r_tmp->domain_children = domainlist;
49870+ }
49871+
49872+ err = copy_user_transitions(r_tmp);
49873+ if (err)
49874+ goto cleanup;
49875+
49876+ memset(r_tmp->subj_hash, 0,
49877+ r_tmp->subj_hash_size *
49878+ sizeof (struct acl_subject_label *));
49879+
49880+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49881+
49882+ if (err)
49883+ goto cleanup;
49884+
49885+ /* set nested subject list to null */
49886+ r_tmp->hash->first = NULL;
49887+
49888+ insert_acl_role_label(r_tmp);
49889+ }
49890+
49891+ goto return_err;
49892+ cleanup:
49893+ free_variables();
49894+ return_err:
49895+ return err;
49896+
49897+}
49898+
49899+static int
49900+gracl_init(struct gr_arg *args)
49901+{
49902+ int error = 0;
49903+
49904+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49905+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49906+
49907+ if (init_variables(args)) {
49908+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49909+ error = -ENOMEM;
49910+ free_variables();
49911+ goto out;
49912+ }
49913+
49914+ error = copy_user_acl(args);
49915+ free_init_variables();
49916+ if (error) {
49917+ free_variables();
49918+ goto out;
49919+ }
49920+
49921+ if ((error = gr_set_acls(0))) {
49922+ free_variables();
49923+ goto out;
49924+ }
49925+
49926+ pax_open_kernel();
49927+ gr_status |= GR_READY;
49928+ pax_close_kernel();
49929+
49930+ out:
49931+ return error;
49932+}
49933+
49934+/* derived from glibc fnmatch() 0: match, 1: no match*/
49935+
49936+static int
49937+glob_match(const char *p, const char *n)
49938+{
49939+ char c;
49940+
49941+ while ((c = *p++) != '\0') {
49942+ switch (c) {
49943+ case '?':
49944+ if (*n == '\0')
49945+ return 1;
49946+ else if (*n == '/')
49947+ return 1;
49948+ break;
49949+ case '\\':
49950+ if (*n != c)
49951+ return 1;
49952+ break;
49953+ case '*':
49954+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49955+ if (*n == '/')
49956+ return 1;
49957+ else if (c == '?') {
49958+ if (*n == '\0')
49959+ return 1;
49960+ else
49961+ ++n;
49962+ }
49963+ }
49964+ if (c == '\0') {
49965+ return 0;
49966+ } else {
49967+ const char *endp;
49968+
49969+ if ((endp = strchr(n, '/')) == NULL)
49970+ endp = n + strlen(n);
49971+
49972+ if (c == '[') {
49973+ for (--p; n < endp; ++n)
49974+ if (!glob_match(p, n))
49975+ return 0;
49976+ } else if (c == '/') {
49977+ while (*n != '\0' && *n != '/')
49978+ ++n;
49979+ if (*n == '/' && !glob_match(p, n + 1))
49980+ return 0;
49981+ } else {
49982+ for (--p; n < endp; ++n)
49983+ if (*n == c && !glob_match(p, n))
49984+ return 0;
49985+ }
49986+
49987+ return 1;
49988+ }
49989+ case '[':
49990+ {
49991+ int not;
49992+ char cold;
49993+
49994+ if (*n == '\0' || *n == '/')
49995+ return 1;
49996+
49997+ not = (*p == '!' || *p == '^');
49998+ if (not)
49999+ ++p;
50000+
50001+ c = *p++;
50002+ for (;;) {
50003+ unsigned char fn = (unsigned char)*n;
50004+
50005+ if (c == '\0')
50006+ return 1;
50007+ else {
50008+ if (c == fn)
50009+ goto matched;
50010+ cold = c;
50011+ c = *p++;
50012+
50013+ if (c == '-' && *p != ']') {
50014+ unsigned char cend = *p++;
50015+
50016+ if (cend == '\0')
50017+ return 1;
50018+
50019+ if (cold <= fn && fn <= cend)
50020+ goto matched;
50021+
50022+ c = *p++;
50023+ }
50024+ }
50025+
50026+ if (c == ']')
50027+ break;
50028+ }
50029+ if (!not)
50030+ return 1;
50031+ break;
50032+ matched:
50033+ while (c != ']') {
50034+ if (c == '\0')
50035+ return 1;
50036+
50037+ c = *p++;
50038+ }
50039+ if (not)
50040+ return 1;
50041+ }
50042+ break;
50043+ default:
50044+ if (c != *n)
50045+ return 1;
50046+ }
50047+
50048+ ++n;
50049+ }
50050+
50051+ if (*n == '\0')
50052+ return 0;
50053+
50054+ if (*n == '/')
50055+ return 0;
50056+
50057+ return 1;
50058+}
50059+
50060+static struct acl_object_label *
50061+chk_glob_label(struct acl_object_label *globbed,
50062+ struct dentry *dentry, struct vfsmount *mnt, char **path)
50063+{
50064+ struct acl_object_label *tmp;
50065+
50066+ if (*path == NULL)
50067+ *path = gr_to_filename_nolock(dentry, mnt);
50068+
50069+ tmp = globbed;
50070+
50071+ while (tmp) {
50072+ if (!glob_match(tmp->filename, *path))
50073+ return tmp;
50074+ tmp = tmp->next;
50075+ }
50076+
50077+ return NULL;
50078+}
50079+
50080+static struct acl_object_label *
50081+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50082+ const ino_t curr_ino, const dev_t curr_dev,
50083+ const struct acl_subject_label *subj, char **path, const int checkglob)
50084+{
50085+ struct acl_subject_label *tmpsubj;
50086+ struct acl_object_label *retval;
50087+ struct acl_object_label *retval2;
50088+
50089+ tmpsubj = (struct acl_subject_label *) subj;
50090+ read_lock(&gr_inode_lock);
50091+ do {
50092+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50093+ if (retval) {
50094+ if (checkglob && retval->globbed) {
50095+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50096+ (struct vfsmount *)orig_mnt, path);
50097+ if (retval2)
50098+ retval = retval2;
50099+ }
50100+ break;
50101+ }
50102+ } while ((tmpsubj = tmpsubj->parent_subject));
50103+ read_unlock(&gr_inode_lock);
50104+
50105+ return retval;
50106+}
50107+
50108+static __inline__ struct acl_object_label *
50109+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50110+ struct dentry *curr_dentry,
50111+ const struct acl_subject_label *subj, char **path, const int checkglob)
50112+{
50113+ int newglob = checkglob;
50114+ ino_t inode;
50115+ dev_t device;
50116+
50117+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50118+ as we don't want a / * rule to match instead of the / object
50119+ don't do this for create lookups that call this function though, since they're looking up
50120+ on the parent and thus need globbing checks on all paths
50121+ */
50122+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50123+ newglob = GR_NO_GLOB;
50124+
50125+ spin_lock(&curr_dentry->d_lock);
50126+ inode = curr_dentry->d_inode->i_ino;
50127+ device = __get_dev(curr_dentry);
50128+ spin_unlock(&curr_dentry->d_lock);
50129+
50130+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50131+}
50132+
50133+static struct acl_object_label *
50134+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50135+ const struct acl_subject_label *subj, char *path, const int checkglob)
50136+{
50137+ struct dentry *dentry = (struct dentry *) l_dentry;
50138+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50139+ struct acl_object_label *retval;
50140+ struct dentry *parent;
50141+
50142+ write_seqlock(&rename_lock);
50143+ br_read_lock(vfsmount_lock);
50144+
50145+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50146+#ifdef CONFIG_NET
50147+ mnt == sock_mnt ||
50148+#endif
50149+#ifdef CONFIG_HUGETLBFS
50150+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50151+#endif
50152+ /* ignore Eric Biederman */
50153+ IS_PRIVATE(l_dentry->d_inode))) {
50154+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50155+ goto out;
50156+ }
50157+
50158+ for (;;) {
50159+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50160+ break;
50161+
50162+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50163+ if (mnt->mnt_parent == mnt)
50164+ break;
50165+
50166+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50167+ if (retval != NULL)
50168+ goto out;
50169+
50170+ dentry = mnt->mnt_mountpoint;
50171+ mnt = mnt->mnt_parent;
50172+ continue;
50173+ }
50174+
50175+ parent = dentry->d_parent;
50176+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50177+ if (retval != NULL)
50178+ goto out;
50179+
50180+ dentry = parent;
50181+ }
50182+
50183+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50184+
50185+ /* real_root is pinned so we don't have to hold a reference */
50186+ if (retval == NULL)
50187+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50188+out:
50189+ br_read_unlock(vfsmount_lock);
50190+ write_sequnlock(&rename_lock);
50191+
50192+ BUG_ON(retval == NULL);
50193+
50194+ return retval;
50195+}
50196+
50197+static __inline__ struct acl_object_label *
50198+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50199+ const struct acl_subject_label *subj)
50200+{
50201+ char *path = NULL;
50202+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50203+}
50204+
50205+static __inline__ struct acl_object_label *
50206+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50207+ const struct acl_subject_label *subj)
50208+{
50209+ char *path = NULL;
50210+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50211+}
50212+
50213+static __inline__ struct acl_object_label *
50214+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50215+ const struct acl_subject_label *subj, char *path)
50216+{
50217+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50218+}
50219+
50220+static struct acl_subject_label *
50221+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50222+ const struct acl_role_label *role)
50223+{
50224+ struct dentry *dentry = (struct dentry *) l_dentry;
50225+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50226+ struct acl_subject_label *retval;
50227+ struct dentry *parent;
50228+
50229+ write_seqlock(&rename_lock);
50230+ br_read_lock(vfsmount_lock);
50231+
50232+ for (;;) {
50233+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50234+ break;
50235+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50236+ if (mnt->mnt_parent == mnt)
50237+ break;
50238+
50239+ spin_lock(&dentry->d_lock);
50240+ read_lock(&gr_inode_lock);
50241+ retval =
50242+ lookup_acl_subj_label(dentry->d_inode->i_ino,
50243+ __get_dev(dentry), role);
50244+ read_unlock(&gr_inode_lock);
50245+ spin_unlock(&dentry->d_lock);
50246+ if (retval != NULL)
50247+ goto out;
50248+
50249+ dentry = mnt->mnt_mountpoint;
50250+ mnt = mnt->mnt_parent;
50251+ continue;
50252+ }
50253+
50254+ spin_lock(&dentry->d_lock);
50255+ read_lock(&gr_inode_lock);
50256+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50257+ __get_dev(dentry), role);
50258+ read_unlock(&gr_inode_lock);
50259+ parent = dentry->d_parent;
50260+ spin_unlock(&dentry->d_lock);
50261+
50262+ if (retval != NULL)
50263+ goto out;
50264+
50265+ dentry = parent;
50266+ }
50267+
50268+ spin_lock(&dentry->d_lock);
50269+ read_lock(&gr_inode_lock);
50270+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50271+ __get_dev(dentry), role);
50272+ read_unlock(&gr_inode_lock);
50273+ spin_unlock(&dentry->d_lock);
50274+
50275+ if (unlikely(retval == NULL)) {
50276+ /* real_root is pinned, we don't need to hold a reference */
50277+ read_lock(&gr_inode_lock);
50278+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50279+ __get_dev(real_root.dentry), role);
50280+ read_unlock(&gr_inode_lock);
50281+ }
50282+out:
50283+ br_read_unlock(vfsmount_lock);
50284+ write_sequnlock(&rename_lock);
50285+
50286+ BUG_ON(retval == NULL);
50287+
50288+ return retval;
50289+}
50290+
50291+static void
50292+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50293+{
50294+ struct task_struct *task = current;
50295+ const struct cred *cred = current_cred();
50296+
50297+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50298+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50299+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50300+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50301+
50302+ return;
50303+}
50304+
50305+static void
50306+gr_log_learn_sysctl(const char *path, const __u32 mode)
50307+{
50308+ struct task_struct *task = current;
50309+ const struct cred *cred = current_cred();
50310+
50311+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50312+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50313+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50314+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50315+
50316+ return;
50317+}
50318+
50319+static void
50320+gr_log_learn_id_change(const char type, const unsigned int real,
50321+ const unsigned int effective, const unsigned int fs)
50322+{
50323+ struct task_struct *task = current;
50324+ const struct cred *cred = current_cred();
50325+
50326+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50327+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50328+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50329+ type, real, effective, fs, &task->signal->saved_ip);
50330+
50331+ return;
50332+}
50333+
50334+__u32
50335+gr_search_file(const struct dentry * dentry, const __u32 mode,
50336+ const struct vfsmount * mnt)
50337+{
50338+ __u32 retval = mode;
50339+ struct acl_subject_label *curracl;
50340+ struct acl_object_label *currobj;
50341+
50342+ if (unlikely(!(gr_status & GR_READY)))
50343+ return (mode & ~GR_AUDITS);
50344+
50345+ curracl = current->acl;
50346+
50347+ currobj = chk_obj_label(dentry, mnt, curracl);
50348+ retval = currobj->mode & mode;
50349+
50350+ /* if we're opening a specified transfer file for writing
50351+ (e.g. /dev/initctl), then transfer our role to init
50352+ */
50353+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50354+ current->role->roletype & GR_ROLE_PERSIST)) {
50355+ struct task_struct *task = init_pid_ns.child_reaper;
50356+
50357+ if (task->role != current->role) {
50358+ task->acl_sp_role = 0;
50359+ task->acl_role_id = current->acl_role_id;
50360+ task->role = current->role;
50361+ rcu_read_lock();
50362+ read_lock(&grsec_exec_file_lock);
50363+ gr_apply_subject_to_task(task);
50364+ read_unlock(&grsec_exec_file_lock);
50365+ rcu_read_unlock();
50366+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50367+ }
50368+ }
50369+
50370+ if (unlikely
50371+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50372+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50373+ __u32 new_mode = mode;
50374+
50375+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50376+
50377+ retval = new_mode;
50378+
50379+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50380+ new_mode |= GR_INHERIT;
50381+
50382+ if (!(mode & GR_NOLEARN))
50383+ gr_log_learn(dentry, mnt, new_mode);
50384+ }
50385+
50386+ return retval;
50387+}
50388+
50389+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50390+ const struct dentry *parent,
50391+ const struct vfsmount *mnt)
50392+{
50393+ struct name_entry *match;
50394+ struct acl_object_label *matchpo;
50395+ struct acl_subject_label *curracl;
50396+ char *path;
50397+
50398+ if (unlikely(!(gr_status & GR_READY)))
50399+ return NULL;
50400+
50401+ preempt_disable();
50402+ path = gr_to_filename_rbac(new_dentry, mnt);
50403+ match = lookup_name_entry_create(path);
50404+
50405+ curracl = current->acl;
50406+
50407+ if (match) {
50408+ read_lock(&gr_inode_lock);
50409+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50410+ read_unlock(&gr_inode_lock);
50411+
50412+ if (matchpo) {
50413+ preempt_enable();
50414+ return matchpo;
50415+ }
50416+ }
50417+
50418+ // lookup parent
50419+
50420+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50421+
50422+ preempt_enable();
50423+ return matchpo;
50424+}
50425+
50426+__u32
50427+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50428+ const struct vfsmount * mnt, const __u32 mode)
50429+{
50430+ struct acl_object_label *matchpo;
50431+ __u32 retval;
50432+
50433+ if (unlikely(!(gr_status & GR_READY)))
50434+ return (mode & ~GR_AUDITS);
50435+
50436+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50437+
50438+ retval = matchpo->mode & mode;
50439+
50440+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50441+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50442+ __u32 new_mode = mode;
50443+
50444+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50445+
50446+ gr_log_learn(new_dentry, mnt, new_mode);
50447+ return new_mode;
50448+ }
50449+
50450+ return retval;
50451+}
50452+
50453+__u32
50454+gr_check_link(const struct dentry * new_dentry,
50455+ const struct dentry * parent_dentry,
50456+ const struct vfsmount * parent_mnt,
50457+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50458+{
50459+ struct acl_object_label *obj;
50460+ __u32 oldmode, newmode;
50461+ __u32 needmode;
50462+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50463+ GR_DELETE | GR_INHERIT;
50464+
50465+ if (unlikely(!(gr_status & GR_READY)))
50466+ return (GR_CREATE | GR_LINK);
50467+
50468+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50469+ oldmode = obj->mode;
50470+
50471+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50472+ newmode = obj->mode;
50473+
50474+ needmode = newmode & checkmodes;
50475+
50476+ // old name for hardlink must have at least the permissions of the new name
50477+ if ((oldmode & needmode) != needmode)
50478+ goto bad;
50479+
50480+ // if old name had restrictions/auditing, make sure the new name does as well
50481+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50482+
50483+ // don't allow hardlinking of suid/sgid files without permission
50484+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50485+ needmode |= GR_SETID;
50486+
50487+ if ((newmode & needmode) != needmode)
50488+ goto bad;
50489+
50490+ // enforce minimum permissions
50491+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50492+ return newmode;
50493+bad:
50494+ needmode = oldmode;
50495+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50496+ needmode |= GR_SETID;
50497+
50498+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50499+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50500+ return (GR_CREATE | GR_LINK);
50501+ } else if (newmode & GR_SUPPRESS)
50502+ return GR_SUPPRESS;
50503+ else
50504+ return 0;
50505+}
50506+
50507+int
50508+gr_check_hidden_task(const struct task_struct *task)
50509+{
50510+ if (unlikely(!(gr_status & GR_READY)))
50511+ return 0;
50512+
50513+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50514+ return 1;
50515+
50516+ return 0;
50517+}
50518+
50519+int
50520+gr_check_protected_task(const struct task_struct *task)
50521+{
50522+ if (unlikely(!(gr_status & GR_READY) || !task))
50523+ return 0;
50524+
50525+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50526+ task->acl != current->acl)
50527+ return 1;
50528+
50529+ return 0;
50530+}
50531+
50532+int
50533+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50534+{
50535+ struct task_struct *p;
50536+ int ret = 0;
50537+
50538+ if (unlikely(!(gr_status & GR_READY) || !pid))
50539+ return ret;
50540+
50541+ read_lock(&tasklist_lock);
50542+ do_each_pid_task(pid, type, p) {
50543+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50544+ p->acl != current->acl) {
50545+ ret = 1;
50546+ goto out;
50547+ }
50548+ } while_each_pid_task(pid, type, p);
50549+out:
50550+ read_unlock(&tasklist_lock);
50551+
50552+ return ret;
50553+}
50554+
50555+void
50556+gr_copy_label(struct task_struct *tsk)
50557+{
50558+ tsk->signal->used_accept = 0;
50559+ tsk->acl_sp_role = 0;
50560+ tsk->acl_role_id = current->acl_role_id;
50561+ tsk->acl = current->acl;
50562+ tsk->role = current->role;
50563+ tsk->signal->curr_ip = current->signal->curr_ip;
50564+ tsk->signal->saved_ip = current->signal->saved_ip;
50565+ if (current->exec_file)
50566+ get_file(current->exec_file);
50567+ tsk->exec_file = current->exec_file;
50568+ tsk->is_writable = current->is_writable;
50569+ if (unlikely(current->signal->used_accept)) {
50570+ current->signal->curr_ip = 0;
50571+ current->signal->saved_ip = 0;
50572+ }
50573+
50574+ return;
50575+}
50576+
50577+static void
50578+gr_set_proc_res(struct task_struct *task)
50579+{
50580+ struct acl_subject_label *proc;
50581+ unsigned short i;
50582+
50583+ proc = task->acl;
50584+
50585+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50586+ return;
50587+
50588+ for (i = 0; i < RLIM_NLIMITS; i++) {
50589+ if (!(proc->resmask & (1 << i)))
50590+ continue;
50591+
50592+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50593+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50594+ }
50595+
50596+ return;
50597+}
50598+
50599+extern int __gr_process_user_ban(struct user_struct *user);
50600+
50601+int
50602+gr_check_user_change(int real, int effective, int fs)
50603+{
50604+ unsigned int i;
50605+ __u16 num;
50606+ uid_t *uidlist;
50607+ int curuid;
50608+ int realok = 0;
50609+ int effectiveok = 0;
50610+ int fsok = 0;
50611+
50612+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50613+ struct user_struct *user;
50614+
50615+ if (real == -1)
50616+ goto skipit;
50617+
50618+ user = find_user(real);
50619+ if (user == NULL)
50620+ goto skipit;
50621+
50622+ if (__gr_process_user_ban(user)) {
50623+ /* for find_user */
50624+ free_uid(user);
50625+ return 1;
50626+ }
50627+
50628+ /* for find_user */
50629+ free_uid(user);
50630+
50631+skipit:
50632+#endif
50633+
50634+ if (unlikely(!(gr_status & GR_READY)))
50635+ return 0;
50636+
50637+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50638+ gr_log_learn_id_change('u', real, effective, fs);
50639+
50640+ num = current->acl->user_trans_num;
50641+ uidlist = current->acl->user_transitions;
50642+
50643+ if (uidlist == NULL)
50644+ return 0;
50645+
50646+ if (real == -1)
50647+ realok = 1;
50648+ if (effective == -1)
50649+ effectiveok = 1;
50650+ if (fs == -1)
50651+ fsok = 1;
50652+
50653+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50654+ for (i = 0; i < num; i++) {
50655+ curuid = (int)uidlist[i];
50656+ if (real == curuid)
50657+ realok = 1;
50658+ if (effective == curuid)
50659+ effectiveok = 1;
50660+ if (fs == curuid)
50661+ fsok = 1;
50662+ }
50663+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50664+ for (i = 0; i < num; i++) {
50665+ curuid = (int)uidlist[i];
50666+ if (real == curuid)
50667+ break;
50668+ if (effective == curuid)
50669+ break;
50670+ if (fs == curuid)
50671+ break;
50672+ }
50673+ /* not in deny list */
50674+ if (i == num) {
50675+ realok = 1;
50676+ effectiveok = 1;
50677+ fsok = 1;
50678+ }
50679+ }
50680+
50681+ if (realok && effectiveok && fsok)
50682+ return 0;
50683+ else {
50684+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50685+ return 1;
50686+ }
50687+}
50688+
50689+int
50690+gr_check_group_change(int real, int effective, int fs)
50691+{
50692+ unsigned int i;
50693+ __u16 num;
50694+ gid_t *gidlist;
50695+ int curgid;
50696+ int realok = 0;
50697+ int effectiveok = 0;
50698+ int fsok = 0;
50699+
50700+ if (unlikely(!(gr_status & GR_READY)))
50701+ return 0;
50702+
50703+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50704+ gr_log_learn_id_change('g', real, effective, fs);
50705+
50706+ num = current->acl->group_trans_num;
50707+ gidlist = current->acl->group_transitions;
50708+
50709+ if (gidlist == NULL)
50710+ return 0;
50711+
50712+ if (real == -1)
50713+ realok = 1;
50714+ if (effective == -1)
50715+ effectiveok = 1;
50716+ if (fs == -1)
50717+ fsok = 1;
50718+
50719+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50720+ for (i = 0; i < num; i++) {
50721+ curgid = (int)gidlist[i];
50722+ if (real == curgid)
50723+ realok = 1;
50724+ if (effective == curgid)
50725+ effectiveok = 1;
50726+ if (fs == curgid)
50727+ fsok = 1;
50728+ }
50729+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50730+ for (i = 0; i < num; i++) {
50731+ curgid = (int)gidlist[i];
50732+ if (real == curgid)
50733+ break;
50734+ if (effective == curgid)
50735+ break;
50736+ if (fs == curgid)
50737+ break;
50738+ }
50739+ /* not in deny list */
50740+ if (i == num) {
50741+ realok = 1;
50742+ effectiveok = 1;
50743+ fsok = 1;
50744+ }
50745+ }
50746+
50747+ if (realok && effectiveok && fsok)
50748+ return 0;
50749+ else {
50750+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50751+ return 1;
50752+ }
50753+}
50754+
50755+void
50756+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50757+{
50758+ struct acl_role_label *role = task->role;
50759+ struct acl_subject_label *subj = NULL;
50760+ struct acl_object_label *obj;
50761+ struct file *filp;
50762+
50763+ if (unlikely(!(gr_status & GR_READY)))
50764+ return;
50765+
50766+ filp = task->exec_file;
50767+
50768+ /* kernel process, we'll give them the kernel role */
50769+ if (unlikely(!filp)) {
50770+ task->role = kernel_role;
50771+ task->acl = kernel_role->root_label;
50772+ return;
50773+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50774+ role = lookup_acl_role_label(task, uid, gid);
50775+
50776+ /* perform subject lookup in possibly new role
50777+ we can use this result below in the case where role == task->role
50778+ */
50779+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50780+
50781+ /* if we changed uid/gid, but result in the same role
50782+ and are using inheritance, don't lose the inherited subject
50783+ if current subject is other than what normal lookup
50784+ would result in, we arrived via inheritance, don't
50785+ lose subject
50786+ */
50787+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50788+ (subj == task->acl)))
50789+ task->acl = subj;
50790+
50791+ task->role = role;
50792+
50793+ task->is_writable = 0;
50794+
50795+ /* ignore additional mmap checks for processes that are writable
50796+ by the default ACL */
50797+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50798+ if (unlikely(obj->mode & GR_WRITE))
50799+ task->is_writable = 1;
50800+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50801+ if (unlikely(obj->mode & GR_WRITE))
50802+ task->is_writable = 1;
50803+
50804+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50805+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50806+#endif
50807+
50808+ gr_set_proc_res(task);
50809+
50810+ return;
50811+}
50812+
50813+int
50814+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50815+ const int unsafe_flags)
50816+{
50817+ struct task_struct *task = current;
50818+ struct acl_subject_label *newacl;
50819+ struct acl_object_label *obj;
50820+ __u32 retmode;
50821+
50822+ if (unlikely(!(gr_status & GR_READY)))
50823+ return 0;
50824+
50825+ newacl = chk_subj_label(dentry, mnt, task->role);
50826+
50827+ task_lock(task);
50828+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50829+ !(task->role->roletype & GR_ROLE_GOD) &&
50830+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50831+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50832+ task_unlock(task);
50833+ if (unsafe_flags & LSM_UNSAFE_SHARE)
50834+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50835+ else
50836+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50837+ return -EACCES;
50838+ }
50839+ task_unlock(task);
50840+
50841+ obj = chk_obj_label(dentry, mnt, task->acl);
50842+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50843+
50844+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50845+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50846+ if (obj->nested)
50847+ task->acl = obj->nested;
50848+ else
50849+ task->acl = newacl;
50850+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50851+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50852+
50853+ task->is_writable = 0;
50854+
50855+ /* ignore additional mmap checks for processes that are writable
50856+ by the default ACL */
50857+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50858+ if (unlikely(obj->mode & GR_WRITE))
50859+ task->is_writable = 1;
50860+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50861+ if (unlikely(obj->mode & GR_WRITE))
50862+ task->is_writable = 1;
50863+
50864+ gr_set_proc_res(task);
50865+
50866+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50867+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50868+#endif
50869+ return 0;
50870+}
50871+
50872+/* always called with valid inodev ptr */
50873+static void
50874+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50875+{
50876+ struct acl_object_label *matchpo;
50877+ struct acl_subject_label *matchps;
50878+ struct acl_subject_label *subj;
50879+ struct acl_role_label *role;
50880+ unsigned int x;
50881+
50882+ FOR_EACH_ROLE_START(role)
50883+ FOR_EACH_SUBJECT_START(role, subj, x)
50884+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50885+ matchpo->mode |= GR_DELETED;
50886+ FOR_EACH_SUBJECT_END(subj,x)
50887+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50888+ if (subj->inode == ino && subj->device == dev)
50889+ subj->mode |= GR_DELETED;
50890+ FOR_EACH_NESTED_SUBJECT_END(subj)
50891+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50892+ matchps->mode |= GR_DELETED;
50893+ FOR_EACH_ROLE_END(role)
50894+
50895+ inodev->nentry->deleted = 1;
50896+
50897+ return;
50898+}
50899+
50900+void
50901+gr_handle_delete(const ino_t ino, const dev_t dev)
50902+{
50903+ struct inodev_entry *inodev;
50904+
50905+ if (unlikely(!(gr_status & GR_READY)))
50906+ return;
50907+
50908+ write_lock(&gr_inode_lock);
50909+ inodev = lookup_inodev_entry(ino, dev);
50910+ if (inodev != NULL)
50911+ do_handle_delete(inodev, ino, dev);
50912+ write_unlock(&gr_inode_lock);
50913+
50914+ return;
50915+}
50916+
50917+static void
50918+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50919+ const ino_t newinode, const dev_t newdevice,
50920+ struct acl_subject_label *subj)
50921+{
50922+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50923+ struct acl_object_label *match;
50924+
50925+ match = subj->obj_hash[index];
50926+
50927+ while (match && (match->inode != oldinode ||
50928+ match->device != olddevice ||
50929+ !(match->mode & GR_DELETED)))
50930+ match = match->next;
50931+
50932+ if (match && (match->inode == oldinode)
50933+ && (match->device == olddevice)
50934+ && (match->mode & GR_DELETED)) {
50935+ if (match->prev == NULL) {
50936+ subj->obj_hash[index] = match->next;
50937+ if (match->next != NULL)
50938+ match->next->prev = NULL;
50939+ } else {
50940+ match->prev->next = match->next;
50941+ if (match->next != NULL)
50942+ match->next->prev = match->prev;
50943+ }
50944+ match->prev = NULL;
50945+ match->next = NULL;
50946+ match->inode = newinode;
50947+ match->device = newdevice;
50948+ match->mode &= ~GR_DELETED;
50949+
50950+ insert_acl_obj_label(match, subj);
50951+ }
50952+
50953+ return;
50954+}
50955+
50956+static void
50957+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50958+ const ino_t newinode, const dev_t newdevice,
50959+ struct acl_role_label *role)
50960+{
50961+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50962+ struct acl_subject_label *match;
50963+
50964+ match = role->subj_hash[index];
50965+
50966+ while (match && (match->inode != oldinode ||
50967+ match->device != olddevice ||
50968+ !(match->mode & GR_DELETED)))
50969+ match = match->next;
50970+
50971+ if (match && (match->inode == oldinode)
50972+ && (match->device == olddevice)
50973+ && (match->mode & GR_DELETED)) {
50974+ if (match->prev == NULL) {
50975+ role->subj_hash[index] = match->next;
50976+ if (match->next != NULL)
50977+ match->next->prev = NULL;
50978+ } else {
50979+ match->prev->next = match->next;
50980+ if (match->next != NULL)
50981+ match->next->prev = match->prev;
50982+ }
50983+ match->prev = NULL;
50984+ match->next = NULL;
50985+ match->inode = newinode;
50986+ match->device = newdevice;
50987+ match->mode &= ~GR_DELETED;
50988+
50989+ insert_acl_subj_label(match, role);
50990+ }
50991+
50992+ return;
50993+}
50994+
50995+static void
50996+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50997+ const ino_t newinode, const dev_t newdevice)
50998+{
50999+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51000+ struct inodev_entry *match;
51001+
51002+ match = inodev_set.i_hash[index];
51003+
51004+ while (match && (match->nentry->inode != oldinode ||
51005+ match->nentry->device != olddevice || !match->nentry->deleted))
51006+ match = match->next;
51007+
51008+ if (match && (match->nentry->inode == oldinode)
51009+ && (match->nentry->device == olddevice) &&
51010+ match->nentry->deleted) {
51011+ if (match->prev == NULL) {
51012+ inodev_set.i_hash[index] = match->next;
51013+ if (match->next != NULL)
51014+ match->next->prev = NULL;
51015+ } else {
51016+ match->prev->next = match->next;
51017+ if (match->next != NULL)
51018+ match->next->prev = match->prev;
51019+ }
51020+ match->prev = NULL;
51021+ match->next = NULL;
51022+ match->nentry->inode = newinode;
51023+ match->nentry->device = newdevice;
51024+ match->nentry->deleted = 0;
51025+
51026+ insert_inodev_entry(match);
51027+ }
51028+
51029+ return;
51030+}
51031+
51032+static void
51033+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51034+{
51035+ struct acl_subject_label *subj;
51036+ struct acl_role_label *role;
51037+ unsigned int x;
51038+
51039+ FOR_EACH_ROLE_START(role)
51040+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51041+
51042+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
51043+ if ((subj->inode == ino) && (subj->device == dev)) {
51044+ subj->inode = ino;
51045+ subj->device = dev;
51046+ }
51047+ FOR_EACH_NESTED_SUBJECT_END(subj)
51048+ FOR_EACH_SUBJECT_START(role, subj, x)
51049+ update_acl_obj_label(matchn->inode, matchn->device,
51050+ ino, dev, subj);
51051+ FOR_EACH_SUBJECT_END(subj,x)
51052+ FOR_EACH_ROLE_END(role)
51053+
51054+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51055+
51056+ return;
51057+}
51058+
51059+static void
51060+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51061+ const struct vfsmount *mnt)
51062+{
51063+ ino_t ino = dentry->d_inode->i_ino;
51064+ dev_t dev = __get_dev(dentry);
51065+
51066+ __do_handle_create(matchn, ino, dev);
51067+
51068+ return;
51069+}
51070+
51071+void
51072+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51073+{
51074+ struct name_entry *matchn;
51075+
51076+ if (unlikely(!(gr_status & GR_READY)))
51077+ return;
51078+
51079+ preempt_disable();
51080+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51081+
51082+ if (unlikely((unsigned long)matchn)) {
51083+ write_lock(&gr_inode_lock);
51084+ do_handle_create(matchn, dentry, mnt);
51085+ write_unlock(&gr_inode_lock);
51086+ }
51087+ preempt_enable();
51088+
51089+ return;
51090+}
51091+
51092+void
51093+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51094+{
51095+ struct name_entry *matchn;
51096+
51097+ if (unlikely(!(gr_status & GR_READY)))
51098+ return;
51099+
51100+ preempt_disable();
51101+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51102+
51103+ if (unlikely((unsigned long)matchn)) {
51104+ write_lock(&gr_inode_lock);
51105+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51106+ write_unlock(&gr_inode_lock);
51107+ }
51108+ preempt_enable();
51109+
51110+ return;
51111+}
51112+
51113+void
51114+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51115+ struct dentry *old_dentry,
51116+ struct dentry *new_dentry,
51117+ struct vfsmount *mnt, const __u8 replace)
51118+{
51119+ struct name_entry *matchn;
51120+ struct inodev_entry *inodev;
51121+ struct inode *inode = new_dentry->d_inode;
51122+ ino_t old_ino = old_dentry->d_inode->i_ino;
51123+ dev_t old_dev = __get_dev(old_dentry);
51124+
51125+ /* vfs_rename swaps the name and parent link for old_dentry and
51126+ new_dentry
51127+ at this point, old_dentry has the new name, parent link, and inode
51128+ for the renamed file
51129+ if a file is being replaced by a rename, new_dentry has the inode
51130+ and name for the replaced file
51131+ */
51132+
51133+ if (unlikely(!(gr_status & GR_READY)))
51134+ return;
51135+
51136+ preempt_disable();
51137+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51138+
51139+ /* we wouldn't have to check d_inode if it weren't for
51140+ NFS silly-renaming
51141+ */
51142+
51143+ write_lock(&gr_inode_lock);
51144+ if (unlikely(replace && inode)) {
51145+ ino_t new_ino = inode->i_ino;
51146+ dev_t new_dev = __get_dev(new_dentry);
51147+
51148+ inodev = lookup_inodev_entry(new_ino, new_dev);
51149+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51150+ do_handle_delete(inodev, new_ino, new_dev);
51151+ }
51152+
51153+ inodev = lookup_inodev_entry(old_ino, old_dev);
51154+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51155+ do_handle_delete(inodev, old_ino, old_dev);
51156+
51157+ if (unlikely((unsigned long)matchn))
51158+ do_handle_create(matchn, old_dentry, mnt);
51159+
51160+ write_unlock(&gr_inode_lock);
51161+ preempt_enable();
51162+
51163+ return;
51164+}
51165+
51166+static int
51167+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51168+ unsigned char **sum)
51169+{
51170+ struct acl_role_label *r;
51171+ struct role_allowed_ip *ipp;
51172+ struct role_transition *trans;
51173+ unsigned int i;
51174+ int found = 0;
51175+ u32 curr_ip = current->signal->curr_ip;
51176+
51177+ current->signal->saved_ip = curr_ip;
51178+
51179+ /* check transition table */
51180+
51181+ for (trans = current->role->transitions; trans; trans = trans->next) {
51182+ if (!strcmp(rolename, trans->rolename)) {
51183+ found = 1;
51184+ break;
51185+ }
51186+ }
51187+
51188+ if (!found)
51189+ return 0;
51190+
51191+ /* handle special roles that do not require authentication
51192+ and check ip */
51193+
51194+ FOR_EACH_ROLE_START(r)
51195+ if (!strcmp(rolename, r->rolename) &&
51196+ (r->roletype & GR_ROLE_SPECIAL)) {
51197+ found = 0;
51198+ if (r->allowed_ips != NULL) {
51199+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51200+ if ((ntohl(curr_ip) & ipp->netmask) ==
51201+ (ntohl(ipp->addr) & ipp->netmask))
51202+ found = 1;
51203+ }
51204+ } else
51205+ found = 2;
51206+ if (!found)
51207+ return 0;
51208+
51209+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51210+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51211+ *salt = NULL;
51212+ *sum = NULL;
51213+ return 1;
51214+ }
51215+ }
51216+ FOR_EACH_ROLE_END(r)
51217+
51218+ for (i = 0; i < num_sprole_pws; i++) {
51219+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51220+ *salt = acl_special_roles[i]->salt;
51221+ *sum = acl_special_roles[i]->sum;
51222+ return 1;
51223+ }
51224+ }
51225+
51226+ return 0;
51227+}
51228+
51229+static void
51230+assign_special_role(char *rolename)
51231+{
51232+ struct acl_object_label *obj;
51233+ struct acl_role_label *r;
51234+ struct acl_role_label *assigned = NULL;
51235+ struct task_struct *tsk;
51236+ struct file *filp;
51237+
51238+ FOR_EACH_ROLE_START(r)
51239+ if (!strcmp(rolename, r->rolename) &&
51240+ (r->roletype & GR_ROLE_SPECIAL)) {
51241+ assigned = r;
51242+ break;
51243+ }
51244+ FOR_EACH_ROLE_END(r)
51245+
51246+ if (!assigned)
51247+ return;
51248+
51249+ read_lock(&tasklist_lock);
51250+ read_lock(&grsec_exec_file_lock);
51251+
51252+ tsk = current->real_parent;
51253+ if (tsk == NULL)
51254+ goto out_unlock;
51255+
51256+ filp = tsk->exec_file;
51257+ if (filp == NULL)
51258+ goto out_unlock;
51259+
51260+ tsk->is_writable = 0;
51261+
51262+ tsk->acl_sp_role = 1;
51263+ tsk->acl_role_id = ++acl_sp_role_value;
51264+ tsk->role = assigned;
51265+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51266+
51267+ /* ignore additional mmap checks for processes that are writable
51268+ by the default ACL */
51269+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51270+ if (unlikely(obj->mode & GR_WRITE))
51271+ tsk->is_writable = 1;
51272+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51273+ if (unlikely(obj->mode & GR_WRITE))
51274+ tsk->is_writable = 1;
51275+
51276+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51277+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51278+#endif
51279+
51280+out_unlock:
51281+ read_unlock(&grsec_exec_file_lock);
51282+ read_unlock(&tasklist_lock);
51283+ return;
51284+}
51285+
51286+int gr_check_secure_terminal(struct task_struct *task)
51287+{
51288+ struct task_struct *p, *p2, *p3;
51289+ struct files_struct *files;
51290+ struct fdtable *fdt;
51291+ struct file *our_file = NULL, *file;
51292+ int i;
51293+
51294+ if (task->signal->tty == NULL)
51295+ return 1;
51296+
51297+ files = get_files_struct(task);
51298+ if (files != NULL) {
51299+ rcu_read_lock();
51300+ fdt = files_fdtable(files);
51301+ for (i=0; i < fdt->max_fds; i++) {
51302+ file = fcheck_files(files, i);
51303+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51304+ get_file(file);
51305+ our_file = file;
51306+ }
51307+ }
51308+ rcu_read_unlock();
51309+ put_files_struct(files);
51310+ }
51311+
51312+ if (our_file == NULL)
51313+ return 1;
51314+
51315+ read_lock(&tasklist_lock);
51316+ do_each_thread(p2, p) {
51317+ files = get_files_struct(p);
51318+ if (files == NULL ||
51319+ (p->signal && p->signal->tty == task->signal->tty)) {
51320+ if (files != NULL)
51321+ put_files_struct(files);
51322+ continue;
51323+ }
51324+ rcu_read_lock();
51325+ fdt = files_fdtable(files);
51326+ for (i=0; i < fdt->max_fds; i++) {
51327+ file = fcheck_files(files, i);
51328+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51329+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51330+ p3 = task;
51331+ while (p3->pid > 0) {
51332+ if (p3 == p)
51333+ break;
51334+ p3 = p3->real_parent;
51335+ }
51336+ if (p3 == p)
51337+ break;
51338+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51339+ gr_handle_alertkill(p);
51340+ rcu_read_unlock();
51341+ put_files_struct(files);
51342+ read_unlock(&tasklist_lock);
51343+ fput(our_file);
51344+ return 0;
51345+ }
51346+ }
51347+ rcu_read_unlock();
51348+ put_files_struct(files);
51349+ } while_each_thread(p2, p);
51350+ read_unlock(&tasklist_lock);
51351+
51352+ fput(our_file);
51353+ return 1;
51354+}
51355+
51356+ssize_t
51357+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51358+{
51359+ struct gr_arg_wrapper uwrap;
51360+ unsigned char *sprole_salt = NULL;
51361+ unsigned char *sprole_sum = NULL;
51362+ int error = sizeof (struct gr_arg_wrapper);
51363+ int error2 = 0;
51364+
51365+ mutex_lock(&gr_dev_mutex);
51366+
51367+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51368+ error = -EPERM;
51369+ goto out;
51370+ }
51371+
51372+ if (count != sizeof (struct gr_arg_wrapper)) {
51373+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51374+ error = -EINVAL;
51375+ goto out;
51376+ }
51377+
51378+
51379+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51380+ gr_auth_expires = 0;
51381+ gr_auth_attempts = 0;
51382+ }
51383+
51384+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51385+ error = -EFAULT;
51386+ goto out;
51387+ }
51388+
51389+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51390+ error = -EINVAL;
51391+ goto out;
51392+ }
51393+
51394+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51395+ error = -EFAULT;
51396+ goto out;
51397+ }
51398+
51399+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51400+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51401+ time_after(gr_auth_expires, get_seconds())) {
51402+ error = -EBUSY;
51403+ goto out;
51404+ }
51405+
51406+ /* if non-root trying to do anything other than use a special role,
51407+ do not attempt authentication, do not count towards authentication
51408+ locking
51409+ */
51410+
51411+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51412+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51413+ current_uid()) {
51414+ error = -EPERM;
51415+ goto out;
51416+ }
51417+
51418+ /* ensure pw and special role name are null terminated */
51419+
51420+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51421+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51422+
51423+ /* Okay.
51424+ * We have our enough of the argument structure..(we have yet
51425+ * to copy_from_user the tables themselves) . Copy the tables
51426+ * only if we need them, i.e. for loading operations. */
51427+
51428+ switch (gr_usermode->mode) {
51429+ case GR_STATUS:
51430+ if (gr_status & GR_READY) {
51431+ error = 1;
51432+ if (!gr_check_secure_terminal(current))
51433+ error = 3;
51434+ } else
51435+ error = 2;
51436+ goto out;
51437+ case GR_SHUTDOWN:
51438+ if ((gr_status & GR_READY)
51439+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51440+ pax_open_kernel();
51441+ gr_status &= ~GR_READY;
51442+ pax_close_kernel();
51443+
51444+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51445+ free_variables();
51446+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51447+ memset(gr_system_salt, 0, GR_SALT_LEN);
51448+ memset(gr_system_sum, 0, GR_SHA_LEN);
51449+ } else if (gr_status & GR_READY) {
51450+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51451+ error = -EPERM;
51452+ } else {
51453+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51454+ error = -EAGAIN;
51455+ }
51456+ break;
51457+ case GR_ENABLE:
51458+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51459+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51460+ else {
51461+ if (gr_status & GR_READY)
51462+ error = -EAGAIN;
51463+ else
51464+ error = error2;
51465+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51466+ }
51467+ break;
51468+ case GR_RELOAD:
51469+ if (!(gr_status & GR_READY)) {
51470+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51471+ error = -EAGAIN;
51472+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51473+ preempt_disable();
51474+
51475+ pax_open_kernel();
51476+ gr_status &= ~GR_READY;
51477+ pax_close_kernel();
51478+
51479+ free_variables();
51480+ if (!(error2 = gracl_init(gr_usermode))) {
51481+ preempt_enable();
51482+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51483+ } else {
51484+ preempt_enable();
51485+ error = error2;
51486+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51487+ }
51488+ } else {
51489+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51490+ error = -EPERM;
51491+ }
51492+ break;
51493+ case GR_SEGVMOD:
51494+ if (unlikely(!(gr_status & GR_READY))) {
51495+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51496+ error = -EAGAIN;
51497+ break;
51498+ }
51499+
51500+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51501+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51502+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51503+ struct acl_subject_label *segvacl;
51504+ segvacl =
51505+ lookup_acl_subj_label(gr_usermode->segv_inode,
51506+ gr_usermode->segv_device,
51507+ current->role);
51508+ if (segvacl) {
51509+ segvacl->crashes = 0;
51510+ segvacl->expires = 0;
51511+ }
51512+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51513+ gr_remove_uid(gr_usermode->segv_uid);
51514+ }
51515+ } else {
51516+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51517+ error = -EPERM;
51518+ }
51519+ break;
51520+ case GR_SPROLE:
51521+ case GR_SPROLEPAM:
51522+ if (unlikely(!(gr_status & GR_READY))) {
51523+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51524+ error = -EAGAIN;
51525+ break;
51526+ }
51527+
51528+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51529+ current->role->expires = 0;
51530+ current->role->auth_attempts = 0;
51531+ }
51532+
51533+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51534+ time_after(current->role->expires, get_seconds())) {
51535+ error = -EBUSY;
51536+ goto out;
51537+ }
51538+
51539+ if (lookup_special_role_auth
51540+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51541+ && ((!sprole_salt && !sprole_sum)
51542+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51543+ char *p = "";
51544+ assign_special_role(gr_usermode->sp_role);
51545+ read_lock(&tasklist_lock);
51546+ if (current->real_parent)
51547+ p = current->real_parent->role->rolename;
51548+ read_unlock(&tasklist_lock);
51549+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51550+ p, acl_sp_role_value);
51551+ } else {
51552+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51553+ error = -EPERM;
51554+ if(!(current->role->auth_attempts++))
51555+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51556+
51557+ goto out;
51558+ }
51559+ break;
51560+ case GR_UNSPROLE:
51561+ if (unlikely(!(gr_status & GR_READY))) {
51562+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51563+ error = -EAGAIN;
51564+ break;
51565+ }
51566+
51567+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51568+ char *p = "";
51569+ int i = 0;
51570+
51571+ read_lock(&tasklist_lock);
51572+ if (current->real_parent) {
51573+ p = current->real_parent->role->rolename;
51574+ i = current->real_parent->acl_role_id;
51575+ }
51576+ read_unlock(&tasklist_lock);
51577+
51578+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51579+ gr_set_acls(1);
51580+ } else {
51581+ error = -EPERM;
51582+ goto out;
51583+ }
51584+ break;
51585+ default:
51586+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51587+ error = -EINVAL;
51588+ break;
51589+ }
51590+
51591+ if (error != -EPERM)
51592+ goto out;
51593+
51594+ if(!(gr_auth_attempts++))
51595+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51596+
51597+ out:
51598+ mutex_unlock(&gr_dev_mutex);
51599+ return error;
51600+}
51601+
51602+/* must be called with
51603+ rcu_read_lock();
51604+ read_lock(&tasklist_lock);
51605+ read_lock(&grsec_exec_file_lock);
51606+*/
51607+int gr_apply_subject_to_task(struct task_struct *task)
51608+{
51609+ struct acl_object_label *obj;
51610+ char *tmpname;
51611+ struct acl_subject_label *tmpsubj;
51612+ struct file *filp;
51613+ struct name_entry *nmatch;
51614+
51615+ filp = task->exec_file;
51616+ if (filp == NULL)
51617+ return 0;
51618+
51619+ /* the following is to apply the correct subject
51620+ on binaries running when the RBAC system
51621+ is enabled, when the binaries have been
51622+ replaced or deleted since their execution
51623+ -----
51624+ when the RBAC system starts, the inode/dev
51625+ from exec_file will be one the RBAC system
51626+ is unaware of. It only knows the inode/dev
51627+ of the present file on disk, or the absence
51628+ of it.
51629+ */
51630+ preempt_disable();
51631+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51632+
51633+ nmatch = lookup_name_entry(tmpname);
51634+ preempt_enable();
51635+ tmpsubj = NULL;
51636+ if (nmatch) {
51637+ if (nmatch->deleted)
51638+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51639+ else
51640+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51641+ if (tmpsubj != NULL)
51642+ task->acl = tmpsubj;
51643+ }
51644+ if (tmpsubj == NULL)
51645+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51646+ task->role);
51647+ if (task->acl) {
51648+ task->is_writable = 0;
51649+ /* ignore additional mmap checks for processes that are writable
51650+ by the default ACL */
51651+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51652+ if (unlikely(obj->mode & GR_WRITE))
51653+ task->is_writable = 1;
51654+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51655+ if (unlikely(obj->mode & GR_WRITE))
51656+ task->is_writable = 1;
51657+
51658+ gr_set_proc_res(task);
51659+
51660+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51661+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51662+#endif
51663+ } else {
51664+ return 1;
51665+ }
51666+
51667+ return 0;
51668+}
51669+
51670+int
51671+gr_set_acls(const int type)
51672+{
51673+ struct task_struct *task, *task2;
51674+ struct acl_role_label *role = current->role;
51675+ __u16 acl_role_id = current->acl_role_id;
51676+ const struct cred *cred;
51677+ int ret;
51678+
51679+ rcu_read_lock();
51680+ read_lock(&tasklist_lock);
51681+ read_lock(&grsec_exec_file_lock);
51682+ do_each_thread(task2, task) {
51683+ /* check to see if we're called from the exit handler,
51684+ if so, only replace ACLs that have inherited the admin
51685+ ACL */
51686+
51687+ if (type && (task->role != role ||
51688+ task->acl_role_id != acl_role_id))
51689+ continue;
51690+
51691+ task->acl_role_id = 0;
51692+ task->acl_sp_role = 0;
51693+
51694+ if (task->exec_file) {
51695+ cred = __task_cred(task);
51696+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51697+ ret = gr_apply_subject_to_task(task);
51698+ if (ret) {
51699+ read_unlock(&grsec_exec_file_lock);
51700+ read_unlock(&tasklist_lock);
51701+ rcu_read_unlock();
51702+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51703+ return ret;
51704+ }
51705+ } else {
51706+ // it's a kernel process
51707+ task->role = kernel_role;
51708+ task->acl = kernel_role->root_label;
51709+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51710+ task->acl->mode &= ~GR_PROCFIND;
51711+#endif
51712+ }
51713+ } while_each_thread(task2, task);
51714+ read_unlock(&grsec_exec_file_lock);
51715+ read_unlock(&tasklist_lock);
51716+ rcu_read_unlock();
51717+
51718+ return 0;
51719+}
51720+
51721+void
51722+gr_learn_resource(const struct task_struct *task,
51723+ const int res, const unsigned long wanted, const int gt)
51724+{
51725+ struct acl_subject_label *acl;
51726+ const struct cred *cred;
51727+
51728+ if (unlikely((gr_status & GR_READY) &&
51729+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51730+ goto skip_reslog;
51731+
51732+#ifdef CONFIG_GRKERNSEC_RESLOG
51733+ gr_log_resource(task, res, wanted, gt);
51734+#endif
51735+ skip_reslog:
51736+
51737+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51738+ return;
51739+
51740+ acl = task->acl;
51741+
51742+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51743+ !(acl->resmask & (1 << (unsigned short) res))))
51744+ return;
51745+
51746+ if (wanted >= acl->res[res].rlim_cur) {
51747+ unsigned long res_add;
51748+
51749+ res_add = wanted;
51750+ switch (res) {
51751+ case RLIMIT_CPU:
51752+ res_add += GR_RLIM_CPU_BUMP;
51753+ break;
51754+ case RLIMIT_FSIZE:
51755+ res_add += GR_RLIM_FSIZE_BUMP;
51756+ break;
51757+ case RLIMIT_DATA:
51758+ res_add += GR_RLIM_DATA_BUMP;
51759+ break;
51760+ case RLIMIT_STACK:
51761+ res_add += GR_RLIM_STACK_BUMP;
51762+ break;
51763+ case RLIMIT_CORE:
51764+ res_add += GR_RLIM_CORE_BUMP;
51765+ break;
51766+ case RLIMIT_RSS:
51767+ res_add += GR_RLIM_RSS_BUMP;
51768+ break;
51769+ case RLIMIT_NPROC:
51770+ res_add += GR_RLIM_NPROC_BUMP;
51771+ break;
51772+ case RLIMIT_NOFILE:
51773+ res_add += GR_RLIM_NOFILE_BUMP;
51774+ break;
51775+ case RLIMIT_MEMLOCK:
51776+ res_add += GR_RLIM_MEMLOCK_BUMP;
51777+ break;
51778+ case RLIMIT_AS:
51779+ res_add += GR_RLIM_AS_BUMP;
51780+ break;
51781+ case RLIMIT_LOCKS:
51782+ res_add += GR_RLIM_LOCKS_BUMP;
51783+ break;
51784+ case RLIMIT_SIGPENDING:
51785+ res_add += GR_RLIM_SIGPENDING_BUMP;
51786+ break;
51787+ case RLIMIT_MSGQUEUE:
51788+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51789+ break;
51790+ case RLIMIT_NICE:
51791+ res_add += GR_RLIM_NICE_BUMP;
51792+ break;
51793+ case RLIMIT_RTPRIO:
51794+ res_add += GR_RLIM_RTPRIO_BUMP;
51795+ break;
51796+ case RLIMIT_RTTIME:
51797+ res_add += GR_RLIM_RTTIME_BUMP;
51798+ break;
51799+ }
51800+
51801+ acl->res[res].rlim_cur = res_add;
51802+
51803+ if (wanted > acl->res[res].rlim_max)
51804+ acl->res[res].rlim_max = res_add;
51805+
51806+ /* only log the subject filename, since resource logging is supported for
51807+ single-subject learning only */
51808+ rcu_read_lock();
51809+ cred = __task_cred(task);
51810+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51811+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51812+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51813+ "", (unsigned long) res, &task->signal->saved_ip);
51814+ rcu_read_unlock();
51815+ }
51816+
51817+ return;
51818+}
51819+
51820+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51821+void
51822+pax_set_initial_flags(struct linux_binprm *bprm)
51823+{
51824+ struct task_struct *task = current;
51825+ struct acl_subject_label *proc;
51826+ unsigned long flags;
51827+
51828+ if (unlikely(!(gr_status & GR_READY)))
51829+ return;
51830+
51831+ flags = pax_get_flags(task);
51832+
51833+ proc = task->acl;
51834+
51835+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51836+ flags &= ~MF_PAX_PAGEEXEC;
51837+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51838+ flags &= ~MF_PAX_SEGMEXEC;
51839+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51840+ flags &= ~MF_PAX_RANDMMAP;
51841+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51842+ flags &= ~MF_PAX_EMUTRAMP;
51843+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51844+ flags &= ~MF_PAX_MPROTECT;
51845+
51846+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51847+ flags |= MF_PAX_PAGEEXEC;
51848+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51849+ flags |= MF_PAX_SEGMEXEC;
51850+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51851+ flags |= MF_PAX_RANDMMAP;
51852+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51853+ flags |= MF_PAX_EMUTRAMP;
51854+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51855+ flags |= MF_PAX_MPROTECT;
51856+
51857+ pax_set_flags(task, flags);
51858+
51859+ return;
51860+}
51861+#endif
51862+
51863+#ifdef CONFIG_SYSCTL
51864+/* Eric Biederman likes breaking userland ABI and every inode-based security
51865+ system to save 35kb of memory */
51866+
51867+/* we modify the passed in filename, but adjust it back before returning */
51868+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51869+{
51870+ struct name_entry *nmatch;
51871+ char *p, *lastp = NULL;
51872+ struct acl_object_label *obj = NULL, *tmp;
51873+ struct acl_subject_label *tmpsubj;
51874+ char c = '\0';
51875+
51876+ read_lock(&gr_inode_lock);
51877+
51878+ p = name + len - 1;
51879+ do {
51880+ nmatch = lookup_name_entry(name);
51881+ if (lastp != NULL)
51882+ *lastp = c;
51883+
51884+ if (nmatch == NULL)
51885+ goto next_component;
51886+ tmpsubj = current->acl;
51887+ do {
51888+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51889+ if (obj != NULL) {
51890+ tmp = obj->globbed;
51891+ while (tmp) {
51892+ if (!glob_match(tmp->filename, name)) {
51893+ obj = tmp;
51894+ goto found_obj;
51895+ }
51896+ tmp = tmp->next;
51897+ }
51898+ goto found_obj;
51899+ }
51900+ } while ((tmpsubj = tmpsubj->parent_subject));
51901+next_component:
51902+ /* end case */
51903+ if (p == name)
51904+ break;
51905+
51906+ while (*p != '/')
51907+ p--;
51908+ if (p == name)
51909+ lastp = p + 1;
51910+ else {
51911+ lastp = p;
51912+ p--;
51913+ }
51914+ c = *lastp;
51915+ *lastp = '\0';
51916+ } while (1);
51917+found_obj:
51918+ read_unlock(&gr_inode_lock);
51919+ /* obj returned will always be non-null */
51920+ return obj;
51921+}
51922+
51923+/* returns 0 when allowing, non-zero on error
51924+ op of 0 is used for readdir, so we don't log the names of hidden files
51925+*/
51926+__u32
51927+gr_handle_sysctl(const struct ctl_table *table, const int op)
51928+{
51929+ struct ctl_table *tmp;
51930+ const char *proc_sys = "/proc/sys";
51931+ char *path;
51932+ struct acl_object_label *obj;
51933+ unsigned short len = 0, pos = 0, depth = 0, i;
51934+ __u32 err = 0;
51935+ __u32 mode = 0;
51936+
51937+ if (unlikely(!(gr_status & GR_READY)))
51938+ return 0;
51939+
51940+ /* for now, ignore operations on non-sysctl entries if it's not a
51941+ readdir*/
51942+ if (table->child != NULL && op != 0)
51943+ return 0;
51944+
51945+ mode |= GR_FIND;
51946+ /* it's only a read if it's an entry, read on dirs is for readdir */
51947+ if (op & MAY_READ)
51948+ mode |= GR_READ;
51949+ if (op & MAY_WRITE)
51950+ mode |= GR_WRITE;
51951+
51952+ preempt_disable();
51953+
51954+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51955+
51956+ /* it's only a read/write if it's an actual entry, not a dir
51957+ (which are opened for readdir)
51958+ */
51959+
51960+ /* convert the requested sysctl entry into a pathname */
51961+
51962+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51963+ len += strlen(tmp->procname);
51964+ len++;
51965+ depth++;
51966+ }
51967+
51968+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51969+ /* deny */
51970+ goto out;
51971+ }
51972+
51973+ memset(path, 0, PAGE_SIZE);
51974+
51975+ memcpy(path, proc_sys, strlen(proc_sys));
51976+
51977+ pos += strlen(proc_sys);
51978+
51979+ for (; depth > 0; depth--) {
51980+ path[pos] = '/';
51981+ pos++;
51982+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51983+ if (depth == i) {
51984+ memcpy(path + pos, tmp->procname,
51985+ strlen(tmp->procname));
51986+ pos += strlen(tmp->procname);
51987+ }
51988+ i++;
51989+ }
51990+ }
51991+
51992+ obj = gr_lookup_by_name(path, pos);
51993+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51994+
51995+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51996+ ((err & mode) != mode))) {
51997+ __u32 new_mode = mode;
51998+
51999+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52000+
52001+ err = 0;
52002+ gr_log_learn_sysctl(path, new_mode);
52003+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52004+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52005+ err = -ENOENT;
52006+ } else if (!(err & GR_FIND)) {
52007+ err = -ENOENT;
52008+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52009+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52010+ path, (mode & GR_READ) ? " reading" : "",
52011+ (mode & GR_WRITE) ? " writing" : "");
52012+ err = -EACCES;
52013+ } else if ((err & mode) != mode) {
52014+ err = -EACCES;
52015+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52016+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52017+ path, (mode & GR_READ) ? " reading" : "",
52018+ (mode & GR_WRITE) ? " writing" : "");
52019+ err = 0;
52020+ } else
52021+ err = 0;
52022+
52023+ out:
52024+ preempt_enable();
52025+
52026+ return err;
52027+}
52028+#endif
52029+
52030+int
52031+gr_handle_proc_ptrace(struct task_struct *task)
52032+{
52033+ struct file *filp;
52034+ struct task_struct *tmp = task;
52035+ struct task_struct *curtemp = current;
52036+ __u32 retmode;
52037+
52038+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52039+ if (unlikely(!(gr_status & GR_READY)))
52040+ return 0;
52041+#endif
52042+
52043+ read_lock(&tasklist_lock);
52044+ read_lock(&grsec_exec_file_lock);
52045+ filp = task->exec_file;
52046+
52047+ while (tmp->pid > 0) {
52048+ if (tmp == curtemp)
52049+ break;
52050+ tmp = tmp->real_parent;
52051+ }
52052+
52053+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52054+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52055+ read_unlock(&grsec_exec_file_lock);
52056+ read_unlock(&tasklist_lock);
52057+ return 1;
52058+ }
52059+
52060+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52061+ if (!(gr_status & GR_READY)) {
52062+ read_unlock(&grsec_exec_file_lock);
52063+ read_unlock(&tasklist_lock);
52064+ return 0;
52065+ }
52066+#endif
52067+
52068+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52069+ read_unlock(&grsec_exec_file_lock);
52070+ read_unlock(&tasklist_lock);
52071+
52072+ if (retmode & GR_NOPTRACE)
52073+ return 1;
52074+
52075+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52076+ && (current->acl != task->acl || (current->acl != current->role->root_label
52077+ && current->pid != task->pid)))
52078+ return 1;
52079+
52080+ return 0;
52081+}
52082+
52083+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52084+{
52085+ if (unlikely(!(gr_status & GR_READY)))
52086+ return;
52087+
52088+ if (!(current->role->roletype & GR_ROLE_GOD))
52089+ return;
52090+
52091+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52092+ p->role->rolename, gr_task_roletype_to_char(p),
52093+ p->acl->filename);
52094+}
52095+
52096+int
52097+gr_handle_ptrace(struct task_struct *task, const long request)
52098+{
52099+ struct task_struct *tmp = task;
52100+ struct task_struct *curtemp = current;
52101+ __u32 retmode;
52102+
52103+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52104+ if (unlikely(!(gr_status & GR_READY)))
52105+ return 0;
52106+#endif
52107+
52108+ read_lock(&tasklist_lock);
52109+ while (tmp->pid > 0) {
52110+ if (tmp == curtemp)
52111+ break;
52112+ tmp = tmp->real_parent;
52113+ }
52114+
52115+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52116+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52117+ read_unlock(&tasklist_lock);
52118+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52119+ return 1;
52120+ }
52121+ read_unlock(&tasklist_lock);
52122+
52123+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52124+ if (!(gr_status & GR_READY))
52125+ return 0;
52126+#endif
52127+
52128+ read_lock(&grsec_exec_file_lock);
52129+ if (unlikely(!task->exec_file)) {
52130+ read_unlock(&grsec_exec_file_lock);
52131+ return 0;
52132+ }
52133+
52134+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52135+ read_unlock(&grsec_exec_file_lock);
52136+
52137+ if (retmode & GR_NOPTRACE) {
52138+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52139+ return 1;
52140+ }
52141+
52142+ if (retmode & GR_PTRACERD) {
52143+ switch (request) {
52144+ case PTRACE_SEIZE:
52145+ case PTRACE_POKETEXT:
52146+ case PTRACE_POKEDATA:
52147+ case PTRACE_POKEUSR:
52148+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52149+ case PTRACE_SETREGS:
52150+ case PTRACE_SETFPREGS:
52151+#endif
52152+#ifdef CONFIG_X86
52153+ case PTRACE_SETFPXREGS:
52154+#endif
52155+#ifdef CONFIG_ALTIVEC
52156+ case PTRACE_SETVRREGS:
52157+#endif
52158+ return 1;
52159+ default:
52160+ return 0;
52161+ }
52162+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
52163+ !(current->role->roletype & GR_ROLE_GOD) &&
52164+ (current->acl != task->acl)) {
52165+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52166+ return 1;
52167+ }
52168+
52169+ return 0;
52170+}
52171+
52172+static int is_writable_mmap(const struct file *filp)
52173+{
52174+ struct task_struct *task = current;
52175+ struct acl_object_label *obj, *obj2;
52176+
52177+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52178+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52179+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52180+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52181+ task->role->root_label);
52182+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52183+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52184+ return 1;
52185+ }
52186+ }
52187+ return 0;
52188+}
52189+
52190+int
52191+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52192+{
52193+ __u32 mode;
52194+
52195+ if (unlikely(!file || !(prot & PROT_EXEC)))
52196+ return 1;
52197+
52198+ if (is_writable_mmap(file))
52199+ return 0;
52200+
52201+ mode =
52202+ gr_search_file(file->f_path.dentry,
52203+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52204+ file->f_path.mnt);
52205+
52206+ if (!gr_tpe_allow(file))
52207+ return 0;
52208+
52209+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52210+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52211+ return 0;
52212+ } else if (unlikely(!(mode & GR_EXEC))) {
52213+ return 0;
52214+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52215+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52216+ return 1;
52217+ }
52218+
52219+ return 1;
52220+}
52221+
52222+int
52223+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52224+{
52225+ __u32 mode;
52226+
52227+ if (unlikely(!file || !(prot & PROT_EXEC)))
52228+ return 1;
52229+
52230+ if (is_writable_mmap(file))
52231+ return 0;
52232+
52233+ mode =
52234+ gr_search_file(file->f_path.dentry,
52235+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52236+ file->f_path.mnt);
52237+
52238+ if (!gr_tpe_allow(file))
52239+ return 0;
52240+
52241+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52242+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52243+ return 0;
52244+ } else if (unlikely(!(mode & GR_EXEC))) {
52245+ return 0;
52246+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52247+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52248+ return 1;
52249+ }
52250+
52251+ return 1;
52252+}
52253+
52254+void
52255+gr_acl_handle_psacct(struct task_struct *task, const long code)
52256+{
52257+ unsigned long runtime;
52258+ unsigned long cputime;
52259+ unsigned int wday, cday;
52260+ __u8 whr, chr;
52261+ __u8 wmin, cmin;
52262+ __u8 wsec, csec;
52263+ struct timespec timeval;
52264+
52265+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52266+ !(task->acl->mode & GR_PROCACCT)))
52267+ return;
52268+
52269+ do_posix_clock_monotonic_gettime(&timeval);
52270+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52271+ wday = runtime / (3600 * 24);
52272+ runtime -= wday * (3600 * 24);
52273+ whr = runtime / 3600;
52274+ runtime -= whr * 3600;
52275+ wmin = runtime / 60;
52276+ runtime -= wmin * 60;
52277+ wsec = runtime;
52278+
52279+ cputime = (task->utime + task->stime) / HZ;
52280+ cday = cputime / (3600 * 24);
52281+ cputime -= cday * (3600 * 24);
52282+ chr = cputime / 3600;
52283+ cputime -= chr * 3600;
52284+ cmin = cputime / 60;
52285+ cputime -= cmin * 60;
52286+ csec = cputime;
52287+
52288+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52289+
52290+ return;
52291+}
52292+
52293+void gr_set_kernel_label(struct task_struct *task)
52294+{
52295+ if (gr_status & GR_READY) {
52296+ task->role = kernel_role;
52297+ task->acl = kernel_role->root_label;
52298+ }
52299+ return;
52300+}
52301+
52302+#ifdef CONFIG_TASKSTATS
52303+int gr_is_taskstats_denied(int pid)
52304+{
52305+ struct task_struct *task;
52306+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52307+ const struct cred *cred;
52308+#endif
52309+ int ret = 0;
52310+
52311+ /* restrict taskstats viewing to un-chrooted root users
52312+ who have the 'view' subject flag if the RBAC system is enabled
52313+ */
52314+
52315+ rcu_read_lock();
52316+ read_lock(&tasklist_lock);
52317+ task = find_task_by_vpid(pid);
52318+ if (task) {
52319+#ifdef CONFIG_GRKERNSEC_CHROOT
52320+ if (proc_is_chrooted(task))
52321+ ret = -EACCES;
52322+#endif
52323+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52324+ cred = __task_cred(task);
52325+#ifdef CONFIG_GRKERNSEC_PROC_USER
52326+ if (cred->uid != 0)
52327+ ret = -EACCES;
52328+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52329+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52330+ ret = -EACCES;
52331+#endif
52332+#endif
52333+ if (gr_status & GR_READY) {
52334+ if (!(task->acl->mode & GR_VIEW))
52335+ ret = -EACCES;
52336+ }
52337+ } else
52338+ ret = -ENOENT;
52339+
52340+ read_unlock(&tasklist_lock);
52341+ rcu_read_unlock();
52342+
52343+ return ret;
52344+}
52345+#endif
52346+
52347+/* AUXV entries are filled via a descendant of search_binary_handler
52348+ after we've already applied the subject for the target
52349+*/
52350+int gr_acl_enable_at_secure(void)
52351+{
52352+ if (unlikely(!(gr_status & GR_READY)))
52353+ return 0;
52354+
52355+ if (current->acl->mode & GR_ATSECURE)
52356+ return 1;
52357+
52358+ return 0;
52359+}
52360+
52361+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52362+{
52363+ struct task_struct *task = current;
52364+ struct dentry *dentry = file->f_path.dentry;
52365+ struct vfsmount *mnt = file->f_path.mnt;
52366+ struct acl_object_label *obj, *tmp;
52367+ struct acl_subject_label *subj;
52368+ unsigned int bufsize;
52369+ int is_not_root;
52370+ char *path;
52371+ dev_t dev = __get_dev(dentry);
52372+
52373+ if (unlikely(!(gr_status & GR_READY)))
52374+ return 1;
52375+
52376+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52377+ return 1;
52378+
52379+ /* ignore Eric Biederman */
52380+ if (IS_PRIVATE(dentry->d_inode))
52381+ return 1;
52382+
52383+ subj = task->acl;
52384+ do {
52385+ obj = lookup_acl_obj_label(ino, dev, subj);
52386+ if (obj != NULL)
52387+ return (obj->mode & GR_FIND) ? 1 : 0;
52388+ } while ((subj = subj->parent_subject));
52389+
52390+ /* this is purely an optimization since we're looking for an object
52391+ for the directory we're doing a readdir on
52392+ if it's possible for any globbed object to match the entry we're
52393+ filling into the directory, then the object we find here will be
52394+ an anchor point with attached globbed objects
52395+ */
52396+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52397+ if (obj->globbed == NULL)
52398+ return (obj->mode & GR_FIND) ? 1 : 0;
52399+
52400+ is_not_root = ((obj->filename[0] == '/') &&
52401+ (obj->filename[1] == '\0')) ? 0 : 1;
52402+ bufsize = PAGE_SIZE - namelen - is_not_root;
52403+
52404+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52405+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52406+ return 1;
52407+
52408+ preempt_disable();
52409+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52410+ bufsize);
52411+
52412+ bufsize = strlen(path);
52413+
52414+ /* if base is "/", don't append an additional slash */
52415+ if (is_not_root)
52416+ *(path + bufsize) = '/';
52417+ memcpy(path + bufsize + is_not_root, name, namelen);
52418+ *(path + bufsize + namelen + is_not_root) = '\0';
52419+
52420+ tmp = obj->globbed;
52421+ while (tmp) {
52422+ if (!glob_match(tmp->filename, path)) {
52423+ preempt_enable();
52424+ return (tmp->mode & GR_FIND) ? 1 : 0;
52425+ }
52426+ tmp = tmp->next;
52427+ }
52428+ preempt_enable();
52429+ return (obj->mode & GR_FIND) ? 1 : 0;
52430+}
52431+
52432+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52433+EXPORT_SYMBOL(gr_acl_is_enabled);
52434+#endif
52435+EXPORT_SYMBOL(gr_learn_resource);
52436+EXPORT_SYMBOL(gr_set_kernel_label);
52437+#ifdef CONFIG_SECURITY
52438+EXPORT_SYMBOL(gr_check_user_change);
52439+EXPORT_SYMBOL(gr_check_group_change);
52440+#endif
52441+
52442diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52443new file mode 100644
52444index 0000000..34fefda
52445--- /dev/null
52446+++ b/grsecurity/gracl_alloc.c
52447@@ -0,0 +1,105 @@
52448+#include <linux/kernel.h>
52449+#include <linux/mm.h>
52450+#include <linux/slab.h>
52451+#include <linux/vmalloc.h>
52452+#include <linux/gracl.h>
52453+#include <linux/grsecurity.h>
52454+
52455+static unsigned long alloc_stack_next = 1;
52456+static unsigned long alloc_stack_size = 1;
52457+static void **alloc_stack;
52458+
52459+static __inline__ int
52460+alloc_pop(void)
52461+{
52462+ if (alloc_stack_next == 1)
52463+ return 0;
52464+
52465+ kfree(alloc_stack[alloc_stack_next - 2]);
52466+
52467+ alloc_stack_next--;
52468+
52469+ return 1;
52470+}
52471+
52472+static __inline__ int
52473+alloc_push(void *buf)
52474+{
52475+ if (alloc_stack_next >= alloc_stack_size)
52476+ return 1;
52477+
52478+ alloc_stack[alloc_stack_next - 1] = buf;
52479+
52480+ alloc_stack_next++;
52481+
52482+ return 0;
52483+}
52484+
52485+void *
52486+acl_alloc(unsigned long len)
52487+{
52488+ void *ret = NULL;
52489+
52490+ if (!len || len > PAGE_SIZE)
52491+ goto out;
52492+
52493+ ret = kmalloc(len, GFP_KERNEL);
52494+
52495+ if (ret) {
52496+ if (alloc_push(ret)) {
52497+ kfree(ret);
52498+ ret = NULL;
52499+ }
52500+ }
52501+
52502+out:
52503+ return ret;
52504+}
52505+
52506+void *
52507+acl_alloc_num(unsigned long num, unsigned long len)
52508+{
52509+ if (!len || (num > (PAGE_SIZE / len)))
52510+ return NULL;
52511+
52512+ return acl_alloc(num * len);
52513+}
52514+
52515+void
52516+acl_free_all(void)
52517+{
52518+ if (gr_acl_is_enabled() || !alloc_stack)
52519+ return;
52520+
52521+ while (alloc_pop()) ;
52522+
52523+ if (alloc_stack) {
52524+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52525+ kfree(alloc_stack);
52526+ else
52527+ vfree(alloc_stack);
52528+ }
52529+
52530+ alloc_stack = NULL;
52531+ alloc_stack_size = 1;
52532+ alloc_stack_next = 1;
52533+
52534+ return;
52535+}
52536+
52537+int
52538+acl_alloc_stack_init(unsigned long size)
52539+{
52540+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52541+ alloc_stack =
52542+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52543+ else
52544+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52545+
52546+ alloc_stack_size = size;
52547+
52548+ if (!alloc_stack)
52549+ return 0;
52550+ else
52551+ return 1;
52552+}
52553diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52554new file mode 100644
52555index 0000000..955ddfb
52556--- /dev/null
52557+++ b/grsecurity/gracl_cap.c
52558@@ -0,0 +1,101 @@
52559+#include <linux/kernel.h>
52560+#include <linux/module.h>
52561+#include <linux/sched.h>
52562+#include <linux/gracl.h>
52563+#include <linux/grsecurity.h>
52564+#include <linux/grinternal.h>
52565+
52566+extern const char *captab_log[];
52567+extern int captab_log_entries;
52568+
52569+int
52570+gr_acl_is_capable(const int cap)
52571+{
52572+ struct task_struct *task = current;
52573+ const struct cred *cred = current_cred();
52574+ struct acl_subject_label *curracl;
52575+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52576+ kernel_cap_t cap_audit = __cap_empty_set;
52577+
52578+ if (!gr_acl_is_enabled())
52579+ return 1;
52580+
52581+ curracl = task->acl;
52582+
52583+ cap_drop = curracl->cap_lower;
52584+ cap_mask = curracl->cap_mask;
52585+ cap_audit = curracl->cap_invert_audit;
52586+
52587+ while ((curracl = curracl->parent_subject)) {
52588+ /* if the cap isn't specified in the current computed mask but is specified in the
52589+ current level subject, and is lowered in the current level subject, then add
52590+ it to the set of dropped capabilities
52591+ otherwise, add the current level subject's mask to the current computed mask
52592+ */
52593+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52594+ cap_raise(cap_mask, cap);
52595+ if (cap_raised(curracl->cap_lower, cap))
52596+ cap_raise(cap_drop, cap);
52597+ if (cap_raised(curracl->cap_invert_audit, cap))
52598+ cap_raise(cap_audit, cap);
52599+ }
52600+ }
52601+
52602+ if (!cap_raised(cap_drop, cap)) {
52603+ if (cap_raised(cap_audit, cap))
52604+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52605+ return 1;
52606+ }
52607+
52608+ curracl = task->acl;
52609+
52610+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52611+ && cap_raised(cred->cap_effective, cap)) {
52612+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52613+ task->role->roletype, cred->uid,
52614+ cred->gid, task->exec_file ?
52615+ gr_to_filename(task->exec_file->f_path.dentry,
52616+ task->exec_file->f_path.mnt) : curracl->filename,
52617+ curracl->filename, 0UL,
52618+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52619+ return 1;
52620+ }
52621+
52622+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52623+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52624+ return 0;
52625+}
52626+
52627+int
52628+gr_acl_is_capable_nolog(const int cap)
52629+{
52630+ struct acl_subject_label *curracl;
52631+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52632+
52633+ if (!gr_acl_is_enabled())
52634+ return 1;
52635+
52636+ curracl = current->acl;
52637+
52638+ cap_drop = curracl->cap_lower;
52639+ cap_mask = curracl->cap_mask;
52640+
52641+ while ((curracl = curracl->parent_subject)) {
52642+ /* if the cap isn't specified in the current computed mask but is specified in the
52643+ current level subject, and is lowered in the current level subject, then add
52644+ it to the set of dropped capabilities
52645+ otherwise, add the current level subject's mask to the current computed mask
52646+ */
52647+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52648+ cap_raise(cap_mask, cap);
52649+ if (cap_raised(curracl->cap_lower, cap))
52650+ cap_raise(cap_drop, cap);
52651+ }
52652+ }
52653+
52654+ if (!cap_raised(cap_drop, cap))
52655+ return 1;
52656+
52657+ return 0;
52658+}
52659+
52660diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52661new file mode 100644
52662index 0000000..4eda5c3
52663--- /dev/null
52664+++ b/grsecurity/gracl_fs.c
52665@@ -0,0 +1,433 @@
52666+#include <linux/kernel.h>
52667+#include <linux/sched.h>
52668+#include <linux/types.h>
52669+#include <linux/fs.h>
52670+#include <linux/file.h>
52671+#include <linux/stat.h>
52672+#include <linux/grsecurity.h>
52673+#include <linux/grinternal.h>
52674+#include <linux/gracl.h>
52675+
52676+__u32
52677+gr_acl_handle_hidden_file(const struct dentry * dentry,
52678+ const struct vfsmount * mnt)
52679+{
52680+ __u32 mode;
52681+
52682+ if (unlikely(!dentry->d_inode))
52683+ return GR_FIND;
52684+
52685+ mode =
52686+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52687+
52688+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52689+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52690+ return mode;
52691+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52692+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52693+ return 0;
52694+ } else if (unlikely(!(mode & GR_FIND)))
52695+ return 0;
52696+
52697+ return GR_FIND;
52698+}
52699+
52700+__u32
52701+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52702+ int acc_mode)
52703+{
52704+ __u32 reqmode = GR_FIND;
52705+ __u32 mode;
52706+
52707+ if (unlikely(!dentry->d_inode))
52708+ return reqmode;
52709+
52710+ if (acc_mode & MAY_APPEND)
52711+ reqmode |= GR_APPEND;
52712+ else if (acc_mode & MAY_WRITE)
52713+ reqmode |= GR_WRITE;
52714+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52715+ reqmode |= GR_READ;
52716+
52717+ mode =
52718+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52719+ mnt);
52720+
52721+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52722+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52723+ reqmode & GR_READ ? " reading" : "",
52724+ reqmode & GR_WRITE ? " writing" : reqmode &
52725+ GR_APPEND ? " appending" : "");
52726+ return reqmode;
52727+ } else
52728+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52729+ {
52730+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52731+ reqmode & GR_READ ? " reading" : "",
52732+ reqmode & GR_WRITE ? " writing" : reqmode &
52733+ GR_APPEND ? " appending" : "");
52734+ return 0;
52735+ } else if (unlikely((mode & reqmode) != reqmode))
52736+ return 0;
52737+
52738+ return reqmode;
52739+}
52740+
52741+__u32
52742+gr_acl_handle_creat(const struct dentry * dentry,
52743+ const struct dentry * p_dentry,
52744+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52745+ const int imode)
52746+{
52747+ __u32 reqmode = GR_WRITE | GR_CREATE;
52748+ __u32 mode;
52749+
52750+ if (acc_mode & MAY_APPEND)
52751+ reqmode |= GR_APPEND;
52752+ // if a directory was required or the directory already exists, then
52753+ // don't count this open as a read
52754+ if ((acc_mode & MAY_READ) &&
52755+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52756+ reqmode |= GR_READ;
52757+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52758+ reqmode |= GR_SETID;
52759+
52760+ mode =
52761+ gr_check_create(dentry, p_dentry, p_mnt,
52762+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52763+
52764+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52765+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52766+ reqmode & GR_READ ? " reading" : "",
52767+ reqmode & GR_WRITE ? " writing" : reqmode &
52768+ GR_APPEND ? " appending" : "");
52769+ return reqmode;
52770+ } else
52771+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52772+ {
52773+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52774+ reqmode & GR_READ ? " reading" : "",
52775+ reqmode & GR_WRITE ? " writing" : reqmode &
52776+ GR_APPEND ? " appending" : "");
52777+ return 0;
52778+ } else if (unlikely((mode & reqmode) != reqmode))
52779+ return 0;
52780+
52781+ return reqmode;
52782+}
52783+
52784+__u32
52785+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52786+ const int fmode)
52787+{
52788+ __u32 mode, reqmode = GR_FIND;
52789+
52790+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52791+ reqmode |= GR_EXEC;
52792+ if (fmode & S_IWOTH)
52793+ reqmode |= GR_WRITE;
52794+ if (fmode & S_IROTH)
52795+ reqmode |= GR_READ;
52796+
52797+ mode =
52798+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52799+ mnt);
52800+
52801+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52802+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52803+ reqmode & GR_READ ? " reading" : "",
52804+ reqmode & GR_WRITE ? " writing" : "",
52805+ reqmode & GR_EXEC ? " executing" : "");
52806+ return reqmode;
52807+ } else
52808+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52809+ {
52810+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52811+ reqmode & GR_READ ? " reading" : "",
52812+ reqmode & GR_WRITE ? " writing" : "",
52813+ reqmode & GR_EXEC ? " executing" : "");
52814+ return 0;
52815+ } else if (unlikely((mode & reqmode) != reqmode))
52816+ return 0;
52817+
52818+ return reqmode;
52819+}
52820+
52821+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52822+{
52823+ __u32 mode;
52824+
52825+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52826+
52827+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52828+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52829+ return mode;
52830+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52831+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52832+ return 0;
52833+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52834+ return 0;
52835+
52836+ return (reqmode);
52837+}
52838+
52839+__u32
52840+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52841+{
52842+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52843+}
52844+
52845+__u32
52846+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52847+{
52848+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52849+}
52850+
52851+__u32
52852+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52853+{
52854+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52855+}
52856+
52857+__u32
52858+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52859+{
52860+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52861+}
52862+
52863+__u32
52864+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52865+ mode_t mode)
52866+{
52867+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52868+ return 1;
52869+
52870+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52871+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52872+ GR_FCHMOD_ACL_MSG);
52873+ } else {
52874+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52875+ }
52876+}
52877+
52878+__u32
52879+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52880+ mode_t mode)
52881+{
52882+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52883+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52884+ GR_CHMOD_ACL_MSG);
52885+ } else {
52886+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52887+ }
52888+}
52889+
52890+__u32
52891+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52892+{
52893+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52894+}
52895+
52896+__u32
52897+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52898+{
52899+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52900+}
52901+
52902+__u32
52903+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52904+{
52905+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52906+}
52907+
52908+__u32
52909+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52910+{
52911+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52912+ GR_UNIXCONNECT_ACL_MSG);
52913+}
52914+
52915+/* hardlinks require at minimum create and link permission,
52916+ any additional privilege required is based on the
52917+ privilege of the file being linked to
52918+*/
52919+__u32
52920+gr_acl_handle_link(const struct dentry * new_dentry,
52921+ const struct dentry * parent_dentry,
52922+ const struct vfsmount * parent_mnt,
52923+ const struct dentry * old_dentry,
52924+ const struct vfsmount * old_mnt, const char *to)
52925+{
52926+ __u32 mode;
52927+ __u32 needmode = GR_CREATE | GR_LINK;
52928+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52929+
52930+ mode =
52931+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52932+ old_mnt);
52933+
52934+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52935+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52936+ return mode;
52937+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52938+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52939+ return 0;
52940+ } else if (unlikely((mode & needmode) != needmode))
52941+ return 0;
52942+
52943+ return 1;
52944+}
52945+
52946+__u32
52947+gr_acl_handle_symlink(const struct dentry * new_dentry,
52948+ const struct dentry * parent_dentry,
52949+ const struct vfsmount * parent_mnt, const char *from)
52950+{
52951+ __u32 needmode = GR_WRITE | GR_CREATE;
52952+ __u32 mode;
52953+
52954+ mode =
52955+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52956+ GR_CREATE | GR_AUDIT_CREATE |
52957+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52958+
52959+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52960+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52961+ return mode;
52962+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52963+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52964+ return 0;
52965+ } else if (unlikely((mode & needmode) != needmode))
52966+ return 0;
52967+
52968+ return (GR_WRITE | GR_CREATE);
52969+}
52970+
52971+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52972+{
52973+ __u32 mode;
52974+
52975+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52976+
52977+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52978+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52979+ return mode;
52980+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52981+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52982+ return 0;
52983+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52984+ return 0;
52985+
52986+ return (reqmode);
52987+}
52988+
52989+__u32
52990+gr_acl_handle_mknod(const struct dentry * new_dentry,
52991+ const struct dentry * parent_dentry,
52992+ const struct vfsmount * parent_mnt,
52993+ const int mode)
52994+{
52995+ __u32 reqmode = GR_WRITE | GR_CREATE;
52996+ if (unlikely(mode & (S_ISUID | S_ISGID)))
52997+ reqmode |= GR_SETID;
52998+
52999+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53000+ reqmode, GR_MKNOD_ACL_MSG);
53001+}
53002+
53003+__u32
53004+gr_acl_handle_mkdir(const struct dentry *new_dentry,
53005+ const struct dentry *parent_dentry,
53006+ const struct vfsmount *parent_mnt)
53007+{
53008+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53009+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53010+}
53011+
53012+#define RENAME_CHECK_SUCCESS(old, new) \
53013+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53014+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53015+
53016+int
53017+gr_acl_handle_rename(struct dentry *new_dentry,
53018+ struct dentry *parent_dentry,
53019+ const struct vfsmount *parent_mnt,
53020+ struct dentry *old_dentry,
53021+ struct inode *old_parent_inode,
53022+ struct vfsmount *old_mnt, const char *newname)
53023+{
53024+ __u32 comp1, comp2;
53025+ int error = 0;
53026+
53027+ if (unlikely(!gr_acl_is_enabled()))
53028+ return 0;
53029+
53030+ if (!new_dentry->d_inode) {
53031+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53032+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53033+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53034+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53035+ GR_DELETE | GR_AUDIT_DELETE |
53036+ GR_AUDIT_READ | GR_AUDIT_WRITE |
53037+ GR_SUPPRESS, old_mnt);
53038+ } else {
53039+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53040+ GR_CREATE | GR_DELETE |
53041+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53042+ GR_AUDIT_READ | GR_AUDIT_WRITE |
53043+ GR_SUPPRESS, parent_mnt);
53044+ comp2 =
53045+ gr_search_file(old_dentry,
53046+ GR_READ | GR_WRITE | GR_AUDIT_READ |
53047+ GR_DELETE | GR_AUDIT_DELETE |
53048+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53049+ }
53050+
53051+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53052+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53053+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53054+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53055+ && !(comp2 & GR_SUPPRESS)) {
53056+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53057+ error = -EACCES;
53058+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53059+ error = -EACCES;
53060+
53061+ return error;
53062+}
53063+
53064+void
53065+gr_acl_handle_exit(void)
53066+{
53067+ u16 id;
53068+ char *rolename;
53069+ struct file *exec_file;
53070+
53071+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53072+ !(current->role->roletype & GR_ROLE_PERSIST))) {
53073+ id = current->acl_role_id;
53074+ rolename = current->role->rolename;
53075+ gr_set_acls(1);
53076+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53077+ }
53078+
53079+ write_lock(&grsec_exec_file_lock);
53080+ exec_file = current->exec_file;
53081+ current->exec_file = NULL;
53082+ write_unlock(&grsec_exec_file_lock);
53083+
53084+ if (exec_file)
53085+ fput(exec_file);
53086+}
53087+
53088+int
53089+gr_acl_handle_procpidmem(const struct task_struct *task)
53090+{
53091+ if (unlikely(!gr_acl_is_enabled()))
53092+ return 0;
53093+
53094+ if (task != current && task->acl->mode & GR_PROTPROCFD)
53095+ return -EACCES;
53096+
53097+ return 0;
53098+}
53099diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53100new file mode 100644
53101index 0000000..17050ca
53102--- /dev/null
53103+++ b/grsecurity/gracl_ip.c
53104@@ -0,0 +1,381 @@
53105+#include <linux/kernel.h>
53106+#include <asm/uaccess.h>
53107+#include <asm/errno.h>
53108+#include <net/sock.h>
53109+#include <linux/file.h>
53110+#include <linux/fs.h>
53111+#include <linux/net.h>
53112+#include <linux/in.h>
53113+#include <linux/skbuff.h>
53114+#include <linux/ip.h>
53115+#include <linux/udp.h>
53116+#include <linux/types.h>
53117+#include <linux/sched.h>
53118+#include <linux/netdevice.h>
53119+#include <linux/inetdevice.h>
53120+#include <linux/gracl.h>
53121+#include <linux/grsecurity.h>
53122+#include <linux/grinternal.h>
53123+
53124+#define GR_BIND 0x01
53125+#define GR_CONNECT 0x02
53126+#define GR_INVERT 0x04
53127+#define GR_BINDOVERRIDE 0x08
53128+#define GR_CONNECTOVERRIDE 0x10
53129+#define GR_SOCK_FAMILY 0x20
53130+
53131+static const char * gr_protocols[IPPROTO_MAX] = {
53132+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53133+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53134+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53135+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53136+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53137+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53138+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53139+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53140+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53141+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53142+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53143+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53144+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53145+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53146+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53147+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53148+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53149+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53150+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53151+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53152+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53153+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53154+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53155+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53156+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53157+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53158+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53159+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53160+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53161+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53162+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53163+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53164+ };
53165+
53166+static const char * gr_socktypes[SOCK_MAX] = {
53167+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53168+ "unknown:7", "unknown:8", "unknown:9", "packet"
53169+ };
53170+
53171+static const char * gr_sockfamilies[AF_MAX+1] = {
53172+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53173+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53174+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53175+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53176+ };
53177+
53178+const char *
53179+gr_proto_to_name(unsigned char proto)
53180+{
53181+ return gr_protocols[proto];
53182+}
53183+
53184+const char *
53185+gr_socktype_to_name(unsigned char type)
53186+{
53187+ return gr_socktypes[type];
53188+}
53189+
53190+const char *
53191+gr_sockfamily_to_name(unsigned char family)
53192+{
53193+ return gr_sockfamilies[family];
53194+}
53195+
53196+int
53197+gr_search_socket(const int domain, const int type, const int protocol)
53198+{
53199+ struct acl_subject_label *curr;
53200+ const struct cred *cred = current_cred();
53201+
53202+ if (unlikely(!gr_acl_is_enabled()))
53203+ goto exit;
53204+
53205+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
53206+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53207+ goto exit; // let the kernel handle it
53208+
53209+ curr = current->acl;
53210+
53211+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53212+ /* the family is allowed, if this is PF_INET allow it only if
53213+ the extra sock type/protocol checks pass */
53214+ if (domain == PF_INET)
53215+ goto inet_check;
53216+ goto exit;
53217+ } else {
53218+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53219+ __u32 fakeip = 0;
53220+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53221+ current->role->roletype, cred->uid,
53222+ cred->gid, current->exec_file ?
53223+ gr_to_filename(current->exec_file->f_path.dentry,
53224+ current->exec_file->f_path.mnt) :
53225+ curr->filename, curr->filename,
53226+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53227+ &current->signal->saved_ip);
53228+ goto exit;
53229+ }
53230+ goto exit_fail;
53231+ }
53232+
53233+inet_check:
53234+ /* the rest of this checking is for IPv4 only */
53235+ if (!curr->ips)
53236+ goto exit;
53237+
53238+ if ((curr->ip_type & (1 << type)) &&
53239+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53240+ goto exit;
53241+
53242+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53243+ /* we don't place acls on raw sockets , and sometimes
53244+ dgram/ip sockets are opened for ioctl and not
53245+ bind/connect, so we'll fake a bind learn log */
53246+ if (type == SOCK_RAW || type == SOCK_PACKET) {
53247+ __u32 fakeip = 0;
53248+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53249+ current->role->roletype, cred->uid,
53250+ cred->gid, current->exec_file ?
53251+ gr_to_filename(current->exec_file->f_path.dentry,
53252+ current->exec_file->f_path.mnt) :
53253+ curr->filename, curr->filename,
53254+ &fakeip, 0, type,
53255+ protocol, GR_CONNECT, &current->signal->saved_ip);
53256+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53257+ __u32 fakeip = 0;
53258+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53259+ current->role->roletype, cred->uid,
53260+ cred->gid, current->exec_file ?
53261+ gr_to_filename(current->exec_file->f_path.dentry,
53262+ current->exec_file->f_path.mnt) :
53263+ curr->filename, curr->filename,
53264+ &fakeip, 0, type,
53265+ protocol, GR_BIND, &current->signal->saved_ip);
53266+ }
53267+ /* we'll log when they use connect or bind */
53268+ goto exit;
53269+ }
53270+
53271+exit_fail:
53272+ if (domain == PF_INET)
53273+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53274+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
53275+ else
53276+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53277+ gr_socktype_to_name(type), protocol);
53278+
53279+ return 0;
53280+exit:
53281+ return 1;
53282+}
53283+
53284+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53285+{
53286+ if ((ip->mode & mode) &&
53287+ (ip_port >= ip->low) &&
53288+ (ip_port <= ip->high) &&
53289+ ((ntohl(ip_addr) & our_netmask) ==
53290+ (ntohl(our_addr) & our_netmask))
53291+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53292+ && (ip->type & (1 << type))) {
53293+ if (ip->mode & GR_INVERT)
53294+ return 2; // specifically denied
53295+ else
53296+ return 1; // allowed
53297+ }
53298+
53299+ return 0; // not specifically allowed, may continue parsing
53300+}
53301+
53302+static int
53303+gr_search_connectbind(const int full_mode, struct sock *sk,
53304+ struct sockaddr_in *addr, const int type)
53305+{
53306+ char iface[IFNAMSIZ] = {0};
53307+ struct acl_subject_label *curr;
53308+ struct acl_ip_label *ip;
53309+ struct inet_sock *isk;
53310+ struct net_device *dev;
53311+ struct in_device *idev;
53312+ unsigned long i;
53313+ int ret;
53314+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53315+ __u32 ip_addr = 0;
53316+ __u32 our_addr;
53317+ __u32 our_netmask;
53318+ char *p;
53319+ __u16 ip_port = 0;
53320+ const struct cred *cred = current_cred();
53321+
53322+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53323+ return 0;
53324+
53325+ curr = current->acl;
53326+ isk = inet_sk(sk);
53327+
53328+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53329+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53330+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53331+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53332+ struct sockaddr_in saddr;
53333+ int err;
53334+
53335+ saddr.sin_family = AF_INET;
53336+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53337+ saddr.sin_port = isk->inet_sport;
53338+
53339+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53340+ if (err)
53341+ return err;
53342+
53343+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53344+ if (err)
53345+ return err;
53346+ }
53347+
53348+ if (!curr->ips)
53349+ return 0;
53350+
53351+ ip_addr = addr->sin_addr.s_addr;
53352+ ip_port = ntohs(addr->sin_port);
53353+
53354+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53355+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53356+ current->role->roletype, cred->uid,
53357+ cred->gid, current->exec_file ?
53358+ gr_to_filename(current->exec_file->f_path.dentry,
53359+ current->exec_file->f_path.mnt) :
53360+ curr->filename, curr->filename,
53361+ &ip_addr, ip_port, type,
53362+ sk->sk_protocol, mode, &current->signal->saved_ip);
53363+ return 0;
53364+ }
53365+
53366+ for (i = 0; i < curr->ip_num; i++) {
53367+ ip = *(curr->ips + i);
53368+ if (ip->iface != NULL) {
53369+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53370+ p = strchr(iface, ':');
53371+ if (p != NULL)
53372+ *p = '\0';
53373+ dev = dev_get_by_name(sock_net(sk), iface);
53374+ if (dev == NULL)
53375+ continue;
53376+ idev = in_dev_get(dev);
53377+ if (idev == NULL) {
53378+ dev_put(dev);
53379+ continue;
53380+ }
53381+ rcu_read_lock();
53382+ for_ifa(idev) {
53383+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53384+ our_addr = ifa->ifa_address;
53385+ our_netmask = 0xffffffff;
53386+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53387+ if (ret == 1) {
53388+ rcu_read_unlock();
53389+ in_dev_put(idev);
53390+ dev_put(dev);
53391+ return 0;
53392+ } else if (ret == 2) {
53393+ rcu_read_unlock();
53394+ in_dev_put(idev);
53395+ dev_put(dev);
53396+ goto denied;
53397+ }
53398+ }
53399+ } endfor_ifa(idev);
53400+ rcu_read_unlock();
53401+ in_dev_put(idev);
53402+ dev_put(dev);
53403+ } else {
53404+ our_addr = ip->addr;
53405+ our_netmask = ip->netmask;
53406+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53407+ if (ret == 1)
53408+ return 0;
53409+ else if (ret == 2)
53410+ goto denied;
53411+ }
53412+ }
53413+
53414+denied:
53415+ if (mode == GR_BIND)
53416+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53417+ else if (mode == GR_CONNECT)
53418+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53419+
53420+ return -EACCES;
53421+}
53422+
53423+int
53424+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53425+{
53426+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53427+}
53428+
53429+int
53430+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53431+{
53432+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53433+}
53434+
53435+int gr_search_listen(struct socket *sock)
53436+{
53437+ struct sock *sk = sock->sk;
53438+ struct sockaddr_in addr;
53439+
53440+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53441+ addr.sin_port = inet_sk(sk)->inet_sport;
53442+
53443+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53444+}
53445+
53446+int gr_search_accept(struct socket *sock)
53447+{
53448+ struct sock *sk = sock->sk;
53449+ struct sockaddr_in addr;
53450+
53451+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53452+ addr.sin_port = inet_sk(sk)->inet_sport;
53453+
53454+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53455+}
53456+
53457+int
53458+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53459+{
53460+ if (addr)
53461+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53462+ else {
53463+ struct sockaddr_in sin;
53464+ const struct inet_sock *inet = inet_sk(sk);
53465+
53466+ sin.sin_addr.s_addr = inet->inet_daddr;
53467+ sin.sin_port = inet->inet_dport;
53468+
53469+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53470+ }
53471+}
53472+
53473+int
53474+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53475+{
53476+ struct sockaddr_in sin;
53477+
53478+ if (unlikely(skb->len < sizeof (struct udphdr)))
53479+ return 0; // skip this packet
53480+
53481+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53482+ sin.sin_port = udp_hdr(skb)->source;
53483+
53484+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53485+}
53486diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53487new file mode 100644
53488index 0000000..25f54ef
53489--- /dev/null
53490+++ b/grsecurity/gracl_learn.c
53491@@ -0,0 +1,207 @@
53492+#include <linux/kernel.h>
53493+#include <linux/mm.h>
53494+#include <linux/sched.h>
53495+#include <linux/poll.h>
53496+#include <linux/string.h>
53497+#include <linux/file.h>
53498+#include <linux/types.h>
53499+#include <linux/vmalloc.h>
53500+#include <linux/grinternal.h>
53501+
53502+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53503+ size_t count, loff_t *ppos);
53504+extern int gr_acl_is_enabled(void);
53505+
53506+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53507+static int gr_learn_attached;
53508+
53509+/* use a 512k buffer */
53510+#define LEARN_BUFFER_SIZE (512 * 1024)
53511+
53512+static DEFINE_SPINLOCK(gr_learn_lock);
53513+static DEFINE_MUTEX(gr_learn_user_mutex);
53514+
53515+/* we need to maintain two buffers, so that the kernel context of grlearn
53516+ uses a semaphore around the userspace copying, and the other kernel contexts
53517+ use a spinlock when copying into the buffer, since they cannot sleep
53518+*/
53519+static char *learn_buffer;
53520+static char *learn_buffer_user;
53521+static int learn_buffer_len;
53522+static int learn_buffer_user_len;
53523+
53524+static ssize_t
53525+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53526+{
53527+ DECLARE_WAITQUEUE(wait, current);
53528+ ssize_t retval = 0;
53529+
53530+ add_wait_queue(&learn_wait, &wait);
53531+ set_current_state(TASK_INTERRUPTIBLE);
53532+ do {
53533+ mutex_lock(&gr_learn_user_mutex);
53534+ spin_lock(&gr_learn_lock);
53535+ if (learn_buffer_len)
53536+ break;
53537+ spin_unlock(&gr_learn_lock);
53538+ mutex_unlock(&gr_learn_user_mutex);
53539+ if (file->f_flags & O_NONBLOCK) {
53540+ retval = -EAGAIN;
53541+ goto out;
53542+ }
53543+ if (signal_pending(current)) {
53544+ retval = -ERESTARTSYS;
53545+ goto out;
53546+ }
53547+
53548+ schedule();
53549+ } while (1);
53550+
53551+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53552+ learn_buffer_user_len = learn_buffer_len;
53553+ retval = learn_buffer_len;
53554+ learn_buffer_len = 0;
53555+
53556+ spin_unlock(&gr_learn_lock);
53557+
53558+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53559+ retval = -EFAULT;
53560+
53561+ mutex_unlock(&gr_learn_user_mutex);
53562+out:
53563+ set_current_state(TASK_RUNNING);
53564+ remove_wait_queue(&learn_wait, &wait);
53565+ return retval;
53566+}
53567+
53568+static unsigned int
53569+poll_learn(struct file * file, poll_table * wait)
53570+{
53571+ poll_wait(file, &learn_wait, wait);
53572+
53573+ if (learn_buffer_len)
53574+ return (POLLIN | POLLRDNORM);
53575+
53576+ return 0;
53577+}
53578+
53579+void
53580+gr_clear_learn_entries(void)
53581+{
53582+ char *tmp;
53583+
53584+ mutex_lock(&gr_learn_user_mutex);
53585+ spin_lock(&gr_learn_lock);
53586+ tmp = learn_buffer;
53587+ learn_buffer = NULL;
53588+ spin_unlock(&gr_learn_lock);
53589+ if (tmp)
53590+ vfree(tmp);
53591+ if (learn_buffer_user != NULL) {
53592+ vfree(learn_buffer_user);
53593+ learn_buffer_user = NULL;
53594+ }
53595+ learn_buffer_len = 0;
53596+ mutex_unlock(&gr_learn_user_mutex);
53597+
53598+ return;
53599+}
53600+
53601+void
53602+gr_add_learn_entry(const char *fmt, ...)
53603+{
53604+ va_list args;
53605+ unsigned int len;
53606+
53607+ if (!gr_learn_attached)
53608+ return;
53609+
53610+ spin_lock(&gr_learn_lock);
53611+
53612+ /* leave a gap at the end so we know when it's "full" but don't have to
53613+ compute the exact length of the string we're trying to append
53614+ */
53615+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53616+ spin_unlock(&gr_learn_lock);
53617+ wake_up_interruptible(&learn_wait);
53618+ return;
53619+ }
53620+ if (learn_buffer == NULL) {
53621+ spin_unlock(&gr_learn_lock);
53622+ return;
53623+ }
53624+
53625+ va_start(args, fmt);
53626+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53627+ va_end(args);
53628+
53629+ learn_buffer_len += len + 1;
53630+
53631+ spin_unlock(&gr_learn_lock);
53632+ wake_up_interruptible(&learn_wait);
53633+
53634+ return;
53635+}
53636+
53637+static int
53638+open_learn(struct inode *inode, struct file *file)
53639+{
53640+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53641+ return -EBUSY;
53642+ if (file->f_mode & FMODE_READ) {
53643+ int retval = 0;
53644+ mutex_lock(&gr_learn_user_mutex);
53645+ if (learn_buffer == NULL)
53646+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53647+ if (learn_buffer_user == NULL)
53648+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53649+ if (learn_buffer == NULL) {
53650+ retval = -ENOMEM;
53651+ goto out_error;
53652+ }
53653+ if (learn_buffer_user == NULL) {
53654+ retval = -ENOMEM;
53655+ goto out_error;
53656+ }
53657+ learn_buffer_len = 0;
53658+ learn_buffer_user_len = 0;
53659+ gr_learn_attached = 1;
53660+out_error:
53661+ mutex_unlock(&gr_learn_user_mutex);
53662+ return retval;
53663+ }
53664+ return 0;
53665+}
53666+
53667+static int
53668+close_learn(struct inode *inode, struct file *file)
53669+{
53670+ if (file->f_mode & FMODE_READ) {
53671+ char *tmp = NULL;
53672+ mutex_lock(&gr_learn_user_mutex);
53673+ spin_lock(&gr_learn_lock);
53674+ tmp = learn_buffer;
53675+ learn_buffer = NULL;
53676+ spin_unlock(&gr_learn_lock);
53677+ if (tmp)
53678+ vfree(tmp);
53679+ if (learn_buffer_user != NULL) {
53680+ vfree(learn_buffer_user);
53681+ learn_buffer_user = NULL;
53682+ }
53683+ learn_buffer_len = 0;
53684+ learn_buffer_user_len = 0;
53685+ gr_learn_attached = 0;
53686+ mutex_unlock(&gr_learn_user_mutex);
53687+ }
53688+
53689+ return 0;
53690+}
53691+
53692+const struct file_operations grsec_fops = {
53693+ .read = read_learn,
53694+ .write = write_grsec_handler,
53695+ .open = open_learn,
53696+ .release = close_learn,
53697+ .poll = poll_learn,
53698+};
53699diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53700new file mode 100644
53701index 0000000..39645c9
53702--- /dev/null
53703+++ b/grsecurity/gracl_res.c
53704@@ -0,0 +1,68 @@
53705+#include <linux/kernel.h>
53706+#include <linux/sched.h>
53707+#include <linux/gracl.h>
53708+#include <linux/grinternal.h>
53709+
53710+static const char *restab_log[] = {
53711+ [RLIMIT_CPU] = "RLIMIT_CPU",
53712+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53713+ [RLIMIT_DATA] = "RLIMIT_DATA",
53714+ [RLIMIT_STACK] = "RLIMIT_STACK",
53715+ [RLIMIT_CORE] = "RLIMIT_CORE",
53716+ [RLIMIT_RSS] = "RLIMIT_RSS",
53717+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53718+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53719+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53720+ [RLIMIT_AS] = "RLIMIT_AS",
53721+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53722+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53723+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53724+ [RLIMIT_NICE] = "RLIMIT_NICE",
53725+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53726+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53727+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53728+};
53729+
53730+void
53731+gr_log_resource(const struct task_struct *task,
53732+ const int res, const unsigned long wanted, const int gt)
53733+{
53734+ const struct cred *cred;
53735+ unsigned long rlim;
53736+
53737+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53738+ return;
53739+
53740+ // not yet supported resource
53741+ if (unlikely(!restab_log[res]))
53742+ return;
53743+
53744+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53745+ rlim = task_rlimit_max(task, res);
53746+ else
53747+ rlim = task_rlimit(task, res);
53748+
53749+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53750+ return;
53751+
53752+ rcu_read_lock();
53753+ cred = __task_cred(task);
53754+
53755+ if (res == RLIMIT_NPROC &&
53756+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53757+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53758+ goto out_rcu_unlock;
53759+ else if (res == RLIMIT_MEMLOCK &&
53760+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53761+ goto out_rcu_unlock;
53762+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53763+ goto out_rcu_unlock;
53764+ rcu_read_unlock();
53765+
53766+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53767+
53768+ return;
53769+out_rcu_unlock:
53770+ rcu_read_unlock();
53771+ return;
53772+}
53773diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53774new file mode 100644
53775index 0000000..5556be3
53776--- /dev/null
53777+++ b/grsecurity/gracl_segv.c
53778@@ -0,0 +1,299 @@
53779+#include <linux/kernel.h>
53780+#include <linux/mm.h>
53781+#include <asm/uaccess.h>
53782+#include <asm/errno.h>
53783+#include <asm/mman.h>
53784+#include <net/sock.h>
53785+#include <linux/file.h>
53786+#include <linux/fs.h>
53787+#include <linux/net.h>
53788+#include <linux/in.h>
53789+#include <linux/slab.h>
53790+#include <linux/types.h>
53791+#include <linux/sched.h>
53792+#include <linux/timer.h>
53793+#include <linux/gracl.h>
53794+#include <linux/grsecurity.h>
53795+#include <linux/grinternal.h>
53796+
53797+static struct crash_uid *uid_set;
53798+static unsigned short uid_used;
53799+static DEFINE_SPINLOCK(gr_uid_lock);
53800+extern rwlock_t gr_inode_lock;
53801+extern struct acl_subject_label *
53802+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53803+ struct acl_role_label *role);
53804+
53805+#ifdef CONFIG_BTRFS_FS
53806+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53807+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53808+#endif
53809+
53810+static inline dev_t __get_dev(const struct dentry *dentry)
53811+{
53812+#ifdef CONFIG_BTRFS_FS
53813+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53814+ return get_btrfs_dev_from_inode(dentry->d_inode);
53815+ else
53816+#endif
53817+ return dentry->d_inode->i_sb->s_dev;
53818+}
53819+
53820+int
53821+gr_init_uidset(void)
53822+{
53823+ uid_set =
53824+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53825+ uid_used = 0;
53826+
53827+ return uid_set ? 1 : 0;
53828+}
53829+
53830+void
53831+gr_free_uidset(void)
53832+{
53833+ if (uid_set)
53834+ kfree(uid_set);
53835+
53836+ return;
53837+}
53838+
53839+int
53840+gr_find_uid(const uid_t uid)
53841+{
53842+ struct crash_uid *tmp = uid_set;
53843+ uid_t buid;
53844+ int low = 0, high = uid_used - 1, mid;
53845+
53846+ while (high >= low) {
53847+ mid = (low + high) >> 1;
53848+ buid = tmp[mid].uid;
53849+ if (buid == uid)
53850+ return mid;
53851+ if (buid > uid)
53852+ high = mid - 1;
53853+ if (buid < uid)
53854+ low = mid + 1;
53855+ }
53856+
53857+ return -1;
53858+}
53859+
53860+static __inline__ void
53861+gr_insertsort(void)
53862+{
53863+ unsigned short i, j;
53864+ struct crash_uid index;
53865+
53866+ for (i = 1; i < uid_used; i++) {
53867+ index = uid_set[i];
53868+ j = i;
53869+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53870+ uid_set[j] = uid_set[j - 1];
53871+ j--;
53872+ }
53873+ uid_set[j] = index;
53874+ }
53875+
53876+ return;
53877+}
53878+
53879+static __inline__ void
53880+gr_insert_uid(const uid_t uid, const unsigned long expires)
53881+{
53882+ int loc;
53883+
53884+ if (uid_used == GR_UIDTABLE_MAX)
53885+ return;
53886+
53887+ loc = gr_find_uid(uid);
53888+
53889+ if (loc >= 0) {
53890+ uid_set[loc].expires = expires;
53891+ return;
53892+ }
53893+
53894+ uid_set[uid_used].uid = uid;
53895+ uid_set[uid_used].expires = expires;
53896+ uid_used++;
53897+
53898+ gr_insertsort();
53899+
53900+ return;
53901+}
53902+
53903+void
53904+gr_remove_uid(const unsigned short loc)
53905+{
53906+ unsigned short i;
53907+
53908+ for (i = loc + 1; i < uid_used; i++)
53909+ uid_set[i - 1] = uid_set[i];
53910+
53911+ uid_used--;
53912+
53913+ return;
53914+}
53915+
53916+int
53917+gr_check_crash_uid(const uid_t uid)
53918+{
53919+ int loc;
53920+ int ret = 0;
53921+
53922+ if (unlikely(!gr_acl_is_enabled()))
53923+ return 0;
53924+
53925+ spin_lock(&gr_uid_lock);
53926+ loc = gr_find_uid(uid);
53927+
53928+ if (loc < 0)
53929+ goto out_unlock;
53930+
53931+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53932+ gr_remove_uid(loc);
53933+ else
53934+ ret = 1;
53935+
53936+out_unlock:
53937+ spin_unlock(&gr_uid_lock);
53938+ return ret;
53939+}
53940+
53941+static __inline__ int
53942+proc_is_setxid(const struct cred *cred)
53943+{
53944+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53945+ cred->uid != cred->fsuid)
53946+ return 1;
53947+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53948+ cred->gid != cred->fsgid)
53949+ return 1;
53950+
53951+ return 0;
53952+}
53953+
53954+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53955+
53956+void
53957+gr_handle_crash(struct task_struct *task, const int sig)
53958+{
53959+ struct acl_subject_label *curr;
53960+ struct task_struct *tsk, *tsk2;
53961+ const struct cred *cred;
53962+ const struct cred *cred2;
53963+
53964+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53965+ return;
53966+
53967+ if (unlikely(!gr_acl_is_enabled()))
53968+ return;
53969+
53970+ curr = task->acl;
53971+
53972+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
53973+ return;
53974+
53975+ if (time_before_eq(curr->expires, get_seconds())) {
53976+ curr->expires = 0;
53977+ curr->crashes = 0;
53978+ }
53979+
53980+ curr->crashes++;
53981+
53982+ if (!curr->expires)
53983+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53984+
53985+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53986+ time_after(curr->expires, get_seconds())) {
53987+ rcu_read_lock();
53988+ cred = __task_cred(task);
53989+ if (cred->uid && proc_is_setxid(cred)) {
53990+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53991+ spin_lock(&gr_uid_lock);
53992+ gr_insert_uid(cred->uid, curr->expires);
53993+ spin_unlock(&gr_uid_lock);
53994+ curr->expires = 0;
53995+ curr->crashes = 0;
53996+ read_lock(&tasklist_lock);
53997+ do_each_thread(tsk2, tsk) {
53998+ cred2 = __task_cred(tsk);
53999+ if (tsk != task && cred2->uid == cred->uid)
54000+ gr_fake_force_sig(SIGKILL, tsk);
54001+ } while_each_thread(tsk2, tsk);
54002+ read_unlock(&tasklist_lock);
54003+ } else {
54004+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54005+ read_lock(&tasklist_lock);
54006+ read_lock(&grsec_exec_file_lock);
54007+ do_each_thread(tsk2, tsk) {
54008+ if (likely(tsk != task)) {
54009+ // if this thread has the same subject as the one that triggered
54010+ // RES_CRASH and it's the same binary, kill it
54011+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54012+ gr_fake_force_sig(SIGKILL, tsk);
54013+ }
54014+ } while_each_thread(tsk2, tsk);
54015+ read_unlock(&grsec_exec_file_lock);
54016+ read_unlock(&tasklist_lock);
54017+ }
54018+ rcu_read_unlock();
54019+ }
54020+
54021+ return;
54022+}
54023+
54024+int
54025+gr_check_crash_exec(const struct file *filp)
54026+{
54027+ struct acl_subject_label *curr;
54028+
54029+ if (unlikely(!gr_acl_is_enabled()))
54030+ return 0;
54031+
54032+ read_lock(&gr_inode_lock);
54033+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54034+ __get_dev(filp->f_path.dentry),
54035+ current->role);
54036+ read_unlock(&gr_inode_lock);
54037+
54038+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54039+ (!curr->crashes && !curr->expires))
54040+ return 0;
54041+
54042+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54043+ time_after(curr->expires, get_seconds()))
54044+ return 1;
54045+ else if (time_before_eq(curr->expires, get_seconds())) {
54046+ curr->crashes = 0;
54047+ curr->expires = 0;
54048+ }
54049+
54050+ return 0;
54051+}
54052+
54053+void
54054+gr_handle_alertkill(struct task_struct *task)
54055+{
54056+ struct acl_subject_label *curracl;
54057+ __u32 curr_ip;
54058+ struct task_struct *p, *p2;
54059+
54060+ if (unlikely(!gr_acl_is_enabled()))
54061+ return;
54062+
54063+ curracl = task->acl;
54064+ curr_ip = task->signal->curr_ip;
54065+
54066+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54067+ read_lock(&tasklist_lock);
54068+ do_each_thread(p2, p) {
54069+ if (p->signal->curr_ip == curr_ip)
54070+ gr_fake_force_sig(SIGKILL, p);
54071+ } while_each_thread(p2, p);
54072+ read_unlock(&tasklist_lock);
54073+ } else if (curracl->mode & GR_KILLPROC)
54074+ gr_fake_force_sig(SIGKILL, task);
54075+
54076+ return;
54077+}
54078diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54079new file mode 100644
54080index 0000000..9d83a69
54081--- /dev/null
54082+++ b/grsecurity/gracl_shm.c
54083@@ -0,0 +1,40 @@
54084+#include <linux/kernel.h>
54085+#include <linux/mm.h>
54086+#include <linux/sched.h>
54087+#include <linux/file.h>
54088+#include <linux/ipc.h>
54089+#include <linux/gracl.h>
54090+#include <linux/grsecurity.h>
54091+#include <linux/grinternal.h>
54092+
54093+int
54094+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54095+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54096+{
54097+ struct task_struct *task;
54098+
54099+ if (!gr_acl_is_enabled())
54100+ return 1;
54101+
54102+ rcu_read_lock();
54103+ read_lock(&tasklist_lock);
54104+
54105+ task = find_task_by_vpid(shm_cprid);
54106+
54107+ if (unlikely(!task))
54108+ task = find_task_by_vpid(shm_lapid);
54109+
54110+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54111+ (task->pid == shm_lapid)) &&
54112+ (task->acl->mode & GR_PROTSHM) &&
54113+ (task->acl != current->acl))) {
54114+ read_unlock(&tasklist_lock);
54115+ rcu_read_unlock();
54116+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54117+ return 0;
54118+ }
54119+ read_unlock(&tasklist_lock);
54120+ rcu_read_unlock();
54121+
54122+ return 1;
54123+}
54124diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54125new file mode 100644
54126index 0000000..bc0be01
54127--- /dev/null
54128+++ b/grsecurity/grsec_chdir.c
54129@@ -0,0 +1,19 @@
54130+#include <linux/kernel.h>
54131+#include <linux/sched.h>
54132+#include <linux/fs.h>
54133+#include <linux/file.h>
54134+#include <linux/grsecurity.h>
54135+#include <linux/grinternal.h>
54136+
54137+void
54138+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54139+{
54140+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54141+ if ((grsec_enable_chdir && grsec_enable_group &&
54142+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54143+ !grsec_enable_group)) {
54144+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54145+ }
54146+#endif
54147+ return;
54148+}
54149diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54150new file mode 100644
54151index 0000000..a2dc675
54152--- /dev/null
54153+++ b/grsecurity/grsec_chroot.c
54154@@ -0,0 +1,351 @@
54155+#include <linux/kernel.h>
54156+#include <linux/module.h>
54157+#include <linux/sched.h>
54158+#include <linux/file.h>
54159+#include <linux/fs.h>
54160+#include <linux/mount.h>
54161+#include <linux/types.h>
54162+#include <linux/pid_namespace.h>
54163+#include <linux/grsecurity.h>
54164+#include <linux/grinternal.h>
54165+
54166+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54167+{
54168+#ifdef CONFIG_GRKERNSEC
54169+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54170+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54171+ task->gr_is_chrooted = 1;
54172+ else
54173+ task->gr_is_chrooted = 0;
54174+
54175+ task->gr_chroot_dentry = path->dentry;
54176+#endif
54177+ return;
54178+}
54179+
54180+void gr_clear_chroot_entries(struct task_struct *task)
54181+{
54182+#ifdef CONFIG_GRKERNSEC
54183+ task->gr_is_chrooted = 0;
54184+ task->gr_chroot_dentry = NULL;
54185+#endif
54186+ return;
54187+}
54188+
54189+int
54190+gr_handle_chroot_unix(const pid_t pid)
54191+{
54192+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54193+ struct task_struct *p;
54194+
54195+ if (unlikely(!grsec_enable_chroot_unix))
54196+ return 1;
54197+
54198+ if (likely(!proc_is_chrooted(current)))
54199+ return 1;
54200+
54201+ rcu_read_lock();
54202+ read_lock(&tasklist_lock);
54203+ p = find_task_by_vpid_unrestricted(pid);
54204+ if (unlikely(p && !have_same_root(current, p))) {
54205+ read_unlock(&tasklist_lock);
54206+ rcu_read_unlock();
54207+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54208+ return 0;
54209+ }
54210+ read_unlock(&tasklist_lock);
54211+ rcu_read_unlock();
54212+#endif
54213+ return 1;
54214+}
54215+
54216+int
54217+gr_handle_chroot_nice(void)
54218+{
54219+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54220+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54221+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54222+ return -EPERM;
54223+ }
54224+#endif
54225+ return 0;
54226+}
54227+
54228+int
54229+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54230+{
54231+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54232+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54233+ && proc_is_chrooted(current)) {
54234+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54235+ return -EACCES;
54236+ }
54237+#endif
54238+ return 0;
54239+}
54240+
54241+int
54242+gr_handle_chroot_rawio(const struct inode *inode)
54243+{
54244+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54245+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54246+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54247+ return 1;
54248+#endif
54249+ return 0;
54250+}
54251+
54252+int
54253+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54254+{
54255+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54256+ struct task_struct *p;
54257+ int ret = 0;
54258+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54259+ return ret;
54260+
54261+ read_lock(&tasklist_lock);
54262+ do_each_pid_task(pid, type, p) {
54263+ if (!have_same_root(current, p)) {
54264+ ret = 1;
54265+ goto out;
54266+ }
54267+ } while_each_pid_task(pid, type, p);
54268+out:
54269+ read_unlock(&tasklist_lock);
54270+ return ret;
54271+#endif
54272+ return 0;
54273+}
54274+
54275+int
54276+gr_pid_is_chrooted(struct task_struct *p)
54277+{
54278+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54279+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54280+ return 0;
54281+
54282+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54283+ !have_same_root(current, p)) {
54284+ return 1;
54285+ }
54286+#endif
54287+ return 0;
54288+}
54289+
54290+EXPORT_SYMBOL(gr_pid_is_chrooted);
54291+
54292+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54293+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54294+{
54295+ struct path path, currentroot;
54296+ int ret = 0;
54297+
54298+ path.dentry = (struct dentry *)u_dentry;
54299+ path.mnt = (struct vfsmount *)u_mnt;
54300+ get_fs_root(current->fs, &currentroot);
54301+ if (path_is_under(&path, &currentroot))
54302+ ret = 1;
54303+ path_put(&currentroot);
54304+
54305+ return ret;
54306+}
54307+#endif
54308+
54309+int
54310+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54311+{
54312+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54313+ if (!grsec_enable_chroot_fchdir)
54314+ return 1;
54315+
54316+ if (!proc_is_chrooted(current))
54317+ return 1;
54318+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54319+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54320+ return 0;
54321+ }
54322+#endif
54323+ return 1;
54324+}
54325+
54326+int
54327+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54328+ const time_t shm_createtime)
54329+{
54330+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54331+ struct task_struct *p;
54332+ time_t starttime;
54333+
54334+ if (unlikely(!grsec_enable_chroot_shmat))
54335+ return 1;
54336+
54337+ if (likely(!proc_is_chrooted(current)))
54338+ return 1;
54339+
54340+ rcu_read_lock();
54341+ read_lock(&tasklist_lock);
54342+
54343+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54344+ starttime = p->start_time.tv_sec;
54345+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54346+ if (have_same_root(current, p)) {
54347+ goto allow;
54348+ } else {
54349+ read_unlock(&tasklist_lock);
54350+ rcu_read_unlock();
54351+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54352+ return 0;
54353+ }
54354+ }
54355+ /* creator exited, pid reuse, fall through to next check */
54356+ }
54357+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54358+ if (unlikely(!have_same_root(current, p))) {
54359+ read_unlock(&tasklist_lock);
54360+ rcu_read_unlock();
54361+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54362+ return 0;
54363+ }
54364+ }
54365+
54366+allow:
54367+ read_unlock(&tasklist_lock);
54368+ rcu_read_unlock();
54369+#endif
54370+ return 1;
54371+}
54372+
54373+void
54374+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54375+{
54376+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54377+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54378+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54379+#endif
54380+ return;
54381+}
54382+
54383+int
54384+gr_handle_chroot_mknod(const struct dentry *dentry,
54385+ const struct vfsmount *mnt, const int mode)
54386+{
54387+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54388+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54389+ proc_is_chrooted(current)) {
54390+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54391+ return -EPERM;
54392+ }
54393+#endif
54394+ return 0;
54395+}
54396+
54397+int
54398+gr_handle_chroot_mount(const struct dentry *dentry,
54399+ const struct vfsmount *mnt, const char *dev_name)
54400+{
54401+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54402+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54403+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54404+ return -EPERM;
54405+ }
54406+#endif
54407+ return 0;
54408+}
54409+
54410+int
54411+gr_handle_chroot_pivot(void)
54412+{
54413+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54414+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54415+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54416+ return -EPERM;
54417+ }
54418+#endif
54419+ return 0;
54420+}
54421+
54422+int
54423+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54424+{
54425+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54426+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54427+ !gr_is_outside_chroot(dentry, mnt)) {
54428+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54429+ return -EPERM;
54430+ }
54431+#endif
54432+ return 0;
54433+}
54434+
54435+extern const char *captab_log[];
54436+extern int captab_log_entries;
54437+
54438+int
54439+gr_chroot_is_capable(const int cap)
54440+{
54441+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54442+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54443+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54444+ if (cap_raised(chroot_caps, cap)) {
54445+ const struct cred *creds = current_cred();
54446+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54447+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54448+ }
54449+ return 0;
54450+ }
54451+ }
54452+#endif
54453+ return 1;
54454+}
54455+
54456+int
54457+gr_chroot_is_capable_nolog(const int cap)
54458+{
54459+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54460+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54461+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54462+ if (cap_raised(chroot_caps, cap)) {
54463+ return 0;
54464+ }
54465+ }
54466+#endif
54467+ return 1;
54468+}
54469+
54470+int
54471+gr_handle_chroot_sysctl(const int op)
54472+{
54473+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54474+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54475+ proc_is_chrooted(current))
54476+ return -EACCES;
54477+#endif
54478+ return 0;
54479+}
54480+
54481+void
54482+gr_handle_chroot_chdir(struct path *path)
54483+{
54484+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54485+ if (grsec_enable_chroot_chdir)
54486+ set_fs_pwd(current->fs, path);
54487+#endif
54488+ return;
54489+}
54490+
54491+int
54492+gr_handle_chroot_chmod(const struct dentry *dentry,
54493+ const struct vfsmount *mnt, const int mode)
54494+{
54495+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54496+ /* allow chmod +s on directories, but not files */
54497+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54498+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54499+ proc_is_chrooted(current)) {
54500+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54501+ return -EPERM;
54502+ }
54503+#endif
54504+ return 0;
54505+}
54506diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54507new file mode 100644
54508index 0000000..d81a586
54509--- /dev/null
54510+++ b/grsecurity/grsec_disabled.c
54511@@ -0,0 +1,439 @@
54512+#include <linux/kernel.h>
54513+#include <linux/module.h>
54514+#include <linux/sched.h>
54515+#include <linux/file.h>
54516+#include <linux/fs.h>
54517+#include <linux/kdev_t.h>
54518+#include <linux/net.h>
54519+#include <linux/in.h>
54520+#include <linux/ip.h>
54521+#include <linux/skbuff.h>
54522+#include <linux/sysctl.h>
54523+
54524+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54525+void
54526+pax_set_initial_flags(struct linux_binprm *bprm)
54527+{
54528+ return;
54529+}
54530+#endif
54531+
54532+#ifdef CONFIG_SYSCTL
54533+__u32
54534+gr_handle_sysctl(const struct ctl_table * table, const int op)
54535+{
54536+ return 0;
54537+}
54538+#endif
54539+
54540+#ifdef CONFIG_TASKSTATS
54541+int gr_is_taskstats_denied(int pid)
54542+{
54543+ return 0;
54544+}
54545+#endif
54546+
54547+int
54548+gr_acl_is_enabled(void)
54549+{
54550+ return 0;
54551+}
54552+
54553+void
54554+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54555+{
54556+ return;
54557+}
54558+
54559+int
54560+gr_handle_rawio(const struct inode *inode)
54561+{
54562+ return 0;
54563+}
54564+
54565+void
54566+gr_acl_handle_psacct(struct task_struct *task, const long code)
54567+{
54568+ return;
54569+}
54570+
54571+int
54572+gr_handle_ptrace(struct task_struct *task, const long request)
54573+{
54574+ return 0;
54575+}
54576+
54577+int
54578+gr_handle_proc_ptrace(struct task_struct *task)
54579+{
54580+ return 0;
54581+}
54582+
54583+void
54584+gr_learn_resource(const struct task_struct *task,
54585+ const int res, const unsigned long wanted, const int gt)
54586+{
54587+ return;
54588+}
54589+
54590+int
54591+gr_set_acls(const int type)
54592+{
54593+ return 0;
54594+}
54595+
54596+int
54597+gr_check_hidden_task(const struct task_struct *tsk)
54598+{
54599+ return 0;
54600+}
54601+
54602+int
54603+gr_check_protected_task(const struct task_struct *task)
54604+{
54605+ return 0;
54606+}
54607+
54608+int
54609+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54610+{
54611+ return 0;
54612+}
54613+
54614+void
54615+gr_copy_label(struct task_struct *tsk)
54616+{
54617+ return;
54618+}
54619+
54620+void
54621+gr_set_pax_flags(struct task_struct *task)
54622+{
54623+ return;
54624+}
54625+
54626+int
54627+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54628+ const int unsafe_share)
54629+{
54630+ return 0;
54631+}
54632+
54633+void
54634+gr_handle_delete(const ino_t ino, const dev_t dev)
54635+{
54636+ return;
54637+}
54638+
54639+void
54640+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54641+{
54642+ return;
54643+}
54644+
54645+void
54646+gr_handle_crash(struct task_struct *task, const int sig)
54647+{
54648+ return;
54649+}
54650+
54651+int
54652+gr_check_crash_exec(const struct file *filp)
54653+{
54654+ return 0;
54655+}
54656+
54657+int
54658+gr_check_crash_uid(const uid_t uid)
54659+{
54660+ return 0;
54661+}
54662+
54663+void
54664+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54665+ struct dentry *old_dentry,
54666+ struct dentry *new_dentry,
54667+ struct vfsmount *mnt, const __u8 replace)
54668+{
54669+ return;
54670+}
54671+
54672+int
54673+gr_search_socket(const int family, const int type, const int protocol)
54674+{
54675+ return 1;
54676+}
54677+
54678+int
54679+gr_search_connectbind(const int mode, const struct socket *sock,
54680+ const struct sockaddr_in *addr)
54681+{
54682+ return 0;
54683+}
54684+
54685+void
54686+gr_handle_alertkill(struct task_struct *task)
54687+{
54688+ return;
54689+}
54690+
54691+__u32
54692+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54693+{
54694+ return 1;
54695+}
54696+
54697+__u32
54698+gr_acl_handle_hidden_file(const struct dentry * dentry,
54699+ const struct vfsmount * mnt)
54700+{
54701+ return 1;
54702+}
54703+
54704+__u32
54705+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54706+ int acc_mode)
54707+{
54708+ return 1;
54709+}
54710+
54711+__u32
54712+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54713+{
54714+ return 1;
54715+}
54716+
54717+__u32
54718+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54719+{
54720+ return 1;
54721+}
54722+
54723+int
54724+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54725+ unsigned int *vm_flags)
54726+{
54727+ return 1;
54728+}
54729+
54730+__u32
54731+gr_acl_handle_truncate(const struct dentry * dentry,
54732+ const struct vfsmount * mnt)
54733+{
54734+ return 1;
54735+}
54736+
54737+__u32
54738+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54739+{
54740+ return 1;
54741+}
54742+
54743+__u32
54744+gr_acl_handle_access(const struct dentry * dentry,
54745+ const struct vfsmount * mnt, const int fmode)
54746+{
54747+ return 1;
54748+}
54749+
54750+__u32
54751+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54752+ mode_t mode)
54753+{
54754+ return 1;
54755+}
54756+
54757+__u32
54758+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54759+ mode_t mode)
54760+{
54761+ return 1;
54762+}
54763+
54764+__u32
54765+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54766+{
54767+ return 1;
54768+}
54769+
54770+__u32
54771+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54772+{
54773+ return 1;
54774+}
54775+
54776+void
54777+grsecurity_init(void)
54778+{
54779+ return;
54780+}
54781+
54782+__u32
54783+gr_acl_handle_mknod(const struct dentry * new_dentry,
54784+ const struct dentry * parent_dentry,
54785+ const struct vfsmount * parent_mnt,
54786+ const int mode)
54787+{
54788+ return 1;
54789+}
54790+
54791+__u32
54792+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54793+ const struct dentry * parent_dentry,
54794+ const struct vfsmount * parent_mnt)
54795+{
54796+ return 1;
54797+}
54798+
54799+__u32
54800+gr_acl_handle_symlink(const struct dentry * new_dentry,
54801+ const struct dentry * parent_dentry,
54802+ const struct vfsmount * parent_mnt, const char *from)
54803+{
54804+ return 1;
54805+}
54806+
54807+__u32
54808+gr_acl_handle_link(const struct dentry * new_dentry,
54809+ const struct dentry * parent_dentry,
54810+ const struct vfsmount * parent_mnt,
54811+ const struct dentry * old_dentry,
54812+ const struct vfsmount * old_mnt, const char *to)
54813+{
54814+ return 1;
54815+}
54816+
54817+int
54818+gr_acl_handle_rename(const struct dentry *new_dentry,
54819+ const struct dentry *parent_dentry,
54820+ const struct vfsmount *parent_mnt,
54821+ const struct dentry *old_dentry,
54822+ const struct inode *old_parent_inode,
54823+ const struct vfsmount *old_mnt, const char *newname)
54824+{
54825+ return 0;
54826+}
54827+
54828+int
54829+gr_acl_handle_filldir(const struct file *file, const char *name,
54830+ const int namelen, const ino_t ino)
54831+{
54832+ return 1;
54833+}
54834+
54835+int
54836+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54837+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54838+{
54839+ return 1;
54840+}
54841+
54842+int
54843+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54844+{
54845+ return 0;
54846+}
54847+
54848+int
54849+gr_search_accept(const struct socket *sock)
54850+{
54851+ return 0;
54852+}
54853+
54854+int
54855+gr_search_listen(const struct socket *sock)
54856+{
54857+ return 0;
54858+}
54859+
54860+int
54861+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54862+{
54863+ return 0;
54864+}
54865+
54866+__u32
54867+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54868+{
54869+ return 1;
54870+}
54871+
54872+__u32
54873+gr_acl_handle_creat(const struct dentry * dentry,
54874+ const struct dentry * p_dentry,
54875+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54876+ const int imode)
54877+{
54878+ return 1;
54879+}
54880+
54881+void
54882+gr_acl_handle_exit(void)
54883+{
54884+ return;
54885+}
54886+
54887+int
54888+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54889+{
54890+ return 1;
54891+}
54892+
54893+void
54894+gr_set_role_label(const uid_t uid, const gid_t gid)
54895+{
54896+ return;
54897+}
54898+
54899+int
54900+gr_acl_handle_procpidmem(const struct task_struct *task)
54901+{
54902+ return 0;
54903+}
54904+
54905+int
54906+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54907+{
54908+ return 0;
54909+}
54910+
54911+int
54912+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54913+{
54914+ return 0;
54915+}
54916+
54917+void
54918+gr_set_kernel_label(struct task_struct *task)
54919+{
54920+ return;
54921+}
54922+
54923+int
54924+gr_check_user_change(int real, int effective, int fs)
54925+{
54926+ return 0;
54927+}
54928+
54929+int
54930+gr_check_group_change(int real, int effective, int fs)
54931+{
54932+ return 0;
54933+}
54934+
54935+int gr_acl_enable_at_secure(void)
54936+{
54937+ return 0;
54938+}
54939+
54940+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54941+{
54942+ return dentry->d_inode->i_sb->s_dev;
54943+}
54944+
54945+EXPORT_SYMBOL(gr_learn_resource);
54946+EXPORT_SYMBOL(gr_set_kernel_label);
54947+#ifdef CONFIG_SECURITY
54948+EXPORT_SYMBOL(gr_check_user_change);
54949+EXPORT_SYMBOL(gr_check_group_change);
54950+#endif
54951diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54952new file mode 100644
54953index 0000000..2b05ada
54954--- /dev/null
54955+++ b/grsecurity/grsec_exec.c
54956@@ -0,0 +1,146 @@
54957+#include <linux/kernel.h>
54958+#include <linux/sched.h>
54959+#include <linux/file.h>
54960+#include <linux/binfmts.h>
54961+#include <linux/fs.h>
54962+#include <linux/types.h>
54963+#include <linux/grdefs.h>
54964+#include <linux/grsecurity.h>
54965+#include <linux/grinternal.h>
54966+#include <linux/capability.h>
54967+#include <linux/module.h>
54968+
54969+#include <asm/uaccess.h>
54970+
54971+#ifdef CONFIG_GRKERNSEC_EXECLOG
54972+static char gr_exec_arg_buf[132];
54973+static DEFINE_MUTEX(gr_exec_arg_mutex);
54974+#endif
54975+
54976+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54977+
54978+void
54979+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54980+{
54981+#ifdef CONFIG_GRKERNSEC_EXECLOG
54982+ char *grarg = gr_exec_arg_buf;
54983+ unsigned int i, x, execlen = 0;
54984+ char c;
54985+
54986+ if (!((grsec_enable_execlog && grsec_enable_group &&
54987+ in_group_p(grsec_audit_gid))
54988+ || (grsec_enable_execlog && !grsec_enable_group)))
54989+ return;
54990+
54991+ mutex_lock(&gr_exec_arg_mutex);
54992+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54993+
54994+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54995+ const char __user *p;
54996+ unsigned int len;
54997+
54998+ p = get_user_arg_ptr(argv, i);
54999+ if (IS_ERR(p))
55000+ goto log;
55001+
55002+ len = strnlen_user(p, 128 - execlen);
55003+ if (len > 128 - execlen)
55004+ len = 128 - execlen;
55005+ else if (len > 0)
55006+ len--;
55007+ if (copy_from_user(grarg + execlen, p, len))
55008+ goto log;
55009+
55010+ /* rewrite unprintable characters */
55011+ for (x = 0; x < len; x++) {
55012+ c = *(grarg + execlen + x);
55013+ if (c < 32 || c > 126)
55014+ *(grarg + execlen + x) = ' ';
55015+ }
55016+
55017+ execlen += len;
55018+ *(grarg + execlen) = ' ';
55019+ *(grarg + execlen + 1) = '\0';
55020+ execlen++;
55021+ }
55022+
55023+ log:
55024+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55025+ bprm->file->f_path.mnt, grarg);
55026+ mutex_unlock(&gr_exec_arg_mutex);
55027+#endif
55028+ return;
55029+}
55030+
55031+#ifdef CONFIG_GRKERNSEC
55032+extern int gr_acl_is_capable(const int cap);
55033+extern int gr_acl_is_capable_nolog(const int cap);
55034+extern int gr_chroot_is_capable(const int cap);
55035+extern int gr_chroot_is_capable_nolog(const int cap);
55036+#endif
55037+
55038+const char *captab_log[] = {
55039+ "CAP_CHOWN",
55040+ "CAP_DAC_OVERRIDE",
55041+ "CAP_DAC_READ_SEARCH",
55042+ "CAP_FOWNER",
55043+ "CAP_FSETID",
55044+ "CAP_KILL",
55045+ "CAP_SETGID",
55046+ "CAP_SETUID",
55047+ "CAP_SETPCAP",
55048+ "CAP_LINUX_IMMUTABLE",
55049+ "CAP_NET_BIND_SERVICE",
55050+ "CAP_NET_BROADCAST",
55051+ "CAP_NET_ADMIN",
55052+ "CAP_NET_RAW",
55053+ "CAP_IPC_LOCK",
55054+ "CAP_IPC_OWNER",
55055+ "CAP_SYS_MODULE",
55056+ "CAP_SYS_RAWIO",
55057+ "CAP_SYS_CHROOT",
55058+ "CAP_SYS_PTRACE",
55059+ "CAP_SYS_PACCT",
55060+ "CAP_SYS_ADMIN",
55061+ "CAP_SYS_BOOT",
55062+ "CAP_SYS_NICE",
55063+ "CAP_SYS_RESOURCE",
55064+ "CAP_SYS_TIME",
55065+ "CAP_SYS_TTY_CONFIG",
55066+ "CAP_MKNOD",
55067+ "CAP_LEASE",
55068+ "CAP_AUDIT_WRITE",
55069+ "CAP_AUDIT_CONTROL",
55070+ "CAP_SETFCAP",
55071+ "CAP_MAC_OVERRIDE",
55072+ "CAP_MAC_ADMIN",
55073+ "CAP_SYSLOG",
55074+ "CAP_WAKE_ALARM"
55075+};
55076+
55077+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55078+
55079+int gr_is_capable(const int cap)
55080+{
55081+#ifdef CONFIG_GRKERNSEC
55082+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55083+ return 1;
55084+ return 0;
55085+#else
55086+ return 1;
55087+#endif
55088+}
55089+
55090+int gr_is_capable_nolog(const int cap)
55091+{
55092+#ifdef CONFIG_GRKERNSEC
55093+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55094+ return 1;
55095+ return 0;
55096+#else
55097+ return 1;
55098+#endif
55099+}
55100+
55101+EXPORT_SYMBOL(gr_is_capable);
55102+EXPORT_SYMBOL(gr_is_capable_nolog);
55103diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55104new file mode 100644
55105index 0000000..d3ee748
55106--- /dev/null
55107+++ b/grsecurity/grsec_fifo.c
55108@@ -0,0 +1,24 @@
55109+#include <linux/kernel.h>
55110+#include <linux/sched.h>
55111+#include <linux/fs.h>
55112+#include <linux/file.h>
55113+#include <linux/grinternal.h>
55114+
55115+int
55116+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55117+ const struct dentry *dir, const int flag, const int acc_mode)
55118+{
55119+#ifdef CONFIG_GRKERNSEC_FIFO
55120+ const struct cred *cred = current_cred();
55121+
55122+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55123+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55124+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55125+ (cred->fsuid != dentry->d_inode->i_uid)) {
55126+ if (!inode_permission(dentry->d_inode, acc_mode))
55127+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55128+ return -EACCES;
55129+ }
55130+#endif
55131+ return 0;
55132+}
55133diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55134new file mode 100644
55135index 0000000..8ca18bf
55136--- /dev/null
55137+++ b/grsecurity/grsec_fork.c
55138@@ -0,0 +1,23 @@
55139+#include <linux/kernel.h>
55140+#include <linux/sched.h>
55141+#include <linux/grsecurity.h>
55142+#include <linux/grinternal.h>
55143+#include <linux/errno.h>
55144+
55145+void
55146+gr_log_forkfail(const int retval)
55147+{
55148+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55149+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55150+ switch (retval) {
55151+ case -EAGAIN:
55152+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55153+ break;
55154+ case -ENOMEM:
55155+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55156+ break;
55157+ }
55158+ }
55159+#endif
55160+ return;
55161+}
55162diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55163new file mode 100644
55164index 0000000..01ddde4
55165--- /dev/null
55166+++ b/grsecurity/grsec_init.c
55167@@ -0,0 +1,277 @@
55168+#include <linux/kernel.h>
55169+#include <linux/sched.h>
55170+#include <linux/mm.h>
55171+#include <linux/gracl.h>
55172+#include <linux/slab.h>
55173+#include <linux/vmalloc.h>
55174+#include <linux/percpu.h>
55175+#include <linux/module.h>
55176+
55177+int grsec_enable_ptrace_readexec;
55178+int grsec_enable_setxid;
55179+int grsec_enable_brute;
55180+int grsec_enable_link;
55181+int grsec_enable_dmesg;
55182+int grsec_enable_harden_ptrace;
55183+int grsec_enable_fifo;
55184+int grsec_enable_execlog;
55185+int grsec_enable_signal;
55186+int grsec_enable_forkfail;
55187+int grsec_enable_audit_ptrace;
55188+int grsec_enable_time;
55189+int grsec_enable_audit_textrel;
55190+int grsec_enable_group;
55191+int grsec_audit_gid;
55192+int grsec_enable_chdir;
55193+int grsec_enable_mount;
55194+int grsec_enable_rofs;
55195+int grsec_enable_chroot_findtask;
55196+int grsec_enable_chroot_mount;
55197+int grsec_enable_chroot_shmat;
55198+int grsec_enable_chroot_fchdir;
55199+int grsec_enable_chroot_double;
55200+int grsec_enable_chroot_pivot;
55201+int grsec_enable_chroot_chdir;
55202+int grsec_enable_chroot_chmod;
55203+int grsec_enable_chroot_mknod;
55204+int grsec_enable_chroot_nice;
55205+int grsec_enable_chroot_execlog;
55206+int grsec_enable_chroot_caps;
55207+int grsec_enable_chroot_sysctl;
55208+int grsec_enable_chroot_unix;
55209+int grsec_enable_tpe;
55210+int grsec_tpe_gid;
55211+int grsec_enable_blackhole;
55212+#ifdef CONFIG_IPV6_MODULE
55213+EXPORT_SYMBOL(grsec_enable_blackhole);
55214+#endif
55215+int grsec_lastack_retries;
55216+int grsec_enable_tpe_all;
55217+int grsec_enable_tpe_invert;
55218+int grsec_enable_socket_all;
55219+int grsec_socket_all_gid;
55220+int grsec_enable_socket_client;
55221+int grsec_socket_client_gid;
55222+int grsec_enable_socket_server;
55223+int grsec_socket_server_gid;
55224+int grsec_resource_logging;
55225+int grsec_disable_privio;
55226+int grsec_enable_log_rwxmaps;
55227+int grsec_lock;
55228+
55229+DEFINE_SPINLOCK(grsec_alert_lock);
55230+unsigned long grsec_alert_wtime = 0;
55231+unsigned long grsec_alert_fyet = 0;
55232+
55233+DEFINE_SPINLOCK(grsec_audit_lock);
55234+
55235+DEFINE_RWLOCK(grsec_exec_file_lock);
55236+
55237+char *gr_shared_page[4];
55238+
55239+char *gr_alert_log_fmt;
55240+char *gr_audit_log_fmt;
55241+char *gr_alert_log_buf;
55242+char *gr_audit_log_buf;
55243+
55244+extern struct gr_arg *gr_usermode;
55245+extern unsigned char *gr_system_salt;
55246+extern unsigned char *gr_system_sum;
55247+
55248+void __init
55249+grsecurity_init(void)
55250+{
55251+ int j;
55252+ /* create the per-cpu shared pages */
55253+
55254+#ifdef CONFIG_X86
55255+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55256+#endif
55257+
55258+ for (j = 0; j < 4; j++) {
55259+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55260+ if (gr_shared_page[j] == NULL) {
55261+ panic("Unable to allocate grsecurity shared page");
55262+ return;
55263+ }
55264+ }
55265+
55266+ /* allocate log buffers */
55267+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55268+ if (!gr_alert_log_fmt) {
55269+ panic("Unable to allocate grsecurity alert log format buffer");
55270+ return;
55271+ }
55272+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55273+ if (!gr_audit_log_fmt) {
55274+ panic("Unable to allocate grsecurity audit log format buffer");
55275+ return;
55276+ }
55277+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55278+ if (!gr_alert_log_buf) {
55279+ panic("Unable to allocate grsecurity alert log buffer");
55280+ return;
55281+ }
55282+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55283+ if (!gr_audit_log_buf) {
55284+ panic("Unable to allocate grsecurity audit log buffer");
55285+ return;
55286+ }
55287+
55288+ /* allocate memory for authentication structure */
55289+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55290+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55291+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55292+
55293+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55294+ panic("Unable to allocate grsecurity authentication structure");
55295+ return;
55296+ }
55297+
55298+
55299+#ifdef CONFIG_GRKERNSEC_IO
55300+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55301+ grsec_disable_privio = 1;
55302+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55303+ grsec_disable_privio = 1;
55304+#else
55305+ grsec_disable_privio = 0;
55306+#endif
55307+#endif
55308+
55309+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55310+ /* for backward compatibility, tpe_invert always defaults to on if
55311+ enabled in the kernel
55312+ */
55313+ grsec_enable_tpe_invert = 1;
55314+#endif
55315+
55316+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55317+#ifndef CONFIG_GRKERNSEC_SYSCTL
55318+ grsec_lock = 1;
55319+#endif
55320+
55321+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55322+ grsec_enable_audit_textrel = 1;
55323+#endif
55324+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55325+ grsec_enable_log_rwxmaps = 1;
55326+#endif
55327+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55328+ grsec_enable_group = 1;
55329+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55330+#endif
55331+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55332+ grsec_enable_ptrace_readexec = 1;
55333+#endif
55334+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55335+ grsec_enable_chdir = 1;
55336+#endif
55337+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55338+ grsec_enable_harden_ptrace = 1;
55339+#endif
55340+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55341+ grsec_enable_mount = 1;
55342+#endif
55343+#ifdef CONFIG_GRKERNSEC_LINK
55344+ grsec_enable_link = 1;
55345+#endif
55346+#ifdef CONFIG_GRKERNSEC_BRUTE
55347+ grsec_enable_brute = 1;
55348+#endif
55349+#ifdef CONFIG_GRKERNSEC_DMESG
55350+ grsec_enable_dmesg = 1;
55351+#endif
55352+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55353+ grsec_enable_blackhole = 1;
55354+ grsec_lastack_retries = 4;
55355+#endif
55356+#ifdef CONFIG_GRKERNSEC_FIFO
55357+ grsec_enable_fifo = 1;
55358+#endif
55359+#ifdef CONFIG_GRKERNSEC_EXECLOG
55360+ grsec_enable_execlog = 1;
55361+#endif
55362+#ifdef CONFIG_GRKERNSEC_SETXID
55363+ grsec_enable_setxid = 1;
55364+#endif
55365+#ifdef CONFIG_GRKERNSEC_SIGNAL
55366+ grsec_enable_signal = 1;
55367+#endif
55368+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55369+ grsec_enable_forkfail = 1;
55370+#endif
55371+#ifdef CONFIG_GRKERNSEC_TIME
55372+ grsec_enable_time = 1;
55373+#endif
55374+#ifdef CONFIG_GRKERNSEC_RESLOG
55375+ grsec_resource_logging = 1;
55376+#endif
55377+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55378+ grsec_enable_chroot_findtask = 1;
55379+#endif
55380+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55381+ grsec_enable_chroot_unix = 1;
55382+#endif
55383+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55384+ grsec_enable_chroot_mount = 1;
55385+#endif
55386+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55387+ grsec_enable_chroot_fchdir = 1;
55388+#endif
55389+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55390+ grsec_enable_chroot_shmat = 1;
55391+#endif
55392+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55393+ grsec_enable_audit_ptrace = 1;
55394+#endif
55395+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55396+ grsec_enable_chroot_double = 1;
55397+#endif
55398+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55399+ grsec_enable_chroot_pivot = 1;
55400+#endif
55401+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55402+ grsec_enable_chroot_chdir = 1;
55403+#endif
55404+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55405+ grsec_enable_chroot_chmod = 1;
55406+#endif
55407+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55408+ grsec_enable_chroot_mknod = 1;
55409+#endif
55410+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55411+ grsec_enable_chroot_nice = 1;
55412+#endif
55413+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55414+ grsec_enable_chroot_execlog = 1;
55415+#endif
55416+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55417+ grsec_enable_chroot_caps = 1;
55418+#endif
55419+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55420+ grsec_enable_chroot_sysctl = 1;
55421+#endif
55422+#ifdef CONFIG_GRKERNSEC_TPE
55423+ grsec_enable_tpe = 1;
55424+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55425+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55426+ grsec_enable_tpe_all = 1;
55427+#endif
55428+#endif
55429+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55430+ grsec_enable_socket_all = 1;
55431+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55432+#endif
55433+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55434+ grsec_enable_socket_client = 1;
55435+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55436+#endif
55437+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55438+ grsec_enable_socket_server = 1;
55439+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55440+#endif
55441+#endif
55442+
55443+ return;
55444+}
55445diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55446new file mode 100644
55447index 0000000..3efe141
55448--- /dev/null
55449+++ b/grsecurity/grsec_link.c
55450@@ -0,0 +1,43 @@
55451+#include <linux/kernel.h>
55452+#include <linux/sched.h>
55453+#include <linux/fs.h>
55454+#include <linux/file.h>
55455+#include <linux/grinternal.h>
55456+
55457+int
55458+gr_handle_follow_link(const struct inode *parent,
55459+ const struct inode *inode,
55460+ const struct dentry *dentry, const struct vfsmount *mnt)
55461+{
55462+#ifdef CONFIG_GRKERNSEC_LINK
55463+ const struct cred *cred = current_cred();
55464+
55465+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55466+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55467+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55468+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55469+ return -EACCES;
55470+ }
55471+#endif
55472+ return 0;
55473+}
55474+
55475+int
55476+gr_handle_hardlink(const struct dentry *dentry,
55477+ const struct vfsmount *mnt,
55478+ struct inode *inode, const int mode, const char *to)
55479+{
55480+#ifdef CONFIG_GRKERNSEC_LINK
55481+ const struct cred *cred = current_cred();
55482+
55483+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55484+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55485+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55486+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55487+ !capable(CAP_FOWNER) && cred->uid) {
55488+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55489+ return -EPERM;
55490+ }
55491+#endif
55492+ return 0;
55493+}
55494diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55495new file mode 100644
55496index 0000000..a45d2e9
55497--- /dev/null
55498+++ b/grsecurity/grsec_log.c
55499@@ -0,0 +1,322 @@
55500+#include <linux/kernel.h>
55501+#include <linux/sched.h>
55502+#include <linux/file.h>
55503+#include <linux/tty.h>
55504+#include <linux/fs.h>
55505+#include <linux/grinternal.h>
55506+
55507+#ifdef CONFIG_TREE_PREEMPT_RCU
55508+#define DISABLE_PREEMPT() preempt_disable()
55509+#define ENABLE_PREEMPT() preempt_enable()
55510+#else
55511+#define DISABLE_PREEMPT()
55512+#define ENABLE_PREEMPT()
55513+#endif
55514+
55515+#define BEGIN_LOCKS(x) \
55516+ DISABLE_PREEMPT(); \
55517+ rcu_read_lock(); \
55518+ read_lock(&tasklist_lock); \
55519+ read_lock(&grsec_exec_file_lock); \
55520+ if (x != GR_DO_AUDIT) \
55521+ spin_lock(&grsec_alert_lock); \
55522+ else \
55523+ spin_lock(&grsec_audit_lock)
55524+
55525+#define END_LOCKS(x) \
55526+ if (x != GR_DO_AUDIT) \
55527+ spin_unlock(&grsec_alert_lock); \
55528+ else \
55529+ spin_unlock(&grsec_audit_lock); \
55530+ read_unlock(&grsec_exec_file_lock); \
55531+ read_unlock(&tasklist_lock); \
55532+ rcu_read_unlock(); \
55533+ ENABLE_PREEMPT(); \
55534+ if (x == GR_DONT_AUDIT) \
55535+ gr_handle_alertkill(current)
55536+
55537+enum {
55538+ FLOODING,
55539+ NO_FLOODING
55540+};
55541+
55542+extern char *gr_alert_log_fmt;
55543+extern char *gr_audit_log_fmt;
55544+extern char *gr_alert_log_buf;
55545+extern char *gr_audit_log_buf;
55546+
55547+static int gr_log_start(int audit)
55548+{
55549+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55550+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55551+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55552+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55553+ unsigned long curr_secs = get_seconds();
55554+
55555+ if (audit == GR_DO_AUDIT)
55556+ goto set_fmt;
55557+
55558+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55559+ grsec_alert_wtime = curr_secs;
55560+ grsec_alert_fyet = 0;
55561+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55562+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55563+ grsec_alert_fyet++;
55564+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55565+ grsec_alert_wtime = curr_secs;
55566+ grsec_alert_fyet++;
55567+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55568+ return FLOODING;
55569+ }
55570+ else return FLOODING;
55571+
55572+set_fmt:
55573+#endif
55574+ memset(buf, 0, PAGE_SIZE);
55575+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55576+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55577+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55578+ } else if (current->signal->curr_ip) {
55579+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55580+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55581+ } else if (gr_acl_is_enabled()) {
55582+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55583+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55584+ } else {
55585+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55586+ strcpy(buf, fmt);
55587+ }
55588+
55589+ return NO_FLOODING;
55590+}
55591+
55592+static void gr_log_middle(int audit, const char *msg, va_list ap)
55593+ __attribute__ ((format (printf, 2, 0)));
55594+
55595+static void gr_log_middle(int audit, const char *msg, va_list ap)
55596+{
55597+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55598+ unsigned int len = strlen(buf);
55599+
55600+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55601+
55602+ return;
55603+}
55604+
55605+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55606+ __attribute__ ((format (printf, 2, 3)));
55607+
55608+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55609+{
55610+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55611+ unsigned int len = strlen(buf);
55612+ va_list ap;
55613+
55614+ va_start(ap, msg);
55615+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55616+ va_end(ap);
55617+
55618+ return;
55619+}
55620+
55621+static void gr_log_end(int audit, int append_default)
55622+{
55623+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55624+
55625+ if (append_default) {
55626+ unsigned int len = strlen(buf);
55627+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55628+ }
55629+
55630+ printk("%s\n", buf);
55631+
55632+ return;
55633+}
55634+
55635+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55636+{
55637+ int logtype;
55638+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55639+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55640+ void *voidptr = NULL;
55641+ int num1 = 0, num2 = 0;
55642+ unsigned long ulong1 = 0, ulong2 = 0;
55643+ struct dentry *dentry = NULL;
55644+ struct vfsmount *mnt = NULL;
55645+ struct file *file = NULL;
55646+ struct task_struct *task = NULL;
55647+ const struct cred *cred, *pcred;
55648+ va_list ap;
55649+
55650+ BEGIN_LOCKS(audit);
55651+ logtype = gr_log_start(audit);
55652+ if (logtype == FLOODING) {
55653+ END_LOCKS(audit);
55654+ return;
55655+ }
55656+ va_start(ap, argtypes);
55657+ switch (argtypes) {
55658+ case GR_TTYSNIFF:
55659+ task = va_arg(ap, struct task_struct *);
55660+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55661+ break;
55662+ case GR_SYSCTL_HIDDEN:
55663+ str1 = va_arg(ap, char *);
55664+ gr_log_middle_varargs(audit, msg, result, str1);
55665+ break;
55666+ case GR_RBAC:
55667+ dentry = va_arg(ap, struct dentry *);
55668+ mnt = va_arg(ap, struct vfsmount *);
55669+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55670+ break;
55671+ case GR_RBAC_STR:
55672+ dentry = va_arg(ap, struct dentry *);
55673+ mnt = va_arg(ap, struct vfsmount *);
55674+ str1 = va_arg(ap, char *);
55675+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55676+ break;
55677+ case GR_STR_RBAC:
55678+ str1 = va_arg(ap, char *);
55679+ dentry = va_arg(ap, struct dentry *);
55680+ mnt = va_arg(ap, struct vfsmount *);
55681+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55682+ break;
55683+ case GR_RBAC_MODE2:
55684+ dentry = va_arg(ap, struct dentry *);
55685+ mnt = va_arg(ap, struct vfsmount *);
55686+ str1 = va_arg(ap, char *);
55687+ str2 = va_arg(ap, char *);
55688+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55689+ break;
55690+ case GR_RBAC_MODE3:
55691+ dentry = va_arg(ap, struct dentry *);
55692+ mnt = va_arg(ap, struct vfsmount *);
55693+ str1 = va_arg(ap, char *);
55694+ str2 = va_arg(ap, char *);
55695+ str3 = va_arg(ap, char *);
55696+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55697+ break;
55698+ case GR_FILENAME:
55699+ dentry = va_arg(ap, struct dentry *);
55700+ mnt = va_arg(ap, struct vfsmount *);
55701+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55702+ break;
55703+ case GR_STR_FILENAME:
55704+ str1 = va_arg(ap, char *);
55705+ dentry = va_arg(ap, struct dentry *);
55706+ mnt = va_arg(ap, struct vfsmount *);
55707+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55708+ break;
55709+ case GR_FILENAME_STR:
55710+ dentry = va_arg(ap, struct dentry *);
55711+ mnt = va_arg(ap, struct vfsmount *);
55712+ str1 = va_arg(ap, char *);
55713+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55714+ break;
55715+ case GR_FILENAME_TWO_INT:
55716+ dentry = va_arg(ap, struct dentry *);
55717+ mnt = va_arg(ap, struct vfsmount *);
55718+ num1 = va_arg(ap, int);
55719+ num2 = va_arg(ap, int);
55720+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55721+ break;
55722+ case GR_FILENAME_TWO_INT_STR:
55723+ dentry = va_arg(ap, struct dentry *);
55724+ mnt = va_arg(ap, struct vfsmount *);
55725+ num1 = va_arg(ap, int);
55726+ num2 = va_arg(ap, int);
55727+ str1 = va_arg(ap, char *);
55728+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55729+ break;
55730+ case GR_TEXTREL:
55731+ file = va_arg(ap, struct file *);
55732+ ulong1 = va_arg(ap, unsigned long);
55733+ ulong2 = va_arg(ap, unsigned long);
55734+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55735+ break;
55736+ case GR_PTRACE:
55737+ task = va_arg(ap, struct task_struct *);
55738+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55739+ break;
55740+ case GR_RESOURCE:
55741+ task = va_arg(ap, struct task_struct *);
55742+ cred = __task_cred(task);
55743+ pcred = __task_cred(task->real_parent);
55744+ ulong1 = va_arg(ap, unsigned long);
55745+ str1 = va_arg(ap, char *);
55746+ ulong2 = va_arg(ap, unsigned long);
55747+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55748+ break;
55749+ case GR_CAP:
55750+ task = va_arg(ap, struct task_struct *);
55751+ cred = __task_cred(task);
55752+ pcred = __task_cred(task->real_parent);
55753+ str1 = va_arg(ap, char *);
55754+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55755+ break;
55756+ case GR_SIG:
55757+ str1 = va_arg(ap, char *);
55758+ voidptr = va_arg(ap, void *);
55759+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55760+ break;
55761+ case GR_SIG2:
55762+ task = va_arg(ap, struct task_struct *);
55763+ cred = __task_cred(task);
55764+ pcred = __task_cred(task->real_parent);
55765+ num1 = va_arg(ap, int);
55766+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55767+ break;
55768+ case GR_CRASH1:
55769+ task = va_arg(ap, struct task_struct *);
55770+ cred = __task_cred(task);
55771+ pcred = __task_cred(task->real_parent);
55772+ ulong1 = va_arg(ap, unsigned long);
55773+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55774+ break;
55775+ case GR_CRASH2:
55776+ task = va_arg(ap, struct task_struct *);
55777+ cred = __task_cred(task);
55778+ pcred = __task_cred(task->real_parent);
55779+ ulong1 = va_arg(ap, unsigned long);
55780+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55781+ break;
55782+ case GR_RWXMAP:
55783+ file = va_arg(ap, struct file *);
55784+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55785+ break;
55786+ case GR_PSACCT:
55787+ {
55788+ unsigned int wday, cday;
55789+ __u8 whr, chr;
55790+ __u8 wmin, cmin;
55791+ __u8 wsec, csec;
55792+ char cur_tty[64] = { 0 };
55793+ char parent_tty[64] = { 0 };
55794+
55795+ task = va_arg(ap, struct task_struct *);
55796+ wday = va_arg(ap, unsigned int);
55797+ cday = va_arg(ap, unsigned int);
55798+ whr = va_arg(ap, int);
55799+ chr = va_arg(ap, int);
55800+ wmin = va_arg(ap, int);
55801+ cmin = va_arg(ap, int);
55802+ wsec = va_arg(ap, int);
55803+ csec = va_arg(ap, int);
55804+ ulong1 = va_arg(ap, unsigned long);
55805+ cred = __task_cred(task);
55806+ pcred = __task_cred(task->real_parent);
55807+
55808+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55809+ }
55810+ break;
55811+ default:
55812+ gr_log_middle(audit, msg, ap);
55813+ }
55814+ va_end(ap);
55815+ // these don't need DEFAULTSECARGS printed on the end
55816+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55817+ gr_log_end(audit, 0);
55818+ else
55819+ gr_log_end(audit, 1);
55820+ END_LOCKS(audit);
55821+}
55822diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55823new file mode 100644
55824index 0000000..6c0416b
55825--- /dev/null
55826+++ b/grsecurity/grsec_mem.c
55827@@ -0,0 +1,33 @@
55828+#include <linux/kernel.h>
55829+#include <linux/sched.h>
55830+#include <linux/mm.h>
55831+#include <linux/mman.h>
55832+#include <linux/grinternal.h>
55833+
55834+void
55835+gr_handle_ioperm(void)
55836+{
55837+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55838+ return;
55839+}
55840+
55841+void
55842+gr_handle_iopl(void)
55843+{
55844+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55845+ return;
55846+}
55847+
55848+void
55849+gr_handle_mem_readwrite(u64 from, u64 to)
55850+{
55851+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55852+ return;
55853+}
55854+
55855+void
55856+gr_handle_vm86(void)
55857+{
55858+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55859+ return;
55860+}
55861diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55862new file mode 100644
55863index 0000000..2131422
55864--- /dev/null
55865+++ b/grsecurity/grsec_mount.c
55866@@ -0,0 +1,62 @@
55867+#include <linux/kernel.h>
55868+#include <linux/sched.h>
55869+#include <linux/mount.h>
55870+#include <linux/grsecurity.h>
55871+#include <linux/grinternal.h>
55872+
55873+void
55874+gr_log_remount(const char *devname, const int retval)
55875+{
55876+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55877+ if (grsec_enable_mount && (retval >= 0))
55878+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55879+#endif
55880+ return;
55881+}
55882+
55883+void
55884+gr_log_unmount(const char *devname, const int retval)
55885+{
55886+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55887+ if (grsec_enable_mount && (retval >= 0))
55888+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55889+#endif
55890+ return;
55891+}
55892+
55893+void
55894+gr_log_mount(const char *from, const char *to, const int retval)
55895+{
55896+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55897+ if (grsec_enable_mount && (retval >= 0))
55898+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55899+#endif
55900+ return;
55901+}
55902+
55903+int
55904+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55905+{
55906+#ifdef CONFIG_GRKERNSEC_ROFS
55907+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55908+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55909+ return -EPERM;
55910+ } else
55911+ return 0;
55912+#endif
55913+ return 0;
55914+}
55915+
55916+int
55917+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55918+{
55919+#ifdef CONFIG_GRKERNSEC_ROFS
55920+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55921+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55922+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55923+ return -EPERM;
55924+ } else
55925+ return 0;
55926+#endif
55927+ return 0;
55928+}
55929diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55930new file mode 100644
55931index 0000000..a3b12a0
55932--- /dev/null
55933+++ b/grsecurity/grsec_pax.c
55934@@ -0,0 +1,36 @@
55935+#include <linux/kernel.h>
55936+#include <linux/sched.h>
55937+#include <linux/mm.h>
55938+#include <linux/file.h>
55939+#include <linux/grinternal.h>
55940+#include <linux/grsecurity.h>
55941+
55942+void
55943+gr_log_textrel(struct vm_area_struct * vma)
55944+{
55945+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55946+ if (grsec_enable_audit_textrel)
55947+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55948+#endif
55949+ return;
55950+}
55951+
55952+void
55953+gr_log_rwxmmap(struct file *file)
55954+{
55955+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55956+ if (grsec_enable_log_rwxmaps)
55957+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55958+#endif
55959+ return;
55960+}
55961+
55962+void
55963+gr_log_rwxmprotect(struct file *file)
55964+{
55965+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55966+ if (grsec_enable_log_rwxmaps)
55967+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55968+#endif
55969+ return;
55970+}
55971diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55972new file mode 100644
55973index 0000000..f7f29aa
55974--- /dev/null
55975+++ b/grsecurity/grsec_ptrace.c
55976@@ -0,0 +1,30 @@
55977+#include <linux/kernel.h>
55978+#include <linux/sched.h>
55979+#include <linux/grinternal.h>
55980+#include <linux/security.h>
55981+
55982+void
55983+gr_audit_ptrace(struct task_struct *task)
55984+{
55985+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55986+ if (grsec_enable_audit_ptrace)
55987+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55988+#endif
55989+ return;
55990+}
55991+
55992+int
55993+gr_ptrace_readexec(struct file *file, int unsafe_flags)
55994+{
55995+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55996+ const struct dentry *dentry = file->f_path.dentry;
55997+ const struct vfsmount *mnt = file->f_path.mnt;
55998+
55999+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56000+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56001+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56002+ return -EACCES;
56003+ }
56004+#endif
56005+ return 0;
56006+}
56007diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56008new file mode 100644
56009index 0000000..7a5b2de
56010--- /dev/null
56011+++ b/grsecurity/grsec_sig.c
56012@@ -0,0 +1,207 @@
56013+#include <linux/kernel.h>
56014+#include <linux/sched.h>
56015+#include <linux/delay.h>
56016+#include <linux/grsecurity.h>
56017+#include <linux/grinternal.h>
56018+#include <linux/hardirq.h>
56019+
56020+char *signames[] = {
56021+ [SIGSEGV] = "Segmentation fault",
56022+ [SIGILL] = "Illegal instruction",
56023+ [SIGABRT] = "Abort",
56024+ [SIGBUS] = "Invalid alignment/Bus error"
56025+};
56026+
56027+void
56028+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56029+{
56030+#ifdef CONFIG_GRKERNSEC_SIGNAL
56031+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56032+ (sig == SIGABRT) || (sig == SIGBUS))) {
56033+ if (t->pid == current->pid) {
56034+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56035+ } else {
56036+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56037+ }
56038+ }
56039+#endif
56040+ return;
56041+}
56042+
56043+int
56044+gr_handle_signal(const struct task_struct *p, const int sig)
56045+{
56046+#ifdef CONFIG_GRKERNSEC
56047+ /* ignore the 0 signal for protected task checks */
56048+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56049+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56050+ return -EPERM;
56051+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56052+ return -EPERM;
56053+ }
56054+#endif
56055+ return 0;
56056+}
56057+
56058+#ifdef CONFIG_GRKERNSEC
56059+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56060+
56061+int gr_fake_force_sig(int sig, struct task_struct *t)
56062+{
56063+ unsigned long int flags;
56064+ int ret, blocked, ignored;
56065+ struct k_sigaction *action;
56066+
56067+ spin_lock_irqsave(&t->sighand->siglock, flags);
56068+ action = &t->sighand->action[sig-1];
56069+ ignored = action->sa.sa_handler == SIG_IGN;
56070+ blocked = sigismember(&t->blocked, sig);
56071+ if (blocked || ignored) {
56072+ action->sa.sa_handler = SIG_DFL;
56073+ if (blocked) {
56074+ sigdelset(&t->blocked, sig);
56075+ recalc_sigpending_and_wake(t);
56076+ }
56077+ }
56078+ if (action->sa.sa_handler == SIG_DFL)
56079+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
56080+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56081+
56082+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
56083+
56084+ return ret;
56085+}
56086+#endif
56087+
56088+#ifdef CONFIG_GRKERNSEC_BRUTE
56089+#define GR_USER_BAN_TIME (15 * 60)
56090+
56091+static int __get_dumpable(unsigned long mm_flags)
56092+{
56093+ int ret;
56094+
56095+ ret = mm_flags & MMF_DUMPABLE_MASK;
56096+ return (ret >= 2) ? 2 : ret;
56097+}
56098+#endif
56099+
56100+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56101+{
56102+#ifdef CONFIG_GRKERNSEC_BRUTE
56103+ uid_t uid = 0;
56104+
56105+ if (!grsec_enable_brute)
56106+ return;
56107+
56108+ rcu_read_lock();
56109+ read_lock(&tasklist_lock);
56110+ read_lock(&grsec_exec_file_lock);
56111+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56112+ p->real_parent->brute = 1;
56113+ else {
56114+ const struct cred *cred = __task_cred(p), *cred2;
56115+ struct task_struct *tsk, *tsk2;
56116+
56117+ if (!__get_dumpable(mm_flags) && cred->uid) {
56118+ struct user_struct *user;
56119+
56120+ uid = cred->uid;
56121+
56122+ /* this is put upon execution past expiration */
56123+ user = find_user(uid);
56124+ if (user == NULL)
56125+ goto unlock;
56126+ user->banned = 1;
56127+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56128+ if (user->ban_expires == ~0UL)
56129+ user->ban_expires--;
56130+
56131+ do_each_thread(tsk2, tsk) {
56132+ cred2 = __task_cred(tsk);
56133+ if (tsk != p && cred2->uid == uid)
56134+ gr_fake_force_sig(SIGKILL, tsk);
56135+ } while_each_thread(tsk2, tsk);
56136+ }
56137+ }
56138+unlock:
56139+ read_unlock(&grsec_exec_file_lock);
56140+ read_unlock(&tasklist_lock);
56141+ rcu_read_unlock();
56142+
56143+ if (uid)
56144+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56145+
56146+#endif
56147+ return;
56148+}
56149+
56150+void gr_handle_brute_check(void)
56151+{
56152+#ifdef CONFIG_GRKERNSEC_BRUTE
56153+ if (current->brute)
56154+ msleep(30 * 1000);
56155+#endif
56156+ return;
56157+}
56158+
56159+void gr_handle_kernel_exploit(void)
56160+{
56161+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56162+ const struct cred *cred;
56163+ struct task_struct *tsk, *tsk2;
56164+ struct user_struct *user;
56165+ uid_t uid;
56166+
56167+ if (in_irq() || in_serving_softirq() || in_nmi())
56168+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56169+
56170+ uid = current_uid();
56171+
56172+ if (uid == 0)
56173+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
56174+ else {
56175+ /* kill all the processes of this user, hold a reference
56176+ to their creds struct, and prevent them from creating
56177+ another process until system reset
56178+ */
56179+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56180+ /* we intentionally leak this ref */
56181+ user = get_uid(current->cred->user);
56182+ if (user) {
56183+ user->banned = 1;
56184+ user->ban_expires = ~0UL;
56185+ }
56186+
56187+ read_lock(&tasklist_lock);
56188+ do_each_thread(tsk2, tsk) {
56189+ cred = __task_cred(tsk);
56190+ if (cred->uid == uid)
56191+ gr_fake_force_sig(SIGKILL, tsk);
56192+ } while_each_thread(tsk2, tsk);
56193+ read_unlock(&tasklist_lock);
56194+ }
56195+#endif
56196+}
56197+
56198+int __gr_process_user_ban(struct user_struct *user)
56199+{
56200+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56201+ if (unlikely(user->banned)) {
56202+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56203+ user->banned = 0;
56204+ user->ban_expires = 0;
56205+ free_uid(user);
56206+ } else
56207+ return -EPERM;
56208+ }
56209+#endif
56210+ return 0;
56211+}
56212+
56213+int gr_process_user_ban(void)
56214+{
56215+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56216+ return __gr_process_user_ban(current->cred->user);
56217+#endif
56218+ return 0;
56219+}
56220diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56221new file mode 100644
56222index 0000000..4030d57
56223--- /dev/null
56224+++ b/grsecurity/grsec_sock.c
56225@@ -0,0 +1,244 @@
56226+#include <linux/kernel.h>
56227+#include <linux/module.h>
56228+#include <linux/sched.h>
56229+#include <linux/file.h>
56230+#include <linux/net.h>
56231+#include <linux/in.h>
56232+#include <linux/ip.h>
56233+#include <net/sock.h>
56234+#include <net/inet_sock.h>
56235+#include <linux/grsecurity.h>
56236+#include <linux/grinternal.h>
56237+#include <linux/gracl.h>
56238+
56239+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56240+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56241+
56242+EXPORT_SYMBOL(gr_search_udp_recvmsg);
56243+EXPORT_SYMBOL(gr_search_udp_sendmsg);
56244+
56245+#ifdef CONFIG_UNIX_MODULE
56246+EXPORT_SYMBOL(gr_acl_handle_unix);
56247+EXPORT_SYMBOL(gr_acl_handle_mknod);
56248+EXPORT_SYMBOL(gr_handle_chroot_unix);
56249+EXPORT_SYMBOL(gr_handle_create);
56250+#endif
56251+
56252+#ifdef CONFIG_GRKERNSEC
56253+#define gr_conn_table_size 32749
56254+struct conn_table_entry {
56255+ struct conn_table_entry *next;
56256+ struct signal_struct *sig;
56257+};
56258+
56259+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56260+DEFINE_SPINLOCK(gr_conn_table_lock);
56261+
56262+extern const char * gr_socktype_to_name(unsigned char type);
56263+extern const char * gr_proto_to_name(unsigned char proto);
56264+extern const char * gr_sockfamily_to_name(unsigned char family);
56265+
56266+static __inline__ int
56267+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56268+{
56269+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56270+}
56271+
56272+static __inline__ int
56273+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56274+ __u16 sport, __u16 dport)
56275+{
56276+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56277+ sig->gr_sport == sport && sig->gr_dport == dport))
56278+ return 1;
56279+ else
56280+ return 0;
56281+}
56282+
56283+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56284+{
56285+ struct conn_table_entry **match;
56286+ unsigned int index;
56287+
56288+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56289+ sig->gr_sport, sig->gr_dport,
56290+ gr_conn_table_size);
56291+
56292+ newent->sig = sig;
56293+
56294+ match = &gr_conn_table[index];
56295+ newent->next = *match;
56296+ *match = newent;
56297+
56298+ return;
56299+}
56300+
56301+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56302+{
56303+ struct conn_table_entry *match, *last = NULL;
56304+ unsigned int index;
56305+
56306+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56307+ sig->gr_sport, sig->gr_dport,
56308+ gr_conn_table_size);
56309+
56310+ match = gr_conn_table[index];
56311+ while (match && !conn_match(match->sig,
56312+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56313+ sig->gr_dport)) {
56314+ last = match;
56315+ match = match->next;
56316+ }
56317+
56318+ if (match) {
56319+ if (last)
56320+ last->next = match->next;
56321+ else
56322+ gr_conn_table[index] = NULL;
56323+ kfree(match);
56324+ }
56325+
56326+ return;
56327+}
56328+
56329+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56330+ __u16 sport, __u16 dport)
56331+{
56332+ struct conn_table_entry *match;
56333+ unsigned int index;
56334+
56335+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56336+
56337+ match = gr_conn_table[index];
56338+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56339+ match = match->next;
56340+
56341+ if (match)
56342+ return match->sig;
56343+ else
56344+ return NULL;
56345+}
56346+
56347+#endif
56348+
56349+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56350+{
56351+#ifdef CONFIG_GRKERNSEC
56352+ struct signal_struct *sig = task->signal;
56353+ struct conn_table_entry *newent;
56354+
56355+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56356+ if (newent == NULL)
56357+ return;
56358+ /* no bh lock needed since we are called with bh disabled */
56359+ spin_lock(&gr_conn_table_lock);
56360+ gr_del_task_from_ip_table_nolock(sig);
56361+ sig->gr_saddr = inet->inet_rcv_saddr;
56362+ sig->gr_daddr = inet->inet_daddr;
56363+ sig->gr_sport = inet->inet_sport;
56364+ sig->gr_dport = inet->inet_dport;
56365+ gr_add_to_task_ip_table_nolock(sig, newent);
56366+ spin_unlock(&gr_conn_table_lock);
56367+#endif
56368+ return;
56369+}
56370+
56371+void gr_del_task_from_ip_table(struct task_struct *task)
56372+{
56373+#ifdef CONFIG_GRKERNSEC
56374+ spin_lock_bh(&gr_conn_table_lock);
56375+ gr_del_task_from_ip_table_nolock(task->signal);
56376+ spin_unlock_bh(&gr_conn_table_lock);
56377+#endif
56378+ return;
56379+}
56380+
56381+void
56382+gr_attach_curr_ip(const struct sock *sk)
56383+{
56384+#ifdef CONFIG_GRKERNSEC
56385+ struct signal_struct *p, *set;
56386+ const struct inet_sock *inet = inet_sk(sk);
56387+
56388+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56389+ return;
56390+
56391+ set = current->signal;
56392+
56393+ spin_lock_bh(&gr_conn_table_lock);
56394+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56395+ inet->inet_dport, inet->inet_sport);
56396+ if (unlikely(p != NULL)) {
56397+ set->curr_ip = p->curr_ip;
56398+ set->used_accept = 1;
56399+ gr_del_task_from_ip_table_nolock(p);
56400+ spin_unlock_bh(&gr_conn_table_lock);
56401+ return;
56402+ }
56403+ spin_unlock_bh(&gr_conn_table_lock);
56404+
56405+ set->curr_ip = inet->inet_daddr;
56406+ set->used_accept = 1;
56407+#endif
56408+ return;
56409+}
56410+
56411+int
56412+gr_handle_sock_all(const int family, const int type, const int protocol)
56413+{
56414+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56415+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56416+ (family != AF_UNIX)) {
56417+ if (family == AF_INET)
56418+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56419+ else
56420+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56421+ return -EACCES;
56422+ }
56423+#endif
56424+ return 0;
56425+}
56426+
56427+int
56428+gr_handle_sock_server(const struct sockaddr *sck)
56429+{
56430+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56431+ if (grsec_enable_socket_server &&
56432+ in_group_p(grsec_socket_server_gid) &&
56433+ sck && (sck->sa_family != AF_UNIX) &&
56434+ (sck->sa_family != AF_LOCAL)) {
56435+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56436+ return -EACCES;
56437+ }
56438+#endif
56439+ return 0;
56440+}
56441+
56442+int
56443+gr_handle_sock_server_other(const struct sock *sck)
56444+{
56445+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56446+ if (grsec_enable_socket_server &&
56447+ in_group_p(grsec_socket_server_gid) &&
56448+ sck && (sck->sk_family != AF_UNIX) &&
56449+ (sck->sk_family != AF_LOCAL)) {
56450+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56451+ return -EACCES;
56452+ }
56453+#endif
56454+ return 0;
56455+}
56456+
56457+int
56458+gr_handle_sock_client(const struct sockaddr *sck)
56459+{
56460+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56461+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56462+ sck && (sck->sa_family != AF_UNIX) &&
56463+ (sck->sa_family != AF_LOCAL)) {
56464+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56465+ return -EACCES;
56466+ }
56467+#endif
56468+ return 0;
56469+}
56470diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56471new file mode 100644
56472index 0000000..a1aedd7
56473--- /dev/null
56474+++ b/grsecurity/grsec_sysctl.c
56475@@ -0,0 +1,451 @@
56476+#include <linux/kernel.h>
56477+#include <linux/sched.h>
56478+#include <linux/sysctl.h>
56479+#include <linux/grsecurity.h>
56480+#include <linux/grinternal.h>
56481+
56482+int
56483+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56484+{
56485+#ifdef CONFIG_GRKERNSEC_SYSCTL
56486+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56487+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56488+ return -EACCES;
56489+ }
56490+#endif
56491+ return 0;
56492+}
56493+
56494+#ifdef CONFIG_GRKERNSEC_ROFS
56495+static int __maybe_unused one = 1;
56496+#endif
56497+
56498+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56499+struct ctl_table grsecurity_table[] = {
56500+#ifdef CONFIG_GRKERNSEC_SYSCTL
56501+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56502+#ifdef CONFIG_GRKERNSEC_IO
56503+ {
56504+ .procname = "disable_priv_io",
56505+ .data = &grsec_disable_privio,
56506+ .maxlen = sizeof(int),
56507+ .mode = 0600,
56508+ .proc_handler = &proc_dointvec,
56509+ },
56510+#endif
56511+#endif
56512+#ifdef CONFIG_GRKERNSEC_LINK
56513+ {
56514+ .procname = "linking_restrictions",
56515+ .data = &grsec_enable_link,
56516+ .maxlen = sizeof(int),
56517+ .mode = 0600,
56518+ .proc_handler = &proc_dointvec,
56519+ },
56520+#endif
56521+#ifdef CONFIG_GRKERNSEC_BRUTE
56522+ {
56523+ .procname = "deter_bruteforce",
56524+ .data = &grsec_enable_brute,
56525+ .maxlen = sizeof(int),
56526+ .mode = 0600,
56527+ .proc_handler = &proc_dointvec,
56528+ },
56529+#endif
56530+#ifdef CONFIG_GRKERNSEC_FIFO
56531+ {
56532+ .procname = "fifo_restrictions",
56533+ .data = &grsec_enable_fifo,
56534+ .maxlen = sizeof(int),
56535+ .mode = 0600,
56536+ .proc_handler = &proc_dointvec,
56537+ },
56538+#endif
56539+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56540+ {
56541+ .procname = "ptrace_readexec",
56542+ .data = &grsec_enable_ptrace_readexec,
56543+ .maxlen = sizeof(int),
56544+ .mode = 0600,
56545+ .proc_handler = &proc_dointvec,
56546+ },
56547+#endif
56548+#ifdef CONFIG_GRKERNSEC_SETXID
56549+ {
56550+ .procname = "consistent_setxid",
56551+ .data = &grsec_enable_setxid,
56552+ .maxlen = sizeof(int),
56553+ .mode = 0600,
56554+ .proc_handler = &proc_dointvec,
56555+ },
56556+#endif
56557+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56558+ {
56559+ .procname = "ip_blackhole",
56560+ .data = &grsec_enable_blackhole,
56561+ .maxlen = sizeof(int),
56562+ .mode = 0600,
56563+ .proc_handler = &proc_dointvec,
56564+ },
56565+ {
56566+ .procname = "lastack_retries",
56567+ .data = &grsec_lastack_retries,
56568+ .maxlen = sizeof(int),
56569+ .mode = 0600,
56570+ .proc_handler = &proc_dointvec,
56571+ },
56572+#endif
56573+#ifdef CONFIG_GRKERNSEC_EXECLOG
56574+ {
56575+ .procname = "exec_logging",
56576+ .data = &grsec_enable_execlog,
56577+ .maxlen = sizeof(int),
56578+ .mode = 0600,
56579+ .proc_handler = &proc_dointvec,
56580+ },
56581+#endif
56582+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56583+ {
56584+ .procname = "rwxmap_logging",
56585+ .data = &grsec_enable_log_rwxmaps,
56586+ .maxlen = sizeof(int),
56587+ .mode = 0600,
56588+ .proc_handler = &proc_dointvec,
56589+ },
56590+#endif
56591+#ifdef CONFIG_GRKERNSEC_SIGNAL
56592+ {
56593+ .procname = "signal_logging",
56594+ .data = &grsec_enable_signal,
56595+ .maxlen = sizeof(int),
56596+ .mode = 0600,
56597+ .proc_handler = &proc_dointvec,
56598+ },
56599+#endif
56600+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56601+ {
56602+ .procname = "forkfail_logging",
56603+ .data = &grsec_enable_forkfail,
56604+ .maxlen = sizeof(int),
56605+ .mode = 0600,
56606+ .proc_handler = &proc_dointvec,
56607+ },
56608+#endif
56609+#ifdef CONFIG_GRKERNSEC_TIME
56610+ {
56611+ .procname = "timechange_logging",
56612+ .data = &grsec_enable_time,
56613+ .maxlen = sizeof(int),
56614+ .mode = 0600,
56615+ .proc_handler = &proc_dointvec,
56616+ },
56617+#endif
56618+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56619+ {
56620+ .procname = "chroot_deny_shmat",
56621+ .data = &grsec_enable_chroot_shmat,
56622+ .maxlen = sizeof(int),
56623+ .mode = 0600,
56624+ .proc_handler = &proc_dointvec,
56625+ },
56626+#endif
56627+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56628+ {
56629+ .procname = "chroot_deny_unix",
56630+ .data = &grsec_enable_chroot_unix,
56631+ .maxlen = sizeof(int),
56632+ .mode = 0600,
56633+ .proc_handler = &proc_dointvec,
56634+ },
56635+#endif
56636+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56637+ {
56638+ .procname = "chroot_deny_mount",
56639+ .data = &grsec_enable_chroot_mount,
56640+ .maxlen = sizeof(int),
56641+ .mode = 0600,
56642+ .proc_handler = &proc_dointvec,
56643+ },
56644+#endif
56645+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56646+ {
56647+ .procname = "chroot_deny_fchdir",
56648+ .data = &grsec_enable_chroot_fchdir,
56649+ .maxlen = sizeof(int),
56650+ .mode = 0600,
56651+ .proc_handler = &proc_dointvec,
56652+ },
56653+#endif
56654+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56655+ {
56656+ .procname = "chroot_deny_chroot",
56657+ .data = &grsec_enable_chroot_double,
56658+ .maxlen = sizeof(int),
56659+ .mode = 0600,
56660+ .proc_handler = &proc_dointvec,
56661+ },
56662+#endif
56663+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56664+ {
56665+ .procname = "chroot_deny_pivot",
56666+ .data = &grsec_enable_chroot_pivot,
56667+ .maxlen = sizeof(int),
56668+ .mode = 0600,
56669+ .proc_handler = &proc_dointvec,
56670+ },
56671+#endif
56672+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56673+ {
56674+ .procname = "chroot_enforce_chdir",
56675+ .data = &grsec_enable_chroot_chdir,
56676+ .maxlen = sizeof(int),
56677+ .mode = 0600,
56678+ .proc_handler = &proc_dointvec,
56679+ },
56680+#endif
56681+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56682+ {
56683+ .procname = "chroot_deny_chmod",
56684+ .data = &grsec_enable_chroot_chmod,
56685+ .maxlen = sizeof(int),
56686+ .mode = 0600,
56687+ .proc_handler = &proc_dointvec,
56688+ },
56689+#endif
56690+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56691+ {
56692+ .procname = "chroot_deny_mknod",
56693+ .data = &grsec_enable_chroot_mknod,
56694+ .maxlen = sizeof(int),
56695+ .mode = 0600,
56696+ .proc_handler = &proc_dointvec,
56697+ },
56698+#endif
56699+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56700+ {
56701+ .procname = "chroot_restrict_nice",
56702+ .data = &grsec_enable_chroot_nice,
56703+ .maxlen = sizeof(int),
56704+ .mode = 0600,
56705+ .proc_handler = &proc_dointvec,
56706+ },
56707+#endif
56708+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56709+ {
56710+ .procname = "chroot_execlog",
56711+ .data = &grsec_enable_chroot_execlog,
56712+ .maxlen = sizeof(int),
56713+ .mode = 0600,
56714+ .proc_handler = &proc_dointvec,
56715+ },
56716+#endif
56717+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56718+ {
56719+ .procname = "chroot_caps",
56720+ .data = &grsec_enable_chroot_caps,
56721+ .maxlen = sizeof(int),
56722+ .mode = 0600,
56723+ .proc_handler = &proc_dointvec,
56724+ },
56725+#endif
56726+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56727+ {
56728+ .procname = "chroot_deny_sysctl",
56729+ .data = &grsec_enable_chroot_sysctl,
56730+ .maxlen = sizeof(int),
56731+ .mode = 0600,
56732+ .proc_handler = &proc_dointvec,
56733+ },
56734+#endif
56735+#ifdef CONFIG_GRKERNSEC_TPE
56736+ {
56737+ .procname = "tpe",
56738+ .data = &grsec_enable_tpe,
56739+ .maxlen = sizeof(int),
56740+ .mode = 0600,
56741+ .proc_handler = &proc_dointvec,
56742+ },
56743+ {
56744+ .procname = "tpe_gid",
56745+ .data = &grsec_tpe_gid,
56746+ .maxlen = sizeof(int),
56747+ .mode = 0600,
56748+ .proc_handler = &proc_dointvec,
56749+ },
56750+#endif
56751+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56752+ {
56753+ .procname = "tpe_invert",
56754+ .data = &grsec_enable_tpe_invert,
56755+ .maxlen = sizeof(int),
56756+ .mode = 0600,
56757+ .proc_handler = &proc_dointvec,
56758+ },
56759+#endif
56760+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56761+ {
56762+ .procname = "tpe_restrict_all",
56763+ .data = &grsec_enable_tpe_all,
56764+ .maxlen = sizeof(int),
56765+ .mode = 0600,
56766+ .proc_handler = &proc_dointvec,
56767+ },
56768+#endif
56769+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56770+ {
56771+ .procname = "socket_all",
56772+ .data = &grsec_enable_socket_all,
56773+ .maxlen = sizeof(int),
56774+ .mode = 0600,
56775+ .proc_handler = &proc_dointvec,
56776+ },
56777+ {
56778+ .procname = "socket_all_gid",
56779+ .data = &grsec_socket_all_gid,
56780+ .maxlen = sizeof(int),
56781+ .mode = 0600,
56782+ .proc_handler = &proc_dointvec,
56783+ },
56784+#endif
56785+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56786+ {
56787+ .procname = "socket_client",
56788+ .data = &grsec_enable_socket_client,
56789+ .maxlen = sizeof(int),
56790+ .mode = 0600,
56791+ .proc_handler = &proc_dointvec,
56792+ },
56793+ {
56794+ .procname = "socket_client_gid",
56795+ .data = &grsec_socket_client_gid,
56796+ .maxlen = sizeof(int),
56797+ .mode = 0600,
56798+ .proc_handler = &proc_dointvec,
56799+ },
56800+#endif
56801+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56802+ {
56803+ .procname = "socket_server",
56804+ .data = &grsec_enable_socket_server,
56805+ .maxlen = sizeof(int),
56806+ .mode = 0600,
56807+ .proc_handler = &proc_dointvec,
56808+ },
56809+ {
56810+ .procname = "socket_server_gid",
56811+ .data = &grsec_socket_server_gid,
56812+ .maxlen = sizeof(int),
56813+ .mode = 0600,
56814+ .proc_handler = &proc_dointvec,
56815+ },
56816+#endif
56817+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56818+ {
56819+ .procname = "audit_group",
56820+ .data = &grsec_enable_group,
56821+ .maxlen = sizeof(int),
56822+ .mode = 0600,
56823+ .proc_handler = &proc_dointvec,
56824+ },
56825+ {
56826+ .procname = "audit_gid",
56827+ .data = &grsec_audit_gid,
56828+ .maxlen = sizeof(int),
56829+ .mode = 0600,
56830+ .proc_handler = &proc_dointvec,
56831+ },
56832+#endif
56833+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56834+ {
56835+ .procname = "audit_chdir",
56836+ .data = &grsec_enable_chdir,
56837+ .maxlen = sizeof(int),
56838+ .mode = 0600,
56839+ .proc_handler = &proc_dointvec,
56840+ },
56841+#endif
56842+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56843+ {
56844+ .procname = "audit_mount",
56845+ .data = &grsec_enable_mount,
56846+ .maxlen = sizeof(int),
56847+ .mode = 0600,
56848+ .proc_handler = &proc_dointvec,
56849+ },
56850+#endif
56851+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56852+ {
56853+ .procname = "audit_textrel",
56854+ .data = &grsec_enable_audit_textrel,
56855+ .maxlen = sizeof(int),
56856+ .mode = 0600,
56857+ .proc_handler = &proc_dointvec,
56858+ },
56859+#endif
56860+#ifdef CONFIG_GRKERNSEC_DMESG
56861+ {
56862+ .procname = "dmesg",
56863+ .data = &grsec_enable_dmesg,
56864+ .maxlen = sizeof(int),
56865+ .mode = 0600,
56866+ .proc_handler = &proc_dointvec,
56867+ },
56868+#endif
56869+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56870+ {
56871+ .procname = "chroot_findtask",
56872+ .data = &grsec_enable_chroot_findtask,
56873+ .maxlen = sizeof(int),
56874+ .mode = 0600,
56875+ .proc_handler = &proc_dointvec,
56876+ },
56877+#endif
56878+#ifdef CONFIG_GRKERNSEC_RESLOG
56879+ {
56880+ .procname = "resource_logging",
56881+ .data = &grsec_resource_logging,
56882+ .maxlen = sizeof(int),
56883+ .mode = 0600,
56884+ .proc_handler = &proc_dointvec,
56885+ },
56886+#endif
56887+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56888+ {
56889+ .procname = "audit_ptrace",
56890+ .data = &grsec_enable_audit_ptrace,
56891+ .maxlen = sizeof(int),
56892+ .mode = 0600,
56893+ .proc_handler = &proc_dointvec,
56894+ },
56895+#endif
56896+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56897+ {
56898+ .procname = "harden_ptrace",
56899+ .data = &grsec_enable_harden_ptrace,
56900+ .maxlen = sizeof(int),
56901+ .mode = 0600,
56902+ .proc_handler = &proc_dointvec,
56903+ },
56904+#endif
56905+ {
56906+ .procname = "grsec_lock",
56907+ .data = &grsec_lock,
56908+ .maxlen = sizeof(int),
56909+ .mode = 0600,
56910+ .proc_handler = &proc_dointvec,
56911+ },
56912+#endif
56913+#ifdef CONFIG_GRKERNSEC_ROFS
56914+ {
56915+ .procname = "romount_protect",
56916+ .data = &grsec_enable_rofs,
56917+ .maxlen = sizeof(int),
56918+ .mode = 0600,
56919+ .proc_handler = &proc_dointvec_minmax,
56920+ .extra1 = &one,
56921+ .extra2 = &one,
56922+ },
56923+#endif
56924+ { }
56925+};
56926+#endif
56927diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56928new file mode 100644
56929index 0000000..0dc13c3
56930--- /dev/null
56931+++ b/grsecurity/grsec_time.c
56932@@ -0,0 +1,16 @@
56933+#include <linux/kernel.h>
56934+#include <linux/sched.h>
56935+#include <linux/grinternal.h>
56936+#include <linux/module.h>
56937+
56938+void
56939+gr_log_timechange(void)
56940+{
56941+#ifdef CONFIG_GRKERNSEC_TIME
56942+ if (grsec_enable_time)
56943+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56944+#endif
56945+ return;
56946+}
56947+
56948+EXPORT_SYMBOL(gr_log_timechange);
56949diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56950new file mode 100644
56951index 0000000..a35ba33
56952--- /dev/null
56953+++ b/grsecurity/grsec_tpe.c
56954@@ -0,0 +1,73 @@
56955+#include <linux/kernel.h>
56956+#include <linux/sched.h>
56957+#include <linux/file.h>
56958+#include <linux/fs.h>
56959+#include <linux/grinternal.h>
56960+
56961+extern int gr_acl_tpe_check(void);
56962+
56963+int
56964+gr_tpe_allow(const struct file *file)
56965+{
56966+#ifdef CONFIG_GRKERNSEC
56967+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56968+ const struct cred *cred = current_cred();
56969+ char *msg = NULL;
56970+ char *msg2 = NULL;
56971+
56972+ // never restrict root
56973+ if (!cred->uid)
56974+ return 1;
56975+
56976+ if (grsec_enable_tpe) {
56977+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56978+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
56979+ msg = "not being in trusted group";
56980+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
56981+ msg = "being in untrusted group";
56982+#else
56983+ if (in_group_p(grsec_tpe_gid))
56984+ msg = "being in untrusted group";
56985+#endif
56986+ }
56987+ if (!msg && gr_acl_tpe_check())
56988+ msg = "being in untrusted role";
56989+
56990+ // not in any affected group/role
56991+ if (!msg)
56992+ goto next_check;
56993+
56994+ if (inode->i_uid)
56995+ msg2 = "file in non-root-owned directory";
56996+ else if (inode->i_mode & S_IWOTH)
56997+ msg2 = "file in world-writable directory";
56998+ else if (inode->i_mode & S_IWGRP)
56999+ msg2 = "file in group-writable directory";
57000+
57001+ if (msg && msg2) {
57002+ char fullmsg[64] = {0};
57003+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57004+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57005+ return 0;
57006+ }
57007+ msg = NULL;
57008+next_check:
57009+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57010+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57011+ return 1;
57012+
57013+ if (inode->i_uid && (inode->i_uid != cred->uid))
57014+ msg = "directory not owned by user";
57015+ else if (inode->i_mode & S_IWOTH)
57016+ msg = "file in world-writable directory";
57017+ else if (inode->i_mode & S_IWGRP)
57018+ msg = "file in group-writable directory";
57019+
57020+ if (msg) {
57021+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57022+ return 0;
57023+ }
57024+#endif
57025+#endif
57026+ return 1;
57027+}
57028diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57029new file mode 100644
57030index 0000000..9f7b1ac
57031--- /dev/null
57032+++ b/grsecurity/grsum.c
57033@@ -0,0 +1,61 @@
57034+#include <linux/err.h>
57035+#include <linux/kernel.h>
57036+#include <linux/sched.h>
57037+#include <linux/mm.h>
57038+#include <linux/scatterlist.h>
57039+#include <linux/crypto.h>
57040+#include <linux/gracl.h>
57041+
57042+
57043+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57044+#error "crypto and sha256 must be built into the kernel"
57045+#endif
57046+
57047+int
57048+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57049+{
57050+ char *p;
57051+ struct crypto_hash *tfm;
57052+ struct hash_desc desc;
57053+ struct scatterlist sg;
57054+ unsigned char temp_sum[GR_SHA_LEN];
57055+ volatile int retval = 0;
57056+ volatile int dummy = 0;
57057+ unsigned int i;
57058+
57059+ sg_init_table(&sg, 1);
57060+
57061+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57062+ if (IS_ERR(tfm)) {
57063+ /* should never happen, since sha256 should be built in */
57064+ return 1;
57065+ }
57066+
57067+ desc.tfm = tfm;
57068+ desc.flags = 0;
57069+
57070+ crypto_hash_init(&desc);
57071+
57072+ p = salt;
57073+ sg_set_buf(&sg, p, GR_SALT_LEN);
57074+ crypto_hash_update(&desc, &sg, sg.length);
57075+
57076+ p = entry->pw;
57077+ sg_set_buf(&sg, p, strlen(p));
57078+
57079+ crypto_hash_update(&desc, &sg, sg.length);
57080+
57081+ crypto_hash_final(&desc, temp_sum);
57082+
57083+ memset(entry->pw, 0, GR_PW_LEN);
57084+
57085+ for (i = 0; i < GR_SHA_LEN; i++)
57086+ if (sum[i] != temp_sum[i])
57087+ retval = 1;
57088+ else
57089+ dummy = 1; // waste a cycle
57090+
57091+ crypto_free_hash(tfm);
57092+
57093+ return retval;
57094+}
57095diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57096index 6cd5b64..f620d2d 100644
57097--- a/include/acpi/acpi_bus.h
57098+++ b/include/acpi/acpi_bus.h
57099@@ -107,7 +107,7 @@ struct acpi_device_ops {
57100 acpi_op_bind bind;
57101 acpi_op_unbind unbind;
57102 acpi_op_notify notify;
57103-};
57104+} __no_const;
57105
57106 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57107
57108diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57109index b7babf0..71e4e74 100644
57110--- a/include/asm-generic/atomic-long.h
57111+++ b/include/asm-generic/atomic-long.h
57112@@ -22,6 +22,12 @@
57113
57114 typedef atomic64_t atomic_long_t;
57115
57116+#ifdef CONFIG_PAX_REFCOUNT
57117+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57118+#else
57119+typedef atomic64_t atomic_long_unchecked_t;
57120+#endif
57121+
57122 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57123
57124 static inline long atomic_long_read(atomic_long_t *l)
57125@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57126 return (long)atomic64_read(v);
57127 }
57128
57129+#ifdef CONFIG_PAX_REFCOUNT
57130+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57131+{
57132+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57133+
57134+ return (long)atomic64_read_unchecked(v);
57135+}
57136+#endif
57137+
57138 static inline void atomic_long_set(atomic_long_t *l, long i)
57139 {
57140 atomic64_t *v = (atomic64_t *)l;
57141@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57142 atomic64_set(v, i);
57143 }
57144
57145+#ifdef CONFIG_PAX_REFCOUNT
57146+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57147+{
57148+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57149+
57150+ atomic64_set_unchecked(v, i);
57151+}
57152+#endif
57153+
57154 static inline void atomic_long_inc(atomic_long_t *l)
57155 {
57156 atomic64_t *v = (atomic64_t *)l;
57157@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57158 atomic64_inc(v);
57159 }
57160
57161+#ifdef CONFIG_PAX_REFCOUNT
57162+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57163+{
57164+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57165+
57166+ atomic64_inc_unchecked(v);
57167+}
57168+#endif
57169+
57170 static inline void atomic_long_dec(atomic_long_t *l)
57171 {
57172 atomic64_t *v = (atomic64_t *)l;
57173@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57174 atomic64_dec(v);
57175 }
57176
57177+#ifdef CONFIG_PAX_REFCOUNT
57178+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57179+{
57180+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57181+
57182+ atomic64_dec_unchecked(v);
57183+}
57184+#endif
57185+
57186 static inline void atomic_long_add(long i, atomic_long_t *l)
57187 {
57188 atomic64_t *v = (atomic64_t *)l;
57189@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57190 atomic64_add(i, v);
57191 }
57192
57193+#ifdef CONFIG_PAX_REFCOUNT
57194+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57195+{
57196+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57197+
57198+ atomic64_add_unchecked(i, v);
57199+}
57200+#endif
57201+
57202 static inline void atomic_long_sub(long i, atomic_long_t *l)
57203 {
57204 atomic64_t *v = (atomic64_t *)l;
57205@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57206 atomic64_sub(i, v);
57207 }
57208
57209+#ifdef CONFIG_PAX_REFCOUNT
57210+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57211+{
57212+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57213+
57214+ atomic64_sub_unchecked(i, v);
57215+}
57216+#endif
57217+
57218 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57219 {
57220 atomic64_t *v = (atomic64_t *)l;
57221@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57222 return (long)atomic64_inc_return(v);
57223 }
57224
57225+#ifdef CONFIG_PAX_REFCOUNT
57226+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57227+{
57228+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57229+
57230+ return (long)atomic64_inc_return_unchecked(v);
57231+}
57232+#endif
57233+
57234 static inline long atomic_long_dec_return(atomic_long_t *l)
57235 {
57236 atomic64_t *v = (atomic64_t *)l;
57237@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57238
57239 typedef atomic_t atomic_long_t;
57240
57241+#ifdef CONFIG_PAX_REFCOUNT
57242+typedef atomic_unchecked_t atomic_long_unchecked_t;
57243+#else
57244+typedef atomic_t atomic_long_unchecked_t;
57245+#endif
57246+
57247 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57248 static inline long atomic_long_read(atomic_long_t *l)
57249 {
57250@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57251 return (long)atomic_read(v);
57252 }
57253
57254+#ifdef CONFIG_PAX_REFCOUNT
57255+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57256+{
57257+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57258+
57259+ return (long)atomic_read_unchecked(v);
57260+}
57261+#endif
57262+
57263 static inline void atomic_long_set(atomic_long_t *l, long i)
57264 {
57265 atomic_t *v = (atomic_t *)l;
57266@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57267 atomic_set(v, i);
57268 }
57269
57270+#ifdef CONFIG_PAX_REFCOUNT
57271+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57272+{
57273+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57274+
57275+ atomic_set_unchecked(v, i);
57276+}
57277+#endif
57278+
57279 static inline void atomic_long_inc(atomic_long_t *l)
57280 {
57281 atomic_t *v = (atomic_t *)l;
57282@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57283 atomic_inc(v);
57284 }
57285
57286+#ifdef CONFIG_PAX_REFCOUNT
57287+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57288+{
57289+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57290+
57291+ atomic_inc_unchecked(v);
57292+}
57293+#endif
57294+
57295 static inline void atomic_long_dec(atomic_long_t *l)
57296 {
57297 atomic_t *v = (atomic_t *)l;
57298@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57299 atomic_dec(v);
57300 }
57301
57302+#ifdef CONFIG_PAX_REFCOUNT
57303+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57304+{
57305+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57306+
57307+ atomic_dec_unchecked(v);
57308+}
57309+#endif
57310+
57311 static inline void atomic_long_add(long i, atomic_long_t *l)
57312 {
57313 atomic_t *v = (atomic_t *)l;
57314@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57315 atomic_add(i, v);
57316 }
57317
57318+#ifdef CONFIG_PAX_REFCOUNT
57319+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57320+{
57321+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57322+
57323+ atomic_add_unchecked(i, v);
57324+}
57325+#endif
57326+
57327 static inline void atomic_long_sub(long i, atomic_long_t *l)
57328 {
57329 atomic_t *v = (atomic_t *)l;
57330@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57331 atomic_sub(i, v);
57332 }
57333
57334+#ifdef CONFIG_PAX_REFCOUNT
57335+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57336+{
57337+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57338+
57339+ atomic_sub_unchecked(i, v);
57340+}
57341+#endif
57342+
57343 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57344 {
57345 atomic_t *v = (atomic_t *)l;
57346@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57347 return (long)atomic_inc_return(v);
57348 }
57349
57350+#ifdef CONFIG_PAX_REFCOUNT
57351+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57352+{
57353+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57354+
57355+ return (long)atomic_inc_return_unchecked(v);
57356+}
57357+#endif
57358+
57359 static inline long atomic_long_dec_return(atomic_long_t *l)
57360 {
57361 atomic_t *v = (atomic_t *)l;
57362@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57363
57364 #endif /* BITS_PER_LONG == 64 */
57365
57366+#ifdef CONFIG_PAX_REFCOUNT
57367+static inline void pax_refcount_needs_these_functions(void)
57368+{
57369+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57370+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57371+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57372+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57373+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57374+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57375+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57376+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57377+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57378+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57379+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57380+
57381+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57382+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57383+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57384+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57385+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57386+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57387+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57388+}
57389+#else
57390+#define atomic_read_unchecked(v) atomic_read(v)
57391+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57392+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57393+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57394+#define atomic_inc_unchecked(v) atomic_inc(v)
57395+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57396+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57397+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57398+#define atomic_dec_unchecked(v) atomic_dec(v)
57399+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57400+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57401+
57402+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57403+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57404+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57405+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57406+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57407+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57408+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57409+#endif
57410+
57411 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57412diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57413index b18ce4f..2ee2843 100644
57414--- a/include/asm-generic/atomic64.h
57415+++ b/include/asm-generic/atomic64.h
57416@@ -16,6 +16,8 @@ typedef struct {
57417 long long counter;
57418 } atomic64_t;
57419
57420+typedef atomic64_t atomic64_unchecked_t;
57421+
57422 #define ATOMIC64_INIT(i) { (i) }
57423
57424 extern long long atomic64_read(const atomic64_t *v);
57425@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57426 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57427 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57428
57429+#define atomic64_read_unchecked(v) atomic64_read(v)
57430+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57431+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57432+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57433+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57434+#define atomic64_inc_unchecked(v) atomic64_inc(v)
57435+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57436+#define atomic64_dec_unchecked(v) atomic64_dec(v)
57437+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57438+
57439 #endif /* _ASM_GENERIC_ATOMIC64_H */
57440diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57441index 1bfcfe5..e04c5c9 100644
57442--- a/include/asm-generic/cache.h
57443+++ b/include/asm-generic/cache.h
57444@@ -6,7 +6,7 @@
57445 * cache lines need to provide their own cache.h.
57446 */
57447
57448-#define L1_CACHE_SHIFT 5
57449-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57450+#define L1_CACHE_SHIFT 5UL
57451+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57452
57453 #endif /* __ASM_GENERIC_CACHE_H */
57454diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57455index 1ca3efc..e3dc852 100644
57456--- a/include/asm-generic/int-l64.h
57457+++ b/include/asm-generic/int-l64.h
57458@@ -46,6 +46,8 @@ typedef unsigned int u32;
57459 typedef signed long s64;
57460 typedef unsigned long u64;
57461
57462+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57463+
57464 #define S8_C(x) x
57465 #define U8_C(x) x ## U
57466 #define S16_C(x) x
57467diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57468index f394147..b6152b9 100644
57469--- a/include/asm-generic/int-ll64.h
57470+++ b/include/asm-generic/int-ll64.h
57471@@ -51,6 +51,8 @@ typedef unsigned int u32;
57472 typedef signed long long s64;
57473 typedef unsigned long long u64;
57474
57475+typedef unsigned long long intoverflow_t;
57476+
57477 #define S8_C(x) x
57478 #define U8_C(x) x ## U
57479 #define S16_C(x) x
57480diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57481index 0232ccb..13d9165 100644
57482--- a/include/asm-generic/kmap_types.h
57483+++ b/include/asm-generic/kmap_types.h
57484@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57485 KMAP_D(17) KM_NMI,
57486 KMAP_D(18) KM_NMI_PTE,
57487 KMAP_D(19) KM_KDB,
57488+KMAP_D(20) KM_CLEARPAGE,
57489 /*
57490 * Remember to update debug_kmap_atomic() when adding new kmap types!
57491 */
57492-KMAP_D(20) KM_TYPE_NR
57493+KMAP_D(21) KM_TYPE_NR
57494 };
57495
57496 #undef KMAP_D
57497diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57498index 725612b..9cc513a 100644
57499--- a/include/asm-generic/pgtable-nopmd.h
57500+++ b/include/asm-generic/pgtable-nopmd.h
57501@@ -1,14 +1,19 @@
57502 #ifndef _PGTABLE_NOPMD_H
57503 #define _PGTABLE_NOPMD_H
57504
57505-#ifndef __ASSEMBLY__
57506-
57507 #include <asm-generic/pgtable-nopud.h>
57508
57509-struct mm_struct;
57510-
57511 #define __PAGETABLE_PMD_FOLDED
57512
57513+#define PMD_SHIFT PUD_SHIFT
57514+#define PTRS_PER_PMD 1
57515+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57516+#define PMD_MASK (~(PMD_SIZE-1))
57517+
57518+#ifndef __ASSEMBLY__
57519+
57520+struct mm_struct;
57521+
57522 /*
57523 * Having the pmd type consist of a pud gets the size right, and allows
57524 * us to conceptually access the pud entry that this pmd is folded into
57525@@ -16,11 +21,6 @@ struct mm_struct;
57526 */
57527 typedef struct { pud_t pud; } pmd_t;
57528
57529-#define PMD_SHIFT PUD_SHIFT
57530-#define PTRS_PER_PMD 1
57531-#define PMD_SIZE (1UL << PMD_SHIFT)
57532-#define PMD_MASK (~(PMD_SIZE-1))
57533-
57534 /*
57535 * The "pud_xxx()" functions here are trivial for a folded two-level
57536 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57537diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57538index 810431d..ccc3638 100644
57539--- a/include/asm-generic/pgtable-nopud.h
57540+++ b/include/asm-generic/pgtable-nopud.h
57541@@ -1,10 +1,15 @@
57542 #ifndef _PGTABLE_NOPUD_H
57543 #define _PGTABLE_NOPUD_H
57544
57545-#ifndef __ASSEMBLY__
57546-
57547 #define __PAGETABLE_PUD_FOLDED
57548
57549+#define PUD_SHIFT PGDIR_SHIFT
57550+#define PTRS_PER_PUD 1
57551+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57552+#define PUD_MASK (~(PUD_SIZE-1))
57553+
57554+#ifndef __ASSEMBLY__
57555+
57556 /*
57557 * Having the pud type consist of a pgd gets the size right, and allows
57558 * us to conceptually access the pgd entry that this pud is folded into
57559@@ -12,11 +17,6 @@
57560 */
57561 typedef struct { pgd_t pgd; } pud_t;
57562
57563-#define PUD_SHIFT PGDIR_SHIFT
57564-#define PTRS_PER_PUD 1
57565-#define PUD_SIZE (1UL << PUD_SHIFT)
57566-#define PUD_MASK (~(PUD_SIZE-1))
57567-
57568 /*
57569 * The "pgd_xxx()" functions here are trivial for a folded two-level
57570 * setup: the pud is never bad, and a pud always exists (as it's folded
57571diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57572index 76bff2b..c7a14e2 100644
57573--- a/include/asm-generic/pgtable.h
57574+++ b/include/asm-generic/pgtable.h
57575@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57576 #endif /* __HAVE_ARCH_PMD_WRITE */
57577 #endif
57578
57579+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57580+static inline unsigned long pax_open_kernel(void) { return 0; }
57581+#endif
57582+
57583+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57584+static inline unsigned long pax_close_kernel(void) { return 0; }
57585+#endif
57586+
57587 #endif /* !__ASSEMBLY__ */
57588
57589 #endif /* _ASM_GENERIC_PGTABLE_H */
57590diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57591index b5e2e4c..6a5373e 100644
57592--- a/include/asm-generic/vmlinux.lds.h
57593+++ b/include/asm-generic/vmlinux.lds.h
57594@@ -217,6 +217,7 @@
57595 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57596 VMLINUX_SYMBOL(__start_rodata) = .; \
57597 *(.rodata) *(.rodata.*) \
57598+ *(.data..read_only) \
57599 *(__vermagic) /* Kernel version magic */ \
57600 . = ALIGN(8); \
57601 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57602@@ -722,17 +723,18 @@
57603 * section in the linker script will go there too. @phdr should have
57604 * a leading colon.
57605 *
57606- * Note that this macros defines __per_cpu_load as an absolute symbol.
57607+ * Note that this macros defines per_cpu_load as an absolute symbol.
57608 * If there is no need to put the percpu section at a predetermined
57609 * address, use PERCPU_SECTION.
57610 */
57611 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57612- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57613- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57614+ per_cpu_load = .; \
57615+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57616 - LOAD_OFFSET) { \
57617+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57618 PERCPU_INPUT(cacheline) \
57619 } phdr \
57620- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57621+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57622
57623 /**
57624 * PERCPU_SECTION - define output section for percpu area, simple version
57625diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57626index 1f9e951..14ef517 100644
57627--- a/include/drm/drmP.h
57628+++ b/include/drm/drmP.h
57629@@ -72,6 +72,7 @@
57630 #include <linux/workqueue.h>
57631 #include <linux/poll.h>
57632 #include <asm/pgalloc.h>
57633+#include <asm/local.h>
57634 #include "drm.h"
57635
57636 #include <linux/idr.h>
57637@@ -1038,7 +1039,7 @@ struct drm_device {
57638
57639 /** \name Usage Counters */
57640 /*@{ */
57641- int open_count; /**< Outstanding files open */
57642+ local_t open_count; /**< Outstanding files open */
57643 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57644 atomic_t vma_count; /**< Outstanding vma areas open */
57645 int buf_use; /**< Buffers in use -- cannot alloc */
57646@@ -1049,7 +1050,7 @@ struct drm_device {
57647 /*@{ */
57648 unsigned long counters;
57649 enum drm_stat_type types[15];
57650- atomic_t counts[15];
57651+ atomic_unchecked_t counts[15];
57652 /*@} */
57653
57654 struct list_head filelist;
57655diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57656index 73b0712..0b7ef2f 100644
57657--- a/include/drm/drm_crtc_helper.h
57658+++ b/include/drm/drm_crtc_helper.h
57659@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57660
57661 /* disable crtc when not in use - more explicit than dpms off */
57662 void (*disable)(struct drm_crtc *crtc);
57663-};
57664+} __no_const;
57665
57666 struct drm_encoder_helper_funcs {
57667 void (*dpms)(struct drm_encoder *encoder, int mode);
57668@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57669 struct drm_connector *connector);
57670 /* disable encoder when not in use - more explicit than dpms off */
57671 void (*disable)(struct drm_encoder *encoder);
57672-};
57673+} __no_const;
57674
57675 struct drm_connector_helper_funcs {
57676 int (*get_modes)(struct drm_connector *connector);
57677diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57678index 26c1f78..6722682 100644
57679--- a/include/drm/ttm/ttm_memory.h
57680+++ b/include/drm/ttm/ttm_memory.h
57681@@ -47,7 +47,7 @@
57682
57683 struct ttm_mem_shrink {
57684 int (*do_shrink) (struct ttm_mem_shrink *);
57685-};
57686+} __no_const;
57687
57688 /**
57689 * struct ttm_mem_global - Global memory accounting structure.
57690diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57691index e86dfca..40cc55f 100644
57692--- a/include/linux/a.out.h
57693+++ b/include/linux/a.out.h
57694@@ -39,6 +39,14 @@ enum machine_type {
57695 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57696 };
57697
57698+/* Constants for the N_FLAGS field */
57699+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57700+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57701+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57702+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57703+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57704+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57705+
57706 #if !defined (N_MAGIC)
57707 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57708 #endif
57709diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57710index 49a83ca..df96b54 100644
57711--- a/include/linux/atmdev.h
57712+++ b/include/linux/atmdev.h
57713@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57714 #endif
57715
57716 struct k_atm_aal_stats {
57717-#define __HANDLE_ITEM(i) atomic_t i
57718+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57719 __AAL_STAT_ITEMS
57720 #undef __HANDLE_ITEM
57721 };
57722diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57723index fd88a39..f4d0bad 100644
57724--- a/include/linux/binfmts.h
57725+++ b/include/linux/binfmts.h
57726@@ -88,6 +88,7 @@ struct linux_binfmt {
57727 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57728 int (*load_shlib)(struct file *);
57729 int (*core_dump)(struct coredump_params *cprm);
57730+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57731 unsigned long min_coredump; /* minimal dump size */
57732 };
57733
57734diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57735index 0ed1eb0..3ab569b 100644
57736--- a/include/linux/blkdev.h
57737+++ b/include/linux/blkdev.h
57738@@ -1315,7 +1315,7 @@ struct block_device_operations {
57739 /* this callback is with swap_lock and sometimes page table lock held */
57740 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57741 struct module *owner;
57742-};
57743+} __do_const;
57744
57745 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57746 unsigned long);
57747diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57748index 4d1a074..88f929a 100644
57749--- a/include/linux/blktrace_api.h
57750+++ b/include/linux/blktrace_api.h
57751@@ -162,7 +162,7 @@ struct blk_trace {
57752 struct dentry *dir;
57753 struct dentry *dropped_file;
57754 struct dentry *msg_file;
57755- atomic_t dropped;
57756+ atomic_unchecked_t dropped;
57757 };
57758
57759 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57760diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57761index 83195fb..0b0f77d 100644
57762--- a/include/linux/byteorder/little_endian.h
57763+++ b/include/linux/byteorder/little_endian.h
57764@@ -42,51 +42,51 @@
57765
57766 static inline __le64 __cpu_to_le64p(const __u64 *p)
57767 {
57768- return (__force __le64)*p;
57769+ return (__force const __le64)*p;
57770 }
57771 static inline __u64 __le64_to_cpup(const __le64 *p)
57772 {
57773- return (__force __u64)*p;
57774+ return (__force const __u64)*p;
57775 }
57776 static inline __le32 __cpu_to_le32p(const __u32 *p)
57777 {
57778- return (__force __le32)*p;
57779+ return (__force const __le32)*p;
57780 }
57781 static inline __u32 __le32_to_cpup(const __le32 *p)
57782 {
57783- return (__force __u32)*p;
57784+ return (__force const __u32)*p;
57785 }
57786 static inline __le16 __cpu_to_le16p(const __u16 *p)
57787 {
57788- return (__force __le16)*p;
57789+ return (__force const __le16)*p;
57790 }
57791 static inline __u16 __le16_to_cpup(const __le16 *p)
57792 {
57793- return (__force __u16)*p;
57794+ return (__force const __u16)*p;
57795 }
57796 static inline __be64 __cpu_to_be64p(const __u64 *p)
57797 {
57798- return (__force __be64)__swab64p(p);
57799+ return (__force const __be64)__swab64p(p);
57800 }
57801 static inline __u64 __be64_to_cpup(const __be64 *p)
57802 {
57803- return __swab64p((__u64 *)p);
57804+ return __swab64p((const __u64 *)p);
57805 }
57806 static inline __be32 __cpu_to_be32p(const __u32 *p)
57807 {
57808- return (__force __be32)__swab32p(p);
57809+ return (__force const __be32)__swab32p(p);
57810 }
57811 static inline __u32 __be32_to_cpup(const __be32 *p)
57812 {
57813- return __swab32p((__u32 *)p);
57814+ return __swab32p((const __u32 *)p);
57815 }
57816 static inline __be16 __cpu_to_be16p(const __u16 *p)
57817 {
57818- return (__force __be16)__swab16p(p);
57819+ return (__force const __be16)__swab16p(p);
57820 }
57821 static inline __u16 __be16_to_cpup(const __be16 *p)
57822 {
57823- return __swab16p((__u16 *)p);
57824+ return __swab16p((const __u16 *)p);
57825 }
57826 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57827 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57828diff --git a/include/linux/cache.h b/include/linux/cache.h
57829index 4c57065..4307975 100644
57830--- a/include/linux/cache.h
57831+++ b/include/linux/cache.h
57832@@ -16,6 +16,10 @@
57833 #define __read_mostly
57834 #endif
57835
57836+#ifndef __read_only
57837+#define __read_only __read_mostly
57838+#endif
57839+
57840 #ifndef ____cacheline_aligned
57841 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57842 #endif
57843diff --git a/include/linux/capability.h b/include/linux/capability.h
57844index a63d13d..069bfd5 100644
57845--- a/include/linux/capability.h
57846+++ b/include/linux/capability.h
57847@@ -548,6 +548,9 @@ extern bool capable(int cap);
57848 extern bool ns_capable(struct user_namespace *ns, int cap);
57849 extern bool task_ns_capable(struct task_struct *t, int cap);
57850 extern bool nsown_capable(int cap);
57851+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57852+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57853+extern bool capable_nolog(int cap);
57854
57855 /* audit system wants to get cap info from files as well */
57856 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57857diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57858index 04ffb2e..6799180 100644
57859--- a/include/linux/cleancache.h
57860+++ b/include/linux/cleancache.h
57861@@ -31,7 +31,7 @@ struct cleancache_ops {
57862 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57863 void (*flush_inode)(int, struct cleancache_filekey);
57864 void (*flush_fs)(int);
57865-};
57866+} __no_const;
57867
57868 extern struct cleancache_ops
57869 cleancache_register_ops(struct cleancache_ops *ops);
57870diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57871index dfadc96..c0e70c1 100644
57872--- a/include/linux/compiler-gcc4.h
57873+++ b/include/linux/compiler-gcc4.h
57874@@ -31,6 +31,12 @@
57875
57876
57877 #if __GNUC_MINOR__ >= 5
57878+
57879+#ifdef CONSTIFY_PLUGIN
57880+#define __no_const __attribute__((no_const))
57881+#define __do_const __attribute__((do_const))
57882+#endif
57883+
57884 /*
57885 * Mark a position in code as unreachable. This can be used to
57886 * suppress control flow warnings after asm blocks that transfer
57887@@ -46,6 +52,11 @@
57888 #define __noclone __attribute__((__noclone__))
57889
57890 #endif
57891+
57892+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57893+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57894+#define __bos0(ptr) __bos((ptr), 0)
57895+#define __bos1(ptr) __bos((ptr), 1)
57896 #endif
57897
57898 #if __GNUC_MINOR__ > 0
57899diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57900index 320d6c9..8573a1c 100644
57901--- a/include/linux/compiler.h
57902+++ b/include/linux/compiler.h
57903@@ -5,31 +5,62 @@
57904
57905 #ifdef __CHECKER__
57906 # define __user __attribute__((noderef, address_space(1)))
57907+# define __force_user __force __user
57908 # define __kernel __attribute__((address_space(0)))
57909+# define __force_kernel __force __kernel
57910 # define __safe __attribute__((safe))
57911 # define __force __attribute__((force))
57912 # define __nocast __attribute__((nocast))
57913 # define __iomem __attribute__((noderef, address_space(2)))
57914+# define __force_iomem __force __iomem
57915 # define __acquires(x) __attribute__((context(x,0,1)))
57916 # define __releases(x) __attribute__((context(x,1,0)))
57917 # define __acquire(x) __context__(x,1)
57918 # define __release(x) __context__(x,-1)
57919 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57920 # define __percpu __attribute__((noderef, address_space(3)))
57921+# define __force_percpu __force __percpu
57922 #ifdef CONFIG_SPARSE_RCU_POINTER
57923 # define __rcu __attribute__((noderef, address_space(4)))
57924+# define __force_rcu __force __rcu
57925 #else
57926 # define __rcu
57927+# define __force_rcu
57928 #endif
57929 extern void __chk_user_ptr(const volatile void __user *);
57930 extern void __chk_io_ptr(const volatile void __iomem *);
57931+#elif defined(CHECKER_PLUGIN)
57932+//# define __user
57933+//# define __force_user
57934+//# define __kernel
57935+//# define __force_kernel
57936+# define __safe
57937+# define __force
57938+# define __nocast
57939+# define __iomem
57940+# define __force_iomem
57941+# define __chk_user_ptr(x) (void)0
57942+# define __chk_io_ptr(x) (void)0
57943+# define __builtin_warning(x, y...) (1)
57944+# define __acquires(x)
57945+# define __releases(x)
57946+# define __acquire(x) (void)0
57947+# define __release(x) (void)0
57948+# define __cond_lock(x,c) (c)
57949+# define __percpu
57950+# define __force_percpu
57951+# define __rcu
57952+# define __force_rcu
57953 #else
57954 # define __user
57955+# define __force_user
57956 # define __kernel
57957+# define __force_kernel
57958 # define __safe
57959 # define __force
57960 # define __nocast
57961 # define __iomem
57962+# define __force_iomem
57963 # define __chk_user_ptr(x) (void)0
57964 # define __chk_io_ptr(x) (void)0
57965 # define __builtin_warning(x, y...) (1)
57966@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57967 # define __release(x) (void)0
57968 # define __cond_lock(x,c) (c)
57969 # define __percpu
57970+# define __force_percpu
57971 # define __rcu
57972+# define __force_rcu
57973 #endif
57974
57975 #ifdef __KERNEL__
57976@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57977 # define __attribute_const__ /* unimplemented */
57978 #endif
57979
57980+#ifndef __no_const
57981+# define __no_const
57982+#endif
57983+
57984+#ifndef __do_const
57985+# define __do_const
57986+#endif
57987+
57988 /*
57989 * Tell gcc if a function is cold. The compiler will assume any path
57990 * directly leading to the call is unlikely.
57991@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57992 #define __cold
57993 #endif
57994
57995+#ifndef __alloc_size
57996+#define __alloc_size(...)
57997+#endif
57998+
57999+#ifndef __bos
58000+#define __bos(ptr, arg)
58001+#endif
58002+
58003+#ifndef __bos0
58004+#define __bos0(ptr)
58005+#endif
58006+
58007+#ifndef __bos1
58008+#define __bos1(ptr)
58009+#endif
58010+
58011 /* Simple shorthand for a section definition */
58012 #ifndef __section
58013 # define __section(S) __attribute__ ((__section__(#S)))
58014@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58015 * use is to mediate communication between process-level code and irq/NMI
58016 * handlers, all running on the same CPU.
58017 */
58018-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58019+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58020+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58021
58022 #endif /* __LINUX_COMPILER_H */
58023diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58024index e9eaec5..bfeb9bb 100644
58025--- a/include/linux/cpuset.h
58026+++ b/include/linux/cpuset.h
58027@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58028 * nodemask.
58029 */
58030 smp_mb();
58031- --ACCESS_ONCE(current->mems_allowed_change_disable);
58032+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58033 }
58034
58035 static inline void set_mems_allowed(nodemask_t nodemask)
58036diff --git a/include/linux/cred.h b/include/linux/cred.h
58037index 4030896..8d6f342 100644
58038--- a/include/linux/cred.h
58039+++ b/include/linux/cred.h
58040@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58041 static inline void validate_process_creds(void)
58042 {
58043 }
58044+static inline void validate_task_creds(struct task_struct *task)
58045+{
58046+}
58047 #endif
58048
58049 /**
58050diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58051index 8a94217..15d49e3 100644
58052--- a/include/linux/crypto.h
58053+++ b/include/linux/crypto.h
58054@@ -365,7 +365,7 @@ struct cipher_tfm {
58055 const u8 *key, unsigned int keylen);
58056 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58057 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58058-};
58059+} __no_const;
58060
58061 struct hash_tfm {
58062 int (*init)(struct hash_desc *desc);
58063@@ -386,13 +386,13 @@ struct compress_tfm {
58064 int (*cot_decompress)(struct crypto_tfm *tfm,
58065 const u8 *src, unsigned int slen,
58066 u8 *dst, unsigned int *dlen);
58067-};
58068+} __no_const;
58069
58070 struct rng_tfm {
58071 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58072 unsigned int dlen);
58073 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58074-};
58075+} __no_const;
58076
58077 #define crt_ablkcipher crt_u.ablkcipher
58078 #define crt_aead crt_u.aead
58079diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58080index 7925bf0..d5143d2 100644
58081--- a/include/linux/decompress/mm.h
58082+++ b/include/linux/decompress/mm.h
58083@@ -77,7 +77,7 @@ static void free(void *where)
58084 * warnings when not needed (indeed large_malloc / large_free are not
58085 * needed by inflate */
58086
58087-#define malloc(a) kmalloc(a, GFP_KERNEL)
58088+#define malloc(a) kmalloc((a), GFP_KERNEL)
58089 #define free(a) kfree(a)
58090
58091 #define large_malloc(a) vmalloc(a)
58092diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58093index e13117c..e9fc938 100644
58094--- a/include/linux/dma-mapping.h
58095+++ b/include/linux/dma-mapping.h
58096@@ -46,7 +46,7 @@ struct dma_map_ops {
58097 u64 (*get_required_mask)(struct device *dev);
58098 #endif
58099 int is_phys;
58100-};
58101+} __do_const;
58102
58103 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58104
58105diff --git a/include/linux/efi.h b/include/linux/efi.h
58106index 2362a0b..cfaf8fcc 100644
58107--- a/include/linux/efi.h
58108+++ b/include/linux/efi.h
58109@@ -446,7 +446,7 @@ struct efivar_operations {
58110 efi_get_variable_t *get_variable;
58111 efi_get_next_variable_t *get_next_variable;
58112 efi_set_variable_t *set_variable;
58113-};
58114+} __no_const;
58115
58116 struct efivars {
58117 /*
58118diff --git a/include/linux/elf.h b/include/linux/elf.h
58119index 31f0508..5421c01 100644
58120--- a/include/linux/elf.h
58121+++ b/include/linux/elf.h
58122@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58123 #define PT_GNU_EH_FRAME 0x6474e550
58124
58125 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58126+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58127+
58128+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58129+
58130+/* Constants for the e_flags field */
58131+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58132+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58133+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58134+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58135+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58136+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58137
58138 /*
58139 * Extended Numbering
58140@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58141 #define DT_DEBUG 21
58142 #define DT_TEXTREL 22
58143 #define DT_JMPREL 23
58144+#define DT_FLAGS 30
58145+ #define DF_TEXTREL 0x00000004
58146 #define DT_ENCODING 32
58147 #define OLD_DT_LOOS 0x60000000
58148 #define DT_LOOS 0x6000000d
58149@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58150 #define PF_W 0x2
58151 #define PF_X 0x1
58152
58153+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58154+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58155+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58156+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58157+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58158+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58159+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58160+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58161+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58162+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58163+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58164+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58165+
58166 typedef struct elf32_phdr{
58167 Elf32_Word p_type;
58168 Elf32_Off p_offset;
58169@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58170 #define EI_OSABI 7
58171 #define EI_PAD 8
58172
58173+#define EI_PAX 14
58174+
58175 #define ELFMAG0 0x7f /* EI_MAG */
58176 #define ELFMAG1 'E'
58177 #define ELFMAG2 'L'
58178@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58179 #define elf_note elf32_note
58180 #define elf_addr_t Elf32_Off
58181 #define Elf_Half Elf32_Half
58182+#define elf_dyn Elf32_Dyn
58183
58184 #else
58185
58186@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58187 #define elf_note elf64_note
58188 #define elf_addr_t Elf64_Off
58189 #define Elf_Half Elf64_Half
58190+#define elf_dyn Elf64_Dyn
58191
58192 #endif
58193
58194diff --git a/include/linux/filter.h b/include/linux/filter.h
58195index 8eeb205..d59bfa2 100644
58196--- a/include/linux/filter.h
58197+++ b/include/linux/filter.h
58198@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58199
58200 struct sk_buff;
58201 struct sock;
58202+struct bpf_jit_work;
58203
58204 struct sk_filter
58205 {
58206@@ -141,6 +142,9 @@ struct sk_filter
58207 unsigned int len; /* Number of filter blocks */
58208 unsigned int (*bpf_func)(const struct sk_buff *skb,
58209 const struct sock_filter *filter);
58210+#ifdef CONFIG_BPF_JIT
58211+ struct bpf_jit_work *work;
58212+#endif
58213 struct rcu_head rcu;
58214 struct sock_filter insns[0];
58215 };
58216diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58217index 84ccf8e..2e9b14c 100644
58218--- a/include/linux/firewire.h
58219+++ b/include/linux/firewire.h
58220@@ -428,7 +428,7 @@ struct fw_iso_context {
58221 union {
58222 fw_iso_callback_t sc;
58223 fw_iso_mc_callback_t mc;
58224- } callback;
58225+ } __no_const callback;
58226 void *callback_data;
58227 };
58228
58229diff --git a/include/linux/fs.h b/include/linux/fs.h
58230index e0bc4ff..d79c2fa 100644
58231--- a/include/linux/fs.h
58232+++ b/include/linux/fs.h
58233@@ -1608,7 +1608,8 @@ struct file_operations {
58234 int (*setlease)(struct file *, long, struct file_lock **);
58235 long (*fallocate)(struct file *file, int mode, loff_t offset,
58236 loff_t len);
58237-};
58238+} __do_const;
58239+typedef struct file_operations __no_const file_operations_no_const;
58240
58241 struct inode_operations {
58242 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58243diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58244index 003dc0f..3c4ea97 100644
58245--- a/include/linux/fs_struct.h
58246+++ b/include/linux/fs_struct.h
58247@@ -6,7 +6,7 @@
58248 #include <linux/seqlock.h>
58249
58250 struct fs_struct {
58251- int users;
58252+ atomic_t users;
58253 spinlock_t lock;
58254 seqcount_t seq;
58255 int umask;
58256diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58257index ce31408..b1ad003 100644
58258--- a/include/linux/fscache-cache.h
58259+++ b/include/linux/fscache-cache.h
58260@@ -102,7 +102,7 @@ struct fscache_operation {
58261 fscache_operation_release_t release;
58262 };
58263
58264-extern atomic_t fscache_op_debug_id;
58265+extern atomic_unchecked_t fscache_op_debug_id;
58266 extern void fscache_op_work_func(struct work_struct *work);
58267
58268 extern void fscache_enqueue_operation(struct fscache_operation *);
58269@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58270 {
58271 INIT_WORK(&op->work, fscache_op_work_func);
58272 atomic_set(&op->usage, 1);
58273- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58274+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58275 op->processor = processor;
58276 op->release = release;
58277 INIT_LIST_HEAD(&op->pend_link);
58278diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58279index 2a53f10..0187fdf 100644
58280--- a/include/linux/fsnotify.h
58281+++ b/include/linux/fsnotify.h
58282@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58283 */
58284 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58285 {
58286- return kstrdup(name, GFP_KERNEL);
58287+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58288 }
58289
58290 /*
58291diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58292index 91d0e0a3..035666b 100644
58293--- a/include/linux/fsnotify_backend.h
58294+++ b/include/linux/fsnotify_backend.h
58295@@ -105,6 +105,7 @@ struct fsnotify_ops {
58296 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58297 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58298 };
58299+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58300
58301 /*
58302 * A group is a "thing" that wants to receive notification about filesystem
58303diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58304index c3da42d..c70e0df 100644
58305--- a/include/linux/ftrace_event.h
58306+++ b/include/linux/ftrace_event.h
58307@@ -97,7 +97,7 @@ struct trace_event_functions {
58308 trace_print_func raw;
58309 trace_print_func hex;
58310 trace_print_func binary;
58311-};
58312+} __no_const;
58313
58314 struct trace_event {
58315 struct hlist_node node;
58316@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58317 extern int trace_add_event_call(struct ftrace_event_call *call);
58318 extern void trace_remove_event_call(struct ftrace_event_call *call);
58319
58320-#define is_signed_type(type) (((type)(-1)) < 0)
58321+#define is_signed_type(type) (((type)(-1)) < (type)1)
58322
58323 int trace_set_clr_event(const char *system, const char *event, int set);
58324
58325diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58326index 6d18f35..ab71e2c 100644
58327--- a/include/linux/genhd.h
58328+++ b/include/linux/genhd.h
58329@@ -185,7 +185,7 @@ struct gendisk {
58330 struct kobject *slave_dir;
58331
58332 struct timer_rand_state *random;
58333- atomic_t sync_io; /* RAID */
58334+ atomic_unchecked_t sync_io; /* RAID */
58335 struct disk_events *ev;
58336 #ifdef CONFIG_BLK_DEV_INTEGRITY
58337 struct blk_integrity *integrity;
58338diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58339new file mode 100644
58340index 0000000..0dc3943
58341--- /dev/null
58342+++ b/include/linux/gracl.h
58343@@ -0,0 +1,317 @@
58344+#ifndef GR_ACL_H
58345+#define GR_ACL_H
58346+
58347+#include <linux/grdefs.h>
58348+#include <linux/resource.h>
58349+#include <linux/capability.h>
58350+#include <linux/dcache.h>
58351+#include <asm/resource.h>
58352+
58353+/* Major status information */
58354+
58355+#define GR_VERSION "grsecurity 2.2.2"
58356+#define GRSECURITY_VERSION 0x2202
58357+
58358+enum {
58359+ GR_SHUTDOWN = 0,
58360+ GR_ENABLE = 1,
58361+ GR_SPROLE = 2,
58362+ GR_RELOAD = 3,
58363+ GR_SEGVMOD = 4,
58364+ GR_STATUS = 5,
58365+ GR_UNSPROLE = 6,
58366+ GR_PASSSET = 7,
58367+ GR_SPROLEPAM = 8,
58368+};
58369+
58370+/* Password setup definitions
58371+ * kernel/grhash.c */
58372+enum {
58373+ GR_PW_LEN = 128,
58374+ GR_SALT_LEN = 16,
58375+ GR_SHA_LEN = 32,
58376+};
58377+
58378+enum {
58379+ GR_SPROLE_LEN = 64,
58380+};
58381+
58382+enum {
58383+ GR_NO_GLOB = 0,
58384+ GR_REG_GLOB,
58385+ GR_CREATE_GLOB
58386+};
58387+
58388+#define GR_NLIMITS 32
58389+
58390+/* Begin Data Structures */
58391+
58392+struct sprole_pw {
58393+ unsigned char *rolename;
58394+ unsigned char salt[GR_SALT_LEN];
58395+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58396+};
58397+
58398+struct name_entry {
58399+ __u32 key;
58400+ ino_t inode;
58401+ dev_t device;
58402+ char *name;
58403+ __u16 len;
58404+ __u8 deleted;
58405+ struct name_entry *prev;
58406+ struct name_entry *next;
58407+};
58408+
58409+struct inodev_entry {
58410+ struct name_entry *nentry;
58411+ struct inodev_entry *prev;
58412+ struct inodev_entry *next;
58413+};
58414+
58415+struct acl_role_db {
58416+ struct acl_role_label **r_hash;
58417+ __u32 r_size;
58418+};
58419+
58420+struct inodev_db {
58421+ struct inodev_entry **i_hash;
58422+ __u32 i_size;
58423+};
58424+
58425+struct name_db {
58426+ struct name_entry **n_hash;
58427+ __u32 n_size;
58428+};
58429+
58430+struct crash_uid {
58431+ uid_t uid;
58432+ unsigned long expires;
58433+};
58434+
58435+struct gr_hash_struct {
58436+ void **table;
58437+ void **nametable;
58438+ void *first;
58439+ __u32 table_size;
58440+ __u32 used_size;
58441+ int type;
58442+};
58443+
58444+/* Userspace Grsecurity ACL data structures */
58445+
58446+struct acl_subject_label {
58447+ char *filename;
58448+ ino_t inode;
58449+ dev_t device;
58450+ __u32 mode;
58451+ kernel_cap_t cap_mask;
58452+ kernel_cap_t cap_lower;
58453+ kernel_cap_t cap_invert_audit;
58454+
58455+ struct rlimit res[GR_NLIMITS];
58456+ __u32 resmask;
58457+
58458+ __u8 user_trans_type;
58459+ __u8 group_trans_type;
58460+ uid_t *user_transitions;
58461+ gid_t *group_transitions;
58462+ __u16 user_trans_num;
58463+ __u16 group_trans_num;
58464+
58465+ __u32 sock_families[2];
58466+ __u32 ip_proto[8];
58467+ __u32 ip_type;
58468+ struct acl_ip_label **ips;
58469+ __u32 ip_num;
58470+ __u32 inaddr_any_override;
58471+
58472+ __u32 crashes;
58473+ unsigned long expires;
58474+
58475+ struct acl_subject_label *parent_subject;
58476+ struct gr_hash_struct *hash;
58477+ struct acl_subject_label *prev;
58478+ struct acl_subject_label *next;
58479+
58480+ struct acl_object_label **obj_hash;
58481+ __u32 obj_hash_size;
58482+ __u16 pax_flags;
58483+};
58484+
58485+struct role_allowed_ip {
58486+ __u32 addr;
58487+ __u32 netmask;
58488+
58489+ struct role_allowed_ip *prev;
58490+ struct role_allowed_ip *next;
58491+};
58492+
58493+struct role_transition {
58494+ char *rolename;
58495+
58496+ struct role_transition *prev;
58497+ struct role_transition *next;
58498+};
58499+
58500+struct acl_role_label {
58501+ char *rolename;
58502+ uid_t uidgid;
58503+ __u16 roletype;
58504+
58505+ __u16 auth_attempts;
58506+ unsigned long expires;
58507+
58508+ struct acl_subject_label *root_label;
58509+ struct gr_hash_struct *hash;
58510+
58511+ struct acl_role_label *prev;
58512+ struct acl_role_label *next;
58513+
58514+ struct role_transition *transitions;
58515+ struct role_allowed_ip *allowed_ips;
58516+ uid_t *domain_children;
58517+ __u16 domain_child_num;
58518+
58519+ struct acl_subject_label **subj_hash;
58520+ __u32 subj_hash_size;
58521+};
58522+
58523+struct user_acl_role_db {
58524+ struct acl_role_label **r_table;
58525+ __u32 num_pointers; /* Number of allocations to track */
58526+ __u32 num_roles; /* Number of roles */
58527+ __u32 num_domain_children; /* Number of domain children */
58528+ __u32 num_subjects; /* Number of subjects */
58529+ __u32 num_objects; /* Number of objects */
58530+};
58531+
58532+struct acl_object_label {
58533+ char *filename;
58534+ ino_t inode;
58535+ dev_t device;
58536+ __u32 mode;
58537+
58538+ struct acl_subject_label *nested;
58539+ struct acl_object_label *globbed;
58540+
58541+ /* next two structures not used */
58542+
58543+ struct acl_object_label *prev;
58544+ struct acl_object_label *next;
58545+};
58546+
58547+struct acl_ip_label {
58548+ char *iface;
58549+ __u32 addr;
58550+ __u32 netmask;
58551+ __u16 low, high;
58552+ __u8 mode;
58553+ __u32 type;
58554+ __u32 proto[8];
58555+
58556+ /* next two structures not used */
58557+
58558+ struct acl_ip_label *prev;
58559+ struct acl_ip_label *next;
58560+};
58561+
58562+struct gr_arg {
58563+ struct user_acl_role_db role_db;
58564+ unsigned char pw[GR_PW_LEN];
58565+ unsigned char salt[GR_SALT_LEN];
58566+ unsigned char sum[GR_SHA_LEN];
58567+ unsigned char sp_role[GR_SPROLE_LEN];
58568+ struct sprole_pw *sprole_pws;
58569+ dev_t segv_device;
58570+ ino_t segv_inode;
58571+ uid_t segv_uid;
58572+ __u16 num_sprole_pws;
58573+ __u16 mode;
58574+};
58575+
58576+struct gr_arg_wrapper {
58577+ struct gr_arg *arg;
58578+ __u32 version;
58579+ __u32 size;
58580+};
58581+
58582+struct subject_map {
58583+ struct acl_subject_label *user;
58584+ struct acl_subject_label *kernel;
58585+ struct subject_map *prev;
58586+ struct subject_map *next;
58587+};
58588+
58589+struct acl_subj_map_db {
58590+ struct subject_map **s_hash;
58591+ __u32 s_size;
58592+};
58593+
58594+/* End Data Structures Section */
58595+
58596+/* Hash functions generated by empirical testing by Brad Spengler
58597+ Makes good use of the low bits of the inode. Generally 0-1 times
58598+ in loop for successful match. 0-3 for unsuccessful match.
58599+ Shift/add algorithm with modulus of table size and an XOR*/
58600+
58601+static __inline__ unsigned int
58602+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58603+{
58604+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58605+}
58606+
58607+ static __inline__ unsigned int
58608+shash(const struct acl_subject_label *userp, const unsigned int sz)
58609+{
58610+ return ((const unsigned long)userp % sz);
58611+}
58612+
58613+static __inline__ unsigned int
58614+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58615+{
58616+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58617+}
58618+
58619+static __inline__ unsigned int
58620+nhash(const char *name, const __u16 len, const unsigned int sz)
58621+{
58622+ return full_name_hash((const unsigned char *)name, len) % sz;
58623+}
58624+
58625+#define FOR_EACH_ROLE_START(role) \
58626+ role = role_list; \
58627+ while (role) {
58628+
58629+#define FOR_EACH_ROLE_END(role) \
58630+ role = role->prev; \
58631+ }
58632+
58633+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58634+ subj = NULL; \
58635+ iter = 0; \
58636+ while (iter < role->subj_hash_size) { \
58637+ if (subj == NULL) \
58638+ subj = role->subj_hash[iter]; \
58639+ if (subj == NULL) { \
58640+ iter++; \
58641+ continue; \
58642+ }
58643+
58644+#define FOR_EACH_SUBJECT_END(subj,iter) \
58645+ subj = subj->next; \
58646+ if (subj == NULL) \
58647+ iter++; \
58648+ }
58649+
58650+
58651+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58652+ subj = role->hash->first; \
58653+ while (subj != NULL) {
58654+
58655+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58656+ subj = subj->next; \
58657+ }
58658+
58659+#endif
58660+
58661diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58662new file mode 100644
58663index 0000000..323ecf2
58664--- /dev/null
58665+++ b/include/linux/gralloc.h
58666@@ -0,0 +1,9 @@
58667+#ifndef __GRALLOC_H
58668+#define __GRALLOC_H
58669+
58670+void acl_free_all(void);
58671+int acl_alloc_stack_init(unsigned long size);
58672+void *acl_alloc(unsigned long len);
58673+void *acl_alloc_num(unsigned long num, unsigned long len);
58674+
58675+#endif
58676diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58677new file mode 100644
58678index 0000000..b30e9bc
58679--- /dev/null
58680+++ b/include/linux/grdefs.h
58681@@ -0,0 +1,140 @@
58682+#ifndef GRDEFS_H
58683+#define GRDEFS_H
58684+
58685+/* Begin grsecurity status declarations */
58686+
58687+enum {
58688+ GR_READY = 0x01,
58689+ GR_STATUS_INIT = 0x00 // disabled state
58690+};
58691+
58692+/* Begin ACL declarations */
58693+
58694+/* Role flags */
58695+
58696+enum {
58697+ GR_ROLE_USER = 0x0001,
58698+ GR_ROLE_GROUP = 0x0002,
58699+ GR_ROLE_DEFAULT = 0x0004,
58700+ GR_ROLE_SPECIAL = 0x0008,
58701+ GR_ROLE_AUTH = 0x0010,
58702+ GR_ROLE_NOPW = 0x0020,
58703+ GR_ROLE_GOD = 0x0040,
58704+ GR_ROLE_LEARN = 0x0080,
58705+ GR_ROLE_TPE = 0x0100,
58706+ GR_ROLE_DOMAIN = 0x0200,
58707+ GR_ROLE_PAM = 0x0400,
58708+ GR_ROLE_PERSIST = 0x0800
58709+};
58710+
58711+/* ACL Subject and Object mode flags */
58712+enum {
58713+ GR_DELETED = 0x80000000
58714+};
58715+
58716+/* ACL Object-only mode flags */
58717+enum {
58718+ GR_READ = 0x00000001,
58719+ GR_APPEND = 0x00000002,
58720+ GR_WRITE = 0x00000004,
58721+ GR_EXEC = 0x00000008,
58722+ GR_FIND = 0x00000010,
58723+ GR_INHERIT = 0x00000020,
58724+ GR_SETID = 0x00000040,
58725+ GR_CREATE = 0x00000080,
58726+ GR_DELETE = 0x00000100,
58727+ GR_LINK = 0x00000200,
58728+ GR_AUDIT_READ = 0x00000400,
58729+ GR_AUDIT_APPEND = 0x00000800,
58730+ GR_AUDIT_WRITE = 0x00001000,
58731+ GR_AUDIT_EXEC = 0x00002000,
58732+ GR_AUDIT_FIND = 0x00004000,
58733+ GR_AUDIT_INHERIT= 0x00008000,
58734+ GR_AUDIT_SETID = 0x00010000,
58735+ GR_AUDIT_CREATE = 0x00020000,
58736+ GR_AUDIT_DELETE = 0x00040000,
58737+ GR_AUDIT_LINK = 0x00080000,
58738+ GR_PTRACERD = 0x00100000,
58739+ GR_NOPTRACE = 0x00200000,
58740+ GR_SUPPRESS = 0x00400000,
58741+ GR_NOLEARN = 0x00800000,
58742+ GR_INIT_TRANSFER= 0x01000000
58743+};
58744+
58745+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58746+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58747+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58748+
58749+/* ACL subject-only mode flags */
58750+enum {
58751+ GR_KILL = 0x00000001,
58752+ GR_VIEW = 0x00000002,
58753+ GR_PROTECTED = 0x00000004,
58754+ GR_LEARN = 0x00000008,
58755+ GR_OVERRIDE = 0x00000010,
58756+ /* just a placeholder, this mode is only used in userspace */
58757+ GR_DUMMY = 0x00000020,
58758+ GR_PROTSHM = 0x00000040,
58759+ GR_KILLPROC = 0x00000080,
58760+ GR_KILLIPPROC = 0x00000100,
58761+ /* just a placeholder, this mode is only used in userspace */
58762+ GR_NOTROJAN = 0x00000200,
58763+ GR_PROTPROCFD = 0x00000400,
58764+ GR_PROCACCT = 0x00000800,
58765+ GR_RELAXPTRACE = 0x00001000,
58766+ GR_NESTED = 0x00002000,
58767+ GR_INHERITLEARN = 0x00004000,
58768+ GR_PROCFIND = 0x00008000,
58769+ GR_POVERRIDE = 0x00010000,
58770+ GR_KERNELAUTH = 0x00020000,
58771+ GR_ATSECURE = 0x00040000,
58772+ GR_SHMEXEC = 0x00080000
58773+};
58774+
58775+enum {
58776+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58777+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58778+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58779+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58780+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58781+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58782+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58783+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58784+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58785+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58786+};
58787+
58788+enum {
58789+ GR_ID_USER = 0x01,
58790+ GR_ID_GROUP = 0x02,
58791+};
58792+
58793+enum {
58794+ GR_ID_ALLOW = 0x01,
58795+ GR_ID_DENY = 0x02,
58796+};
58797+
58798+#define GR_CRASH_RES 31
58799+#define GR_UIDTABLE_MAX 500
58800+
58801+/* begin resource learning section */
58802+enum {
58803+ GR_RLIM_CPU_BUMP = 60,
58804+ GR_RLIM_FSIZE_BUMP = 50000,
58805+ GR_RLIM_DATA_BUMP = 10000,
58806+ GR_RLIM_STACK_BUMP = 1000,
58807+ GR_RLIM_CORE_BUMP = 10000,
58808+ GR_RLIM_RSS_BUMP = 500000,
58809+ GR_RLIM_NPROC_BUMP = 1,
58810+ GR_RLIM_NOFILE_BUMP = 5,
58811+ GR_RLIM_MEMLOCK_BUMP = 50000,
58812+ GR_RLIM_AS_BUMP = 500000,
58813+ GR_RLIM_LOCKS_BUMP = 2,
58814+ GR_RLIM_SIGPENDING_BUMP = 5,
58815+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58816+ GR_RLIM_NICE_BUMP = 1,
58817+ GR_RLIM_RTPRIO_BUMP = 1,
58818+ GR_RLIM_RTTIME_BUMP = 1000000
58819+};
58820+
58821+#endif
58822diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58823new file mode 100644
58824index 0000000..da390f1
58825--- /dev/null
58826+++ b/include/linux/grinternal.h
58827@@ -0,0 +1,221 @@
58828+#ifndef __GRINTERNAL_H
58829+#define __GRINTERNAL_H
58830+
58831+#ifdef CONFIG_GRKERNSEC
58832+
58833+#include <linux/fs.h>
58834+#include <linux/mnt_namespace.h>
58835+#include <linux/nsproxy.h>
58836+#include <linux/gracl.h>
58837+#include <linux/grdefs.h>
58838+#include <linux/grmsg.h>
58839+
58840+void gr_add_learn_entry(const char *fmt, ...)
58841+ __attribute__ ((format (printf, 1, 2)));
58842+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58843+ const struct vfsmount *mnt);
58844+__u32 gr_check_create(const struct dentry *new_dentry,
58845+ const struct dentry *parent,
58846+ const struct vfsmount *mnt, const __u32 mode);
58847+int gr_check_protected_task(const struct task_struct *task);
58848+__u32 to_gr_audit(const __u32 reqmode);
58849+int gr_set_acls(const int type);
58850+int gr_apply_subject_to_task(struct task_struct *task);
58851+int gr_acl_is_enabled(void);
58852+char gr_roletype_to_char(void);
58853+
58854+void gr_handle_alertkill(struct task_struct *task);
58855+char *gr_to_filename(const struct dentry *dentry,
58856+ const struct vfsmount *mnt);
58857+char *gr_to_filename1(const struct dentry *dentry,
58858+ const struct vfsmount *mnt);
58859+char *gr_to_filename2(const struct dentry *dentry,
58860+ const struct vfsmount *mnt);
58861+char *gr_to_filename3(const struct dentry *dentry,
58862+ const struct vfsmount *mnt);
58863+
58864+extern int grsec_enable_ptrace_readexec;
58865+extern int grsec_enable_harden_ptrace;
58866+extern int grsec_enable_link;
58867+extern int grsec_enable_fifo;
58868+extern int grsec_enable_execve;
58869+extern int grsec_enable_shm;
58870+extern int grsec_enable_execlog;
58871+extern int grsec_enable_signal;
58872+extern int grsec_enable_audit_ptrace;
58873+extern int grsec_enable_forkfail;
58874+extern int grsec_enable_time;
58875+extern int grsec_enable_rofs;
58876+extern int grsec_enable_chroot_shmat;
58877+extern int grsec_enable_chroot_mount;
58878+extern int grsec_enable_chroot_double;
58879+extern int grsec_enable_chroot_pivot;
58880+extern int grsec_enable_chroot_chdir;
58881+extern int grsec_enable_chroot_chmod;
58882+extern int grsec_enable_chroot_mknod;
58883+extern int grsec_enable_chroot_fchdir;
58884+extern int grsec_enable_chroot_nice;
58885+extern int grsec_enable_chroot_execlog;
58886+extern int grsec_enable_chroot_caps;
58887+extern int grsec_enable_chroot_sysctl;
58888+extern int grsec_enable_chroot_unix;
58889+extern int grsec_enable_tpe;
58890+extern int grsec_tpe_gid;
58891+extern int grsec_enable_tpe_all;
58892+extern int grsec_enable_tpe_invert;
58893+extern int grsec_enable_socket_all;
58894+extern int grsec_socket_all_gid;
58895+extern int grsec_enable_socket_client;
58896+extern int grsec_socket_client_gid;
58897+extern int grsec_enable_socket_server;
58898+extern int grsec_socket_server_gid;
58899+extern int grsec_audit_gid;
58900+extern int grsec_enable_group;
58901+extern int grsec_enable_audit_textrel;
58902+extern int grsec_enable_log_rwxmaps;
58903+extern int grsec_enable_mount;
58904+extern int grsec_enable_chdir;
58905+extern int grsec_resource_logging;
58906+extern int grsec_enable_blackhole;
58907+extern int grsec_lastack_retries;
58908+extern int grsec_enable_brute;
58909+extern int grsec_lock;
58910+
58911+extern spinlock_t grsec_alert_lock;
58912+extern unsigned long grsec_alert_wtime;
58913+extern unsigned long grsec_alert_fyet;
58914+
58915+extern spinlock_t grsec_audit_lock;
58916+
58917+extern rwlock_t grsec_exec_file_lock;
58918+
58919+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58920+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58921+ (tsk)->exec_file->f_vfsmnt) : "/")
58922+
58923+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58924+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58925+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58926+
58927+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58928+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58929+ (tsk)->exec_file->f_vfsmnt) : "/")
58930+
58931+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58932+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58933+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58934+
58935+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58936+
58937+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58938+
58939+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58940+ (task)->pid, (cred)->uid, \
58941+ (cred)->euid, (cred)->gid, (cred)->egid, \
58942+ gr_parent_task_fullpath(task), \
58943+ (task)->real_parent->comm, (task)->real_parent->pid, \
58944+ (pcred)->uid, (pcred)->euid, \
58945+ (pcred)->gid, (pcred)->egid
58946+
58947+#define GR_CHROOT_CAPS {{ \
58948+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58949+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58950+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58951+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58952+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58953+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58954+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58955+
58956+#define security_learn(normal_msg,args...) \
58957+({ \
58958+ read_lock(&grsec_exec_file_lock); \
58959+ gr_add_learn_entry(normal_msg "\n", ## args); \
58960+ read_unlock(&grsec_exec_file_lock); \
58961+})
58962+
58963+enum {
58964+ GR_DO_AUDIT,
58965+ GR_DONT_AUDIT,
58966+ /* used for non-audit messages that we shouldn't kill the task on */
58967+ GR_DONT_AUDIT_GOOD
58968+};
58969+
58970+enum {
58971+ GR_TTYSNIFF,
58972+ GR_RBAC,
58973+ GR_RBAC_STR,
58974+ GR_STR_RBAC,
58975+ GR_RBAC_MODE2,
58976+ GR_RBAC_MODE3,
58977+ GR_FILENAME,
58978+ GR_SYSCTL_HIDDEN,
58979+ GR_NOARGS,
58980+ GR_ONE_INT,
58981+ GR_ONE_INT_TWO_STR,
58982+ GR_ONE_STR,
58983+ GR_STR_INT,
58984+ GR_TWO_STR_INT,
58985+ GR_TWO_INT,
58986+ GR_TWO_U64,
58987+ GR_THREE_INT,
58988+ GR_FIVE_INT_TWO_STR,
58989+ GR_TWO_STR,
58990+ GR_THREE_STR,
58991+ GR_FOUR_STR,
58992+ GR_STR_FILENAME,
58993+ GR_FILENAME_STR,
58994+ GR_FILENAME_TWO_INT,
58995+ GR_FILENAME_TWO_INT_STR,
58996+ GR_TEXTREL,
58997+ GR_PTRACE,
58998+ GR_RESOURCE,
58999+ GR_CAP,
59000+ GR_SIG,
59001+ GR_SIG2,
59002+ GR_CRASH1,
59003+ GR_CRASH2,
59004+ GR_PSACCT,
59005+ GR_RWXMAP
59006+};
59007+
59008+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59009+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59010+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59011+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59012+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59013+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59014+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59015+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59016+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59017+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59018+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59019+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59020+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59021+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59022+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59023+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59024+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59025+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59026+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59027+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59028+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59029+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59030+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59031+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59032+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59033+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59034+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59035+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59036+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59037+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59038+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59039+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59040+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59041+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59042+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59043+
59044+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59045+
59046+#endif
59047+
59048+#endif
59049diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59050new file mode 100644
59051index 0000000..b3347e2
59052--- /dev/null
59053+++ b/include/linux/grmsg.h
59054@@ -0,0 +1,109 @@
59055+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59056+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59057+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59058+#define GR_STOPMOD_MSG "denied modification of module state by "
59059+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59060+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59061+#define GR_IOPERM_MSG "denied use of ioperm() by "
59062+#define GR_IOPL_MSG "denied use of iopl() by "
59063+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59064+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59065+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59066+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59067+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59068+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59069+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59070+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59071+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59072+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59073+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59074+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59075+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59076+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59077+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59078+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59079+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59080+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59081+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59082+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59083+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59084+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59085+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59086+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59087+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59088+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59089+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.64s) of %.950s by "
59090+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59091+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59092+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59093+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59094+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59095+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59096+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59097+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59098+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59099+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59100+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59101+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59102+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59103+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59104+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59105+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59106+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59107+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59108+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59109+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59110+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59111+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59112+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59113+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59114+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59115+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59116+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59117+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59118+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59119+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59120+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59121+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59122+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59123+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59124+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59125+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59126+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59127+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59128+#define GR_NICE_CHROOT_MSG "denied priority change by "
59129+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59130+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59131+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59132+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59133+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59134+#define GR_TIME_MSG "time set by "
59135+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59136+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59137+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59138+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59139+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59140+#define GR_BIND_MSG "denied bind() by "
59141+#define GR_CONNECT_MSG "denied connect() by "
59142+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59143+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59144+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59145+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59146+#define GR_CAP_ACL_MSG "use of %s denied for "
59147+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59148+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59149+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59150+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59151+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59152+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59153+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59154+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59155+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59156+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59157+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59158+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59159+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59160+#define GR_VM86_MSG "denied use of vm86 by "
59161+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59162+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59163+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59164diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59165new file mode 100644
59166index 0000000..eb4885f
59167--- /dev/null
59168+++ b/include/linux/grsecurity.h
59169@@ -0,0 +1,233 @@
59170+#ifndef GR_SECURITY_H
59171+#define GR_SECURITY_H
59172+#include <linux/fs.h>
59173+#include <linux/fs_struct.h>
59174+#include <linux/binfmts.h>
59175+#include <linux/gracl.h>
59176+
59177+/* notify of brain-dead configs */
59178+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59179+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59180+#endif
59181+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59182+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59183+#endif
59184+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59185+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59186+#endif
59187+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59188+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59189+#endif
59190+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59191+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59192+#endif
59193+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59194+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59195+#endif
59196+
59197+#include <linux/compat.h>
59198+
59199+struct user_arg_ptr {
59200+#ifdef CONFIG_COMPAT
59201+ bool is_compat;
59202+#endif
59203+ union {
59204+ const char __user *const __user *native;
59205+#ifdef CONFIG_COMPAT
59206+ compat_uptr_t __user *compat;
59207+#endif
59208+ } ptr;
59209+};
59210+
59211+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59212+void gr_handle_brute_check(void);
59213+void gr_handle_kernel_exploit(void);
59214+int gr_process_user_ban(void);
59215+
59216+char gr_roletype_to_char(void);
59217+
59218+int gr_acl_enable_at_secure(void);
59219+
59220+int gr_check_user_change(int real, int effective, int fs);
59221+int gr_check_group_change(int real, int effective, int fs);
59222+
59223+void gr_del_task_from_ip_table(struct task_struct *p);
59224+
59225+int gr_pid_is_chrooted(struct task_struct *p);
59226+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59227+int gr_handle_chroot_nice(void);
59228+int gr_handle_chroot_sysctl(const int op);
59229+int gr_handle_chroot_setpriority(struct task_struct *p,
59230+ const int niceval);
59231+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59232+int gr_handle_chroot_chroot(const struct dentry *dentry,
59233+ const struct vfsmount *mnt);
59234+void gr_handle_chroot_chdir(struct path *path);
59235+int gr_handle_chroot_chmod(const struct dentry *dentry,
59236+ const struct vfsmount *mnt, const int mode);
59237+int gr_handle_chroot_mknod(const struct dentry *dentry,
59238+ const struct vfsmount *mnt, const int mode);
59239+int gr_handle_chroot_mount(const struct dentry *dentry,
59240+ const struct vfsmount *mnt,
59241+ const char *dev_name);
59242+int gr_handle_chroot_pivot(void);
59243+int gr_handle_chroot_unix(const pid_t pid);
59244+
59245+int gr_handle_rawio(const struct inode *inode);
59246+
59247+void gr_handle_ioperm(void);
59248+void gr_handle_iopl(void);
59249+
59250+int gr_tpe_allow(const struct file *file);
59251+
59252+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59253+void gr_clear_chroot_entries(struct task_struct *task);
59254+
59255+void gr_log_forkfail(const int retval);
59256+void gr_log_timechange(void);
59257+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59258+void gr_log_chdir(const struct dentry *dentry,
59259+ const struct vfsmount *mnt);
59260+void gr_log_chroot_exec(const struct dentry *dentry,
59261+ const struct vfsmount *mnt);
59262+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59263+void gr_log_remount(const char *devname, const int retval);
59264+void gr_log_unmount(const char *devname, const int retval);
59265+void gr_log_mount(const char *from, const char *to, const int retval);
59266+void gr_log_textrel(struct vm_area_struct *vma);
59267+void gr_log_rwxmmap(struct file *file);
59268+void gr_log_rwxmprotect(struct file *file);
59269+
59270+int gr_handle_follow_link(const struct inode *parent,
59271+ const struct inode *inode,
59272+ const struct dentry *dentry,
59273+ const struct vfsmount *mnt);
59274+int gr_handle_fifo(const struct dentry *dentry,
59275+ const struct vfsmount *mnt,
59276+ const struct dentry *dir, const int flag,
59277+ const int acc_mode);
59278+int gr_handle_hardlink(const struct dentry *dentry,
59279+ const struct vfsmount *mnt,
59280+ struct inode *inode,
59281+ const int mode, const char *to);
59282+
59283+int gr_is_capable(const int cap);
59284+int gr_is_capable_nolog(const int cap);
59285+void gr_learn_resource(const struct task_struct *task, const int limit,
59286+ const unsigned long wanted, const int gt);
59287+void gr_copy_label(struct task_struct *tsk);
59288+void gr_handle_crash(struct task_struct *task, const int sig);
59289+int gr_handle_signal(const struct task_struct *p, const int sig);
59290+int gr_check_crash_uid(const uid_t uid);
59291+int gr_check_protected_task(const struct task_struct *task);
59292+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59293+int gr_acl_handle_mmap(const struct file *file,
59294+ const unsigned long prot);
59295+int gr_acl_handle_mprotect(const struct file *file,
59296+ const unsigned long prot);
59297+int gr_check_hidden_task(const struct task_struct *tsk);
59298+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59299+ const struct vfsmount *mnt);
59300+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59301+ const struct vfsmount *mnt);
59302+__u32 gr_acl_handle_access(const struct dentry *dentry,
59303+ const struct vfsmount *mnt, const int fmode);
59304+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59305+ const struct vfsmount *mnt, mode_t mode);
59306+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59307+ const struct vfsmount *mnt, mode_t mode);
59308+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59309+ const struct vfsmount *mnt);
59310+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59311+ const struct vfsmount *mnt);
59312+int gr_handle_ptrace(struct task_struct *task, const long request);
59313+int gr_handle_proc_ptrace(struct task_struct *task);
59314+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59315+ const struct vfsmount *mnt);
59316+int gr_check_crash_exec(const struct file *filp);
59317+int gr_acl_is_enabled(void);
59318+void gr_set_kernel_label(struct task_struct *task);
59319+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59320+ const gid_t gid);
59321+int gr_set_proc_label(const struct dentry *dentry,
59322+ const struct vfsmount *mnt,
59323+ const int unsafe_flags);
59324+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59325+ const struct vfsmount *mnt);
59326+__u32 gr_acl_handle_open(const struct dentry *dentry,
59327+ const struct vfsmount *mnt, int acc_mode);
59328+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59329+ const struct dentry *p_dentry,
59330+ const struct vfsmount *p_mnt,
59331+ int open_flags, int acc_mode, const int imode);
59332+void gr_handle_create(const struct dentry *dentry,
59333+ const struct vfsmount *mnt);
59334+void gr_handle_proc_create(const struct dentry *dentry,
59335+ const struct inode *inode);
59336+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59337+ const struct dentry *parent_dentry,
59338+ const struct vfsmount *parent_mnt,
59339+ const int mode);
59340+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59341+ const struct dentry *parent_dentry,
59342+ const struct vfsmount *parent_mnt);
59343+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59344+ const struct vfsmount *mnt);
59345+void gr_handle_delete(const ino_t ino, const dev_t dev);
59346+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59347+ const struct vfsmount *mnt);
59348+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59349+ const struct dentry *parent_dentry,
59350+ const struct vfsmount *parent_mnt,
59351+ const char *from);
59352+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59353+ const struct dentry *parent_dentry,
59354+ const struct vfsmount *parent_mnt,
59355+ const struct dentry *old_dentry,
59356+ const struct vfsmount *old_mnt, const char *to);
59357+int gr_acl_handle_rename(struct dentry *new_dentry,
59358+ struct dentry *parent_dentry,
59359+ const struct vfsmount *parent_mnt,
59360+ struct dentry *old_dentry,
59361+ struct inode *old_parent_inode,
59362+ struct vfsmount *old_mnt, const char *newname);
59363+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59364+ struct dentry *old_dentry,
59365+ struct dentry *new_dentry,
59366+ struct vfsmount *mnt, const __u8 replace);
59367+__u32 gr_check_link(const struct dentry *new_dentry,
59368+ const struct dentry *parent_dentry,
59369+ const struct vfsmount *parent_mnt,
59370+ const struct dentry *old_dentry,
59371+ const struct vfsmount *old_mnt);
59372+int gr_acl_handle_filldir(const struct file *file, const char *name,
59373+ const unsigned int namelen, const ino_t ino);
59374+
59375+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59376+ const struct vfsmount *mnt);
59377+void gr_acl_handle_exit(void);
59378+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59379+int gr_acl_handle_procpidmem(const struct task_struct *task);
59380+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59381+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59382+void gr_audit_ptrace(struct task_struct *task);
59383+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59384+
59385+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59386+
59387+#ifdef CONFIG_GRKERNSEC
59388+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59389+void gr_handle_vm86(void);
59390+void gr_handle_mem_readwrite(u64 from, u64 to);
59391+
59392+extern int grsec_enable_dmesg;
59393+extern int grsec_disable_privio;
59394+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59395+extern int grsec_enable_chroot_findtask;
59396+#endif
59397+#ifdef CONFIG_GRKERNSEC_SETXID
59398+extern int grsec_enable_setxid;
59399+#endif
59400+#endif
59401+
59402+#endif
59403diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59404new file mode 100644
59405index 0000000..e7ffaaf
59406--- /dev/null
59407+++ b/include/linux/grsock.h
59408@@ -0,0 +1,19 @@
59409+#ifndef __GRSOCK_H
59410+#define __GRSOCK_H
59411+
59412+extern void gr_attach_curr_ip(const struct sock *sk);
59413+extern int gr_handle_sock_all(const int family, const int type,
59414+ const int protocol);
59415+extern int gr_handle_sock_server(const struct sockaddr *sck);
59416+extern int gr_handle_sock_server_other(const struct sock *sck);
59417+extern int gr_handle_sock_client(const struct sockaddr *sck);
59418+extern int gr_search_connect(struct socket * sock,
59419+ struct sockaddr_in * addr);
59420+extern int gr_search_bind(struct socket * sock,
59421+ struct sockaddr_in * addr);
59422+extern int gr_search_listen(struct socket * sock);
59423+extern int gr_search_accept(struct socket * sock);
59424+extern int gr_search_socket(const int domain, const int type,
59425+ const int protocol);
59426+
59427+#endif
59428diff --git a/include/linux/hid.h b/include/linux/hid.h
59429index c235e4e..f0cf7a0 100644
59430--- a/include/linux/hid.h
59431+++ b/include/linux/hid.h
59432@@ -679,7 +679,7 @@ struct hid_ll_driver {
59433 unsigned int code, int value);
59434
59435 int (*parse)(struct hid_device *hdev);
59436-};
59437+} __no_const;
59438
59439 #define PM_HINT_FULLON 1<<5
59440 #define PM_HINT_NORMAL 1<<1
59441diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59442index 3a93f73..b19d0b3 100644
59443--- a/include/linux/highmem.h
59444+++ b/include/linux/highmem.h
59445@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59446 kunmap_atomic(kaddr, KM_USER0);
59447 }
59448
59449+static inline void sanitize_highpage(struct page *page)
59450+{
59451+ void *kaddr;
59452+ unsigned long flags;
59453+
59454+ local_irq_save(flags);
59455+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59456+ clear_page(kaddr);
59457+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59458+ local_irq_restore(flags);
59459+}
59460+
59461 static inline void zero_user_segments(struct page *page,
59462 unsigned start1, unsigned end1,
59463 unsigned start2, unsigned end2)
59464diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59465index 07d103a..04ec65b 100644
59466--- a/include/linux/i2c.h
59467+++ b/include/linux/i2c.h
59468@@ -364,6 +364,7 @@ struct i2c_algorithm {
59469 /* To determine what the adapter supports */
59470 u32 (*functionality) (struct i2c_adapter *);
59471 };
59472+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59473
59474 /*
59475 * i2c_adapter is the structure used to identify a physical i2c bus along
59476diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59477index a6deef4..c56a7f2 100644
59478--- a/include/linux/i2o.h
59479+++ b/include/linux/i2o.h
59480@@ -564,7 +564,7 @@ struct i2o_controller {
59481 struct i2o_device *exec; /* Executive */
59482 #if BITS_PER_LONG == 64
59483 spinlock_t context_list_lock; /* lock for context_list */
59484- atomic_t context_list_counter; /* needed for unique contexts */
59485+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59486 struct list_head context_list; /* list of context id's
59487 and pointers */
59488 #endif
59489diff --git a/include/linux/init.h b/include/linux/init.h
59490index 9146f39..885354d 100644
59491--- a/include/linux/init.h
59492+++ b/include/linux/init.h
59493@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59494
59495 /* Each module must use one module_init(). */
59496 #define module_init(initfn) \
59497- static inline initcall_t __inittest(void) \
59498+ static inline __used initcall_t __inittest(void) \
59499 { return initfn; } \
59500 int init_module(void) __attribute__((alias(#initfn)));
59501
59502 /* This is only required if you want to be unloadable. */
59503 #define module_exit(exitfn) \
59504- static inline exitcall_t __exittest(void) \
59505+ static inline __used exitcall_t __exittest(void) \
59506 { return exitfn; } \
59507 void cleanup_module(void) __attribute__((alias(#exitfn)));
59508
59509diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59510index 32574ee..00d4ef1 100644
59511--- a/include/linux/init_task.h
59512+++ b/include/linux/init_task.h
59513@@ -128,6 +128,12 @@ extern struct cred init_cred;
59514
59515 #define INIT_TASK_COMM "swapper"
59516
59517+#ifdef CONFIG_X86
59518+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59519+#else
59520+#define INIT_TASK_THREAD_INFO
59521+#endif
59522+
59523 /*
59524 * INIT_TASK is used to set up the first task table, touch at
59525 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59526@@ -166,6 +172,7 @@ extern struct cred init_cred;
59527 RCU_INIT_POINTER(.cred, &init_cred), \
59528 .comm = INIT_TASK_COMM, \
59529 .thread = INIT_THREAD, \
59530+ INIT_TASK_THREAD_INFO \
59531 .fs = &init_fs, \
59532 .files = &init_files, \
59533 .signal = &init_signals, \
59534diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59535index e6ca56d..8583707 100644
59536--- a/include/linux/intel-iommu.h
59537+++ b/include/linux/intel-iommu.h
59538@@ -296,7 +296,7 @@ struct iommu_flush {
59539 u8 fm, u64 type);
59540 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59541 unsigned int size_order, u64 type);
59542-};
59543+} __no_const;
59544
59545 enum {
59546 SR_DMAR_FECTL_REG,
59547diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59548index a64b00e..464d8bc 100644
59549--- a/include/linux/interrupt.h
59550+++ b/include/linux/interrupt.h
59551@@ -441,7 +441,7 @@ enum
59552 /* map softirq index to softirq name. update 'softirq_to_name' in
59553 * kernel/softirq.c when adding a new softirq.
59554 */
59555-extern char *softirq_to_name[NR_SOFTIRQS];
59556+extern const char * const softirq_to_name[NR_SOFTIRQS];
59557
59558 /* softirq mask and active fields moved to irq_cpustat_t in
59559 * asm/hardirq.h to get better cache usage. KAO
59560@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59561
59562 struct softirq_action
59563 {
59564- void (*action)(struct softirq_action *);
59565+ void (*action)(void);
59566 };
59567
59568 asmlinkage void do_softirq(void);
59569 asmlinkage void __do_softirq(void);
59570-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59571+extern void open_softirq(int nr, void (*action)(void));
59572 extern void softirq_init(void);
59573 static inline void __raise_softirq_irqoff(unsigned int nr)
59574 {
59575diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59576index 3875719..4cd454c 100644
59577--- a/include/linux/kallsyms.h
59578+++ b/include/linux/kallsyms.h
59579@@ -15,7 +15,8 @@
59580
59581 struct module;
59582
59583-#ifdef CONFIG_KALLSYMS
59584+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59585+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59586 /* Lookup the address for a symbol. Returns 0 if not found. */
59587 unsigned long kallsyms_lookup_name(const char *name);
59588
59589@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59590 /* Stupid that this does nothing, but I didn't create this mess. */
59591 #define __print_symbol(fmt, addr)
59592 #endif /*CONFIG_KALLSYMS*/
59593+#else /* when included by kallsyms.c, vsnprintf.c, or
59594+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59595+extern void __print_symbol(const char *fmt, unsigned long address);
59596+extern int sprint_backtrace(char *buffer, unsigned long address);
59597+extern int sprint_symbol(char *buffer, unsigned long address);
59598+const char *kallsyms_lookup(unsigned long addr,
59599+ unsigned long *symbolsize,
59600+ unsigned long *offset,
59601+ char **modname, char *namebuf);
59602+#endif
59603
59604 /* This macro allows us to keep printk typechecking */
59605 static __printf(1, 2)
59606diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59607index fa39183..40160be 100644
59608--- a/include/linux/kgdb.h
59609+++ b/include/linux/kgdb.h
59610@@ -53,7 +53,7 @@ extern int kgdb_connected;
59611 extern int kgdb_io_module_registered;
59612
59613 extern atomic_t kgdb_setting_breakpoint;
59614-extern atomic_t kgdb_cpu_doing_single_step;
59615+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59616
59617 extern struct task_struct *kgdb_usethread;
59618 extern struct task_struct *kgdb_contthread;
59619@@ -251,7 +251,7 @@ struct kgdb_arch {
59620 void (*disable_hw_break)(struct pt_regs *regs);
59621 void (*remove_all_hw_break)(void);
59622 void (*correct_hw_break)(void);
59623-};
59624+} __do_const;
59625
59626 /**
59627 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59628@@ -276,7 +276,7 @@ struct kgdb_io {
59629 void (*pre_exception) (void);
59630 void (*post_exception) (void);
59631 int is_console;
59632-};
59633+} __do_const;
59634
59635 extern struct kgdb_arch arch_kgdb_ops;
59636
59637diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59638index b16f653..eb908f4 100644
59639--- a/include/linux/kmod.h
59640+++ b/include/linux/kmod.h
59641@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59642 * usually useless though. */
59643 extern __printf(2, 3)
59644 int __request_module(bool wait, const char *name, ...);
59645+extern __printf(3, 4)
59646+int ___request_module(bool wait, char *param_name, const char *name, ...);
59647 #define request_module(mod...) __request_module(true, mod)
59648 #define request_module_nowait(mod...) __request_module(false, mod)
59649 #define try_then_request_module(x, mod...) \
59650diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59651index d526231..086e89b 100644
59652--- a/include/linux/kvm_host.h
59653+++ b/include/linux/kvm_host.h
59654@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59655 void vcpu_load(struct kvm_vcpu *vcpu);
59656 void vcpu_put(struct kvm_vcpu *vcpu);
59657
59658-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59659+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59660 struct module *module);
59661 void kvm_exit(void);
59662
59663@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59664 struct kvm_guest_debug *dbg);
59665 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59666
59667-int kvm_arch_init(void *opaque);
59668+int kvm_arch_init(const void *opaque);
59669 void kvm_arch_exit(void);
59670
59671 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59672diff --git a/include/linux/libata.h b/include/linux/libata.h
59673index cafc09a..d7e7829 100644
59674--- a/include/linux/libata.h
59675+++ b/include/linux/libata.h
59676@@ -909,7 +909,7 @@ struct ata_port_operations {
59677 * fields must be pointers.
59678 */
59679 const struct ata_port_operations *inherits;
59680-};
59681+} __do_const;
59682
59683 struct ata_port_info {
59684 unsigned long flags;
59685diff --git a/include/linux/mca.h b/include/linux/mca.h
59686index 3797270..7765ede 100644
59687--- a/include/linux/mca.h
59688+++ b/include/linux/mca.h
59689@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59690 int region);
59691 void * (*mca_transform_memory)(struct mca_device *,
59692 void *memory);
59693-};
59694+} __no_const;
59695
59696 struct mca_bus {
59697 u64 default_dma_mask;
59698diff --git a/include/linux/memory.h b/include/linux/memory.h
59699index 935699b..11042cc 100644
59700--- a/include/linux/memory.h
59701+++ b/include/linux/memory.h
59702@@ -144,7 +144,7 @@ struct memory_accessor {
59703 size_t count);
59704 ssize_t (*write)(struct memory_accessor *, const char *buf,
59705 off_t offset, size_t count);
59706-};
59707+} __no_const;
59708
59709 /*
59710 * Kernel text modification mutex, used for code patching. Users of this lock
59711diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59712index 9970337..9444122 100644
59713--- a/include/linux/mfd/abx500.h
59714+++ b/include/linux/mfd/abx500.h
59715@@ -188,6 +188,7 @@ struct abx500_ops {
59716 int (*event_registers_startup_state_get) (struct device *, u8 *);
59717 int (*startup_irq_enabled) (struct device *, unsigned int);
59718 };
59719+typedef struct abx500_ops __no_const abx500_ops_no_const;
59720
59721 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59722 void abx500_remove_ops(struct device *dev);
59723diff --git a/include/linux/mm.h b/include/linux/mm.h
59724index 4baadd1..2e0b45e 100644
59725--- a/include/linux/mm.h
59726+++ b/include/linux/mm.h
59727@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59728
59729 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59730 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59731+
59732+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59733+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59734+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59735+#else
59736 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59737+#endif
59738+
59739 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59740 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59741
59742@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59743 int set_page_dirty_lock(struct page *page);
59744 int clear_page_dirty_for_io(struct page *page);
59745
59746-/* Is the vma a continuation of the stack vma above it? */
59747-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59748-{
59749- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59750-}
59751-
59752-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59753- unsigned long addr)
59754-{
59755- return (vma->vm_flags & VM_GROWSDOWN) &&
59756- (vma->vm_start == addr) &&
59757- !vma_growsdown(vma->vm_prev, addr);
59758-}
59759-
59760-/* Is the vma a continuation of the stack vma below it? */
59761-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59762-{
59763- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59764-}
59765-
59766-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59767- unsigned long addr)
59768-{
59769- return (vma->vm_flags & VM_GROWSUP) &&
59770- (vma->vm_end == addr) &&
59771- !vma_growsup(vma->vm_next, addr);
59772-}
59773-
59774 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59775 unsigned long old_addr, struct vm_area_struct *new_vma,
59776 unsigned long new_addr, unsigned long len);
59777@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59778 }
59779 #endif
59780
59781+#ifdef CONFIG_MMU
59782+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59783+#else
59784+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59785+{
59786+ return __pgprot(0);
59787+}
59788+#endif
59789+
59790 int vma_wants_writenotify(struct vm_area_struct *vma);
59791
59792 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59793@@ -1419,6 +1407,7 @@ out:
59794 }
59795
59796 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59797+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59798
59799 extern unsigned long do_brk(unsigned long, unsigned long);
59800
59801@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59802 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59803 struct vm_area_struct **pprev);
59804
59805+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59806+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59807+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59808+
59809 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59810 NULL if none. Assume start_addr < end_addr. */
59811 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59812@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59813 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59814 }
59815
59816-#ifdef CONFIG_MMU
59817-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59818-#else
59819-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59820-{
59821- return __pgprot(0);
59822-}
59823-#endif
59824-
59825 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59826 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59827 unsigned long pfn, unsigned long size, pgprot_t);
59828@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59829 extern int sysctl_memory_failure_early_kill;
59830 extern int sysctl_memory_failure_recovery;
59831 extern void shake_page(struct page *p, int access);
59832-extern atomic_long_t mce_bad_pages;
59833+extern atomic_long_unchecked_t mce_bad_pages;
59834 extern int soft_offline_page(struct page *page, int flags);
59835
59836 extern void dump_page(struct page *page);
59837@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59838 unsigned int pages_per_huge_page);
59839 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59840
59841+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59842+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59843+#else
59844+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59845+#endif
59846+
59847 #endif /* __KERNEL__ */
59848 #endif /* _LINUX_MM_H */
59849diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59850index 5b42f1b..759e4b4 100644
59851--- a/include/linux/mm_types.h
59852+++ b/include/linux/mm_types.h
59853@@ -253,6 +253,8 @@ struct vm_area_struct {
59854 #ifdef CONFIG_NUMA
59855 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59856 #endif
59857+
59858+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59859 };
59860
59861 struct core_thread {
59862@@ -389,6 +391,24 @@ struct mm_struct {
59863 #ifdef CONFIG_CPUMASK_OFFSTACK
59864 struct cpumask cpumask_allocation;
59865 #endif
59866+
59867+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59868+ unsigned long pax_flags;
59869+#endif
59870+
59871+#ifdef CONFIG_PAX_DLRESOLVE
59872+ unsigned long call_dl_resolve;
59873+#endif
59874+
59875+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59876+ unsigned long call_syscall;
59877+#endif
59878+
59879+#ifdef CONFIG_PAX_ASLR
59880+ unsigned long delta_mmap; /* randomized offset */
59881+ unsigned long delta_stack; /* randomized offset */
59882+#endif
59883+
59884 };
59885
59886 static inline void mm_init_cpumask(struct mm_struct *mm)
59887diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59888index 1d1b1e1..2a13c78 100644
59889--- a/include/linux/mmu_notifier.h
59890+++ b/include/linux/mmu_notifier.h
59891@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59892 */
59893 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59894 ({ \
59895- pte_t __pte; \
59896+ pte_t ___pte; \
59897 struct vm_area_struct *___vma = __vma; \
59898 unsigned long ___address = __address; \
59899- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59900+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59901 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59902- __pte; \
59903+ ___pte; \
59904 })
59905
59906 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59907diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59908index 188cb2f..d78409b 100644
59909--- a/include/linux/mmzone.h
59910+++ b/include/linux/mmzone.h
59911@@ -369,7 +369,7 @@ struct zone {
59912 unsigned long flags; /* zone flags, see below */
59913
59914 /* Zone statistics */
59915- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59916+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59917
59918 /*
59919 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59920diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59921index 468819c..17b9db3 100644
59922--- a/include/linux/mod_devicetable.h
59923+++ b/include/linux/mod_devicetable.h
59924@@ -12,7 +12,7 @@
59925 typedef unsigned long kernel_ulong_t;
59926 #endif
59927
59928-#define PCI_ANY_ID (~0)
59929+#define PCI_ANY_ID ((__u16)~0)
59930
59931 struct pci_device_id {
59932 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59933@@ -131,7 +131,7 @@ struct usb_device_id {
59934 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59935 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59936
59937-#define HID_ANY_ID (~0)
59938+#define HID_ANY_ID (~0U)
59939
59940 struct hid_device_id {
59941 __u16 bus;
59942diff --git a/include/linux/module.h b/include/linux/module.h
59943index 3cb7839..511cb87 100644
59944--- a/include/linux/module.h
59945+++ b/include/linux/module.h
59946@@ -17,6 +17,7 @@
59947 #include <linux/moduleparam.h>
59948 #include <linux/tracepoint.h>
59949 #include <linux/export.h>
59950+#include <linux/fs.h>
59951
59952 #include <linux/percpu.h>
59953 #include <asm/module.h>
59954@@ -261,19 +262,16 @@ struct module
59955 int (*init)(void);
59956
59957 /* If this is non-NULL, vfree after init() returns */
59958- void *module_init;
59959+ void *module_init_rx, *module_init_rw;
59960
59961 /* Here is the actual code + data, vfree'd on unload. */
59962- void *module_core;
59963+ void *module_core_rx, *module_core_rw;
59964
59965 /* Here are the sizes of the init and core sections */
59966- unsigned int init_size, core_size;
59967+ unsigned int init_size_rw, core_size_rw;
59968
59969 /* The size of the executable code in each section. */
59970- unsigned int init_text_size, core_text_size;
59971-
59972- /* Size of RO sections of the module (text+rodata) */
59973- unsigned int init_ro_size, core_ro_size;
59974+ unsigned int init_size_rx, core_size_rx;
59975
59976 /* Arch-specific module values */
59977 struct mod_arch_specific arch;
59978@@ -329,6 +327,10 @@ struct module
59979 #ifdef CONFIG_EVENT_TRACING
59980 struct ftrace_event_call **trace_events;
59981 unsigned int num_trace_events;
59982+ struct file_operations trace_id;
59983+ struct file_operations trace_enable;
59984+ struct file_operations trace_format;
59985+ struct file_operations trace_filter;
59986 #endif
59987 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59988 unsigned int num_ftrace_callsites;
59989@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59990 bool is_module_percpu_address(unsigned long addr);
59991 bool is_module_text_address(unsigned long addr);
59992
59993+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59994+{
59995+
59996+#ifdef CONFIG_PAX_KERNEXEC
59997+ if (ktla_ktva(addr) >= (unsigned long)start &&
59998+ ktla_ktva(addr) < (unsigned long)start + size)
59999+ return 1;
60000+#endif
60001+
60002+ return ((void *)addr >= start && (void *)addr < start + size);
60003+}
60004+
60005+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60006+{
60007+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60008+}
60009+
60010+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60011+{
60012+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60013+}
60014+
60015+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60016+{
60017+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60018+}
60019+
60020+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60021+{
60022+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60023+}
60024+
60025 static inline int within_module_core(unsigned long addr, struct module *mod)
60026 {
60027- return (unsigned long)mod->module_core <= addr &&
60028- addr < (unsigned long)mod->module_core + mod->core_size;
60029+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60030 }
60031
60032 static inline int within_module_init(unsigned long addr, struct module *mod)
60033 {
60034- return (unsigned long)mod->module_init <= addr &&
60035- addr < (unsigned long)mod->module_init + mod->init_size;
60036+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60037 }
60038
60039 /* Search for module by name: must hold module_mutex. */
60040diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60041index b2be02e..6a9fdb1 100644
60042--- a/include/linux/moduleloader.h
60043+++ b/include/linux/moduleloader.h
60044@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60045 sections. Returns NULL on failure. */
60046 void *module_alloc(unsigned long size);
60047
60048+#ifdef CONFIG_PAX_KERNEXEC
60049+void *module_alloc_exec(unsigned long size);
60050+#else
60051+#define module_alloc_exec(x) module_alloc(x)
60052+#endif
60053+
60054 /* Free memory returned from module_alloc. */
60055 void module_free(struct module *mod, void *module_region);
60056
60057+#ifdef CONFIG_PAX_KERNEXEC
60058+void module_free_exec(struct module *mod, void *module_region);
60059+#else
60060+#define module_free_exec(x, y) module_free((x), (y))
60061+#endif
60062+
60063 /* Apply the given relocation to the (simplified) ELF. Return -error
60064 or 0. */
60065 int apply_relocate(Elf_Shdr *sechdrs,
60066diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60067index 7939f63..ec6df57 100644
60068--- a/include/linux/moduleparam.h
60069+++ b/include/linux/moduleparam.h
60070@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60071 * @len is usually just sizeof(string).
60072 */
60073 #define module_param_string(name, string, len, perm) \
60074- static const struct kparam_string __param_string_##name \
60075+ static const struct kparam_string __param_string_##name __used \
60076 = { len, string }; \
60077 __module_param_call(MODULE_PARAM_PREFIX, name, \
60078 &param_ops_string, \
60079@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60080 * module_param_named() for why this might be necessary.
60081 */
60082 #define module_param_array_named(name, array, type, nump, perm) \
60083- static const struct kparam_array __param_arr_##name \
60084+ static const struct kparam_array __param_arr_##name __used \
60085 = { .max = ARRAY_SIZE(array), .num = nump, \
60086 .ops = &param_ops_##type, \
60087 .elemsize = sizeof(array[0]), .elem = array }; \
60088diff --git a/include/linux/namei.h b/include/linux/namei.h
60089index ffc0213..2c1f2cb 100644
60090--- a/include/linux/namei.h
60091+++ b/include/linux/namei.h
60092@@ -24,7 +24,7 @@ struct nameidata {
60093 unsigned seq;
60094 int last_type;
60095 unsigned depth;
60096- char *saved_names[MAX_NESTED_LINKS + 1];
60097+ const char *saved_names[MAX_NESTED_LINKS + 1];
60098
60099 /* Intent data */
60100 union {
60101@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60102 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60103 extern void unlock_rename(struct dentry *, struct dentry *);
60104
60105-static inline void nd_set_link(struct nameidata *nd, char *path)
60106+static inline void nd_set_link(struct nameidata *nd, const char *path)
60107 {
60108 nd->saved_names[nd->depth] = path;
60109 }
60110
60111-static inline char *nd_get_link(struct nameidata *nd)
60112+static inline const char *nd_get_link(const struct nameidata *nd)
60113 {
60114 return nd->saved_names[nd->depth];
60115 }
60116diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60117index a82ad4d..90d15b7 100644
60118--- a/include/linux/netdevice.h
60119+++ b/include/linux/netdevice.h
60120@@ -949,6 +949,7 @@ struct net_device_ops {
60121 int (*ndo_set_features)(struct net_device *dev,
60122 u32 features);
60123 };
60124+typedef struct net_device_ops __no_const net_device_ops_no_const;
60125
60126 /*
60127 * The DEVICE structure.
60128@@ -1088,7 +1089,7 @@ struct net_device {
60129 int iflink;
60130
60131 struct net_device_stats stats;
60132- atomic_long_t rx_dropped; /* dropped packets by core network
60133+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60134 * Do not use this in drivers.
60135 */
60136
60137diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60138new file mode 100644
60139index 0000000..33f4af8
60140--- /dev/null
60141+++ b/include/linux/netfilter/xt_gradm.h
60142@@ -0,0 +1,9 @@
60143+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60144+#define _LINUX_NETFILTER_XT_GRADM_H 1
60145+
60146+struct xt_gradm_mtinfo {
60147+ __u16 flags;
60148+ __u16 invflags;
60149+};
60150+
60151+#endif
60152diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60153index c65a18a..0c05f3a 100644
60154--- a/include/linux/of_pdt.h
60155+++ b/include/linux/of_pdt.h
60156@@ -32,7 +32,7 @@ struct of_pdt_ops {
60157
60158 /* return 0 on success; fill in 'len' with number of bytes in path */
60159 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60160-};
60161+} __no_const;
60162
60163 extern void *prom_early_alloc(unsigned long size);
60164
60165diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60166index a4c5624..79d6d88 100644
60167--- a/include/linux/oprofile.h
60168+++ b/include/linux/oprofile.h
60169@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60170 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60171 char const * name, ulong * val);
60172
60173-/** Create a file for read-only access to an atomic_t. */
60174+/** Create a file for read-only access to an atomic_unchecked_t. */
60175 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60176- char const * name, atomic_t * val);
60177+ char const * name, atomic_unchecked_t * val);
60178
60179 /** create a directory */
60180 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60181diff --git a/include/linux/padata.h b/include/linux/padata.h
60182index 4633b2f..988bc08 100644
60183--- a/include/linux/padata.h
60184+++ b/include/linux/padata.h
60185@@ -129,7 +129,7 @@ struct parallel_data {
60186 struct padata_instance *pinst;
60187 struct padata_parallel_queue __percpu *pqueue;
60188 struct padata_serial_queue __percpu *squeue;
60189- atomic_t seq_nr;
60190+ atomic_unchecked_t seq_nr;
60191 atomic_t reorder_objects;
60192 atomic_t refcnt;
60193 unsigned int max_seq_nr;
60194diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60195index b1f8912..c955bff 100644
60196--- a/include/linux/perf_event.h
60197+++ b/include/linux/perf_event.h
60198@@ -748,8 +748,8 @@ struct perf_event {
60199
60200 enum perf_event_active_state state;
60201 unsigned int attach_state;
60202- local64_t count;
60203- atomic64_t child_count;
60204+ local64_t count; /* PaX: fix it one day */
60205+ atomic64_unchecked_t child_count;
60206
60207 /*
60208 * These are the total time in nanoseconds that the event
60209@@ -800,8 +800,8 @@ struct perf_event {
60210 * These accumulate total time (in nanoseconds) that children
60211 * events have been enabled and running, respectively.
60212 */
60213- atomic64_t child_total_time_enabled;
60214- atomic64_t child_total_time_running;
60215+ atomic64_unchecked_t child_total_time_enabled;
60216+ atomic64_unchecked_t child_total_time_running;
60217
60218 /*
60219 * Protect attach/detach and child_list:
60220diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60221index 77257c9..51d473a 100644
60222--- a/include/linux/pipe_fs_i.h
60223+++ b/include/linux/pipe_fs_i.h
60224@@ -46,9 +46,9 @@ struct pipe_buffer {
60225 struct pipe_inode_info {
60226 wait_queue_head_t wait;
60227 unsigned int nrbufs, curbuf, buffers;
60228- unsigned int readers;
60229- unsigned int writers;
60230- unsigned int waiting_writers;
60231+ atomic_t readers;
60232+ atomic_t writers;
60233+ atomic_t waiting_writers;
60234 unsigned int r_counter;
60235 unsigned int w_counter;
60236 struct page *tmp_page;
60237diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60238index d3085e7..fd01052 100644
60239--- a/include/linux/pm_runtime.h
60240+++ b/include/linux/pm_runtime.h
60241@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60242
60243 static inline void pm_runtime_mark_last_busy(struct device *dev)
60244 {
60245- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60246+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60247 }
60248
60249 #else /* !CONFIG_PM_RUNTIME */
60250diff --git a/include/linux/poison.h b/include/linux/poison.h
60251index 79159de..f1233a9 100644
60252--- a/include/linux/poison.h
60253+++ b/include/linux/poison.h
60254@@ -19,8 +19,8 @@
60255 * under normal circumstances, used to verify that nobody uses
60256 * non-initialized list entries.
60257 */
60258-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60259-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60260+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60261+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60262
60263 /********** include/linux/timer.h **********/
60264 /*
60265diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60266index 58969b2..ead129b 100644
60267--- a/include/linux/preempt.h
60268+++ b/include/linux/preempt.h
60269@@ -123,7 +123,7 @@ struct preempt_ops {
60270 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60271 void (*sched_out)(struct preempt_notifier *notifier,
60272 struct task_struct *next);
60273-};
60274+} __no_const;
60275
60276 /**
60277 * preempt_notifier - key for installing preemption notifiers
60278diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60279index 643b96c..ef55a9c 100644
60280--- a/include/linux/proc_fs.h
60281+++ b/include/linux/proc_fs.h
60282@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60283 return proc_create_data(name, mode, parent, proc_fops, NULL);
60284 }
60285
60286+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60287+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60288+{
60289+#ifdef CONFIG_GRKERNSEC_PROC_USER
60290+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60291+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60292+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60293+#else
60294+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60295+#endif
60296+}
60297+
60298+
60299 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60300 mode_t mode, struct proc_dir_entry *base,
60301 read_proc_t *read_proc, void * data)
60302@@ -258,7 +271,7 @@ union proc_op {
60303 int (*proc_show)(struct seq_file *m,
60304 struct pid_namespace *ns, struct pid *pid,
60305 struct task_struct *task);
60306-};
60307+} __no_const;
60308
60309 struct ctl_table_header;
60310 struct ctl_table;
60311diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60312index 800f113..e9ee2e3 100644
60313--- a/include/linux/ptrace.h
60314+++ b/include/linux/ptrace.h
60315@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60316 extern void exit_ptrace(struct task_struct *tracer);
60317 #define PTRACE_MODE_READ 1
60318 #define PTRACE_MODE_ATTACH 2
60319-/* Returns 0 on success, -errno on denial. */
60320-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60321 /* Returns true on success, false on denial. */
60322 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60323+/* Returns true on success, false on denial. */
60324+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60325+/* Returns true on success, false on denial. */
60326+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60327
60328 static inline int ptrace_reparented(struct task_struct *child)
60329 {
60330diff --git a/include/linux/random.h b/include/linux/random.h
60331index 8f74538..02a1012 100644
60332--- a/include/linux/random.h
60333+++ b/include/linux/random.h
60334@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60335
60336 u32 prandom32(struct rnd_state *);
60337
60338+static inline unsigned long pax_get_random_long(void)
60339+{
60340+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60341+}
60342+
60343 /*
60344 * Handle minimum values for seeds
60345 */
60346 static inline u32 __seed(u32 x, u32 m)
60347 {
60348- return (x < m) ? x + m : x;
60349+ return (x <= m) ? x + m + 1 : x;
60350 }
60351
60352 /**
60353diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60354index e0879a7..a12f962 100644
60355--- a/include/linux/reboot.h
60356+++ b/include/linux/reboot.h
60357@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60358 * Architecture-specific implementations of sys_reboot commands.
60359 */
60360
60361-extern void machine_restart(char *cmd);
60362-extern void machine_halt(void);
60363-extern void machine_power_off(void);
60364+extern void machine_restart(char *cmd) __noreturn;
60365+extern void machine_halt(void) __noreturn;
60366+extern void machine_power_off(void) __noreturn;
60367
60368 extern void machine_shutdown(void);
60369 struct pt_regs;
60370@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60371 */
60372
60373 extern void kernel_restart_prepare(char *cmd);
60374-extern void kernel_restart(char *cmd);
60375-extern void kernel_halt(void);
60376-extern void kernel_power_off(void);
60377+extern void kernel_restart(char *cmd) __noreturn;
60378+extern void kernel_halt(void) __noreturn;
60379+extern void kernel_power_off(void) __noreturn;
60380
60381 extern int C_A_D; /* for sysctl */
60382 void ctrl_alt_del(void);
60383@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60384 * Emergency restart, callable from an interrupt handler.
60385 */
60386
60387-extern void emergency_restart(void);
60388+extern void emergency_restart(void) __noreturn;
60389 #include <asm/emergency-restart.h>
60390
60391 #endif
60392diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60393index 96d465f..b084e05 100644
60394--- a/include/linux/reiserfs_fs.h
60395+++ b/include/linux/reiserfs_fs.h
60396@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60397 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60398
60399 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60400-#define get_generation(s) atomic_read (&fs_generation(s))
60401+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60402 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60403 #define __fs_changed(gen,s) (gen != get_generation (s))
60404 #define fs_changed(gen,s) \
60405diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60406index 52c83b6..18ed7eb 100644
60407--- a/include/linux/reiserfs_fs_sb.h
60408+++ b/include/linux/reiserfs_fs_sb.h
60409@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60410 /* Comment? -Hans */
60411 wait_queue_head_t s_wait;
60412 /* To be obsoleted soon by per buffer seals.. -Hans */
60413- atomic_t s_generation_counter; // increased by one every time the
60414+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60415 // tree gets re-balanced
60416 unsigned long s_properties; /* File system properties. Currently holds
60417 on-disk FS format */
60418diff --git a/include/linux/relay.h b/include/linux/relay.h
60419index 14a86bc..17d0700 100644
60420--- a/include/linux/relay.h
60421+++ b/include/linux/relay.h
60422@@ -159,7 +159,7 @@ struct rchan_callbacks
60423 * The callback should return 0 if successful, negative if not.
60424 */
60425 int (*remove_buf_file)(struct dentry *dentry);
60426-};
60427+} __no_const;
60428
60429 /*
60430 * CONFIG_RELAY kernel API, kernel/relay.c
60431diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60432index c6c6084..5bf1212 100644
60433--- a/include/linux/rfkill.h
60434+++ b/include/linux/rfkill.h
60435@@ -147,6 +147,7 @@ struct rfkill_ops {
60436 void (*query)(struct rfkill *rfkill, void *data);
60437 int (*set_block)(void *data, bool blocked);
60438 };
60439+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60440
60441 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60442 /**
60443diff --git a/include/linux/rio.h b/include/linux/rio.h
60444index 4d50611..c6858a2 100644
60445--- a/include/linux/rio.h
60446+++ b/include/linux/rio.h
60447@@ -315,7 +315,7 @@ struct rio_ops {
60448 int mbox, void *buffer, size_t len);
60449 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60450 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60451-};
60452+} __no_const;
60453
60454 #define RIO_RESOURCE_MEM 0x00000100
60455 #define RIO_RESOURCE_DOORBELL 0x00000200
60456diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60457index 2148b12..519b820 100644
60458--- a/include/linux/rmap.h
60459+++ b/include/linux/rmap.h
60460@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60461 void anon_vma_init(void); /* create anon_vma_cachep */
60462 int anon_vma_prepare(struct vm_area_struct *);
60463 void unlink_anon_vmas(struct vm_area_struct *);
60464-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60465-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60466+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60467+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60468 void __anon_vma_link(struct vm_area_struct *);
60469
60470 static inline void anon_vma_merge(struct vm_area_struct *vma,
60471diff --git a/include/linux/sched.h b/include/linux/sched.h
60472index 1c4f3e9..c5b241a 100644
60473--- a/include/linux/sched.h
60474+++ b/include/linux/sched.h
60475@@ -101,6 +101,7 @@ struct bio_list;
60476 struct fs_struct;
60477 struct perf_event_context;
60478 struct blk_plug;
60479+struct linux_binprm;
60480
60481 /*
60482 * List of flags we want to share for kernel threads,
60483@@ -380,10 +381,13 @@ struct user_namespace;
60484 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60485
60486 extern int sysctl_max_map_count;
60487+extern unsigned long sysctl_heap_stack_gap;
60488
60489 #include <linux/aio.h>
60490
60491 #ifdef CONFIG_MMU
60492+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60493+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60494 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60495 extern unsigned long
60496 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60497@@ -629,6 +633,17 @@ struct signal_struct {
60498 #ifdef CONFIG_TASKSTATS
60499 struct taskstats *stats;
60500 #endif
60501+
60502+#ifdef CONFIG_GRKERNSEC
60503+ u32 curr_ip;
60504+ u32 saved_ip;
60505+ u32 gr_saddr;
60506+ u32 gr_daddr;
60507+ u16 gr_sport;
60508+ u16 gr_dport;
60509+ u8 used_accept:1;
60510+#endif
60511+
60512 #ifdef CONFIG_AUDIT
60513 unsigned audit_tty;
60514 struct tty_audit_buf *tty_audit_buf;
60515@@ -710,6 +725,11 @@ struct user_struct {
60516 struct key *session_keyring; /* UID's default session keyring */
60517 #endif
60518
60519+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60520+ unsigned int banned;
60521+ unsigned long ban_expires;
60522+#endif
60523+
60524 /* Hash table maintenance information */
60525 struct hlist_node uidhash_node;
60526 uid_t uid;
60527@@ -1337,8 +1357,8 @@ struct task_struct {
60528 struct list_head thread_group;
60529
60530 struct completion *vfork_done; /* for vfork() */
60531- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60532- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60533+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60534+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60535
60536 cputime_t utime, stime, utimescaled, stimescaled;
60537 cputime_t gtime;
60538@@ -1354,13 +1374,6 @@ struct task_struct {
60539 struct task_cputime cputime_expires;
60540 struct list_head cpu_timers[3];
60541
60542-/* process credentials */
60543- const struct cred __rcu *real_cred; /* objective and real subjective task
60544- * credentials (COW) */
60545- const struct cred __rcu *cred; /* effective (overridable) subjective task
60546- * credentials (COW) */
60547- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60548-
60549 char comm[TASK_COMM_LEN]; /* executable name excluding path
60550 - access with [gs]et_task_comm (which lock
60551 it with task_lock())
60552@@ -1377,8 +1390,16 @@ struct task_struct {
60553 #endif
60554 /* CPU-specific state of this task */
60555 struct thread_struct thread;
60556+/* thread_info moved to task_struct */
60557+#ifdef CONFIG_X86
60558+ struct thread_info tinfo;
60559+#endif
60560 /* filesystem information */
60561 struct fs_struct *fs;
60562+
60563+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60564+ * credentials (COW) */
60565+
60566 /* open file information */
60567 struct files_struct *files;
60568 /* namespaces */
60569@@ -1425,6 +1446,11 @@ struct task_struct {
60570 struct rt_mutex_waiter *pi_blocked_on;
60571 #endif
60572
60573+/* process credentials */
60574+ const struct cred __rcu *real_cred; /* objective and real subjective task
60575+ * credentials (COW) */
60576+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60577+
60578 #ifdef CONFIG_DEBUG_MUTEXES
60579 /* mutex deadlock detection */
60580 struct mutex_waiter *blocked_on;
60581@@ -1540,6 +1566,24 @@ struct task_struct {
60582 unsigned long default_timer_slack_ns;
60583
60584 struct list_head *scm_work_list;
60585+
60586+#ifdef CONFIG_GRKERNSEC
60587+ /* grsecurity */
60588+#ifdef CONFIG_GRKERNSEC_SETXID
60589+ const struct cred *delayed_cred;
60590+#endif
60591+ struct dentry *gr_chroot_dentry;
60592+ struct acl_subject_label *acl;
60593+ struct acl_role_label *role;
60594+ struct file *exec_file;
60595+ u16 acl_role_id;
60596+ /* is this the task that authenticated to the special role */
60597+ u8 acl_sp_role;
60598+ u8 is_writable;
60599+ u8 brute;
60600+ u8 gr_is_chrooted;
60601+#endif
60602+
60603 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60604 /* Index of current stored address in ret_stack */
60605 int curr_ret_stack;
60606@@ -1574,6 +1618,51 @@ struct task_struct {
60607 #endif
60608 };
60609
60610+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60611+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60612+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60613+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60614+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60615+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60616+
60617+#ifdef CONFIG_PAX_SOFTMODE
60618+extern int pax_softmode;
60619+#endif
60620+
60621+extern int pax_check_flags(unsigned long *);
60622+
60623+/* if tsk != current then task_lock must be held on it */
60624+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60625+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60626+{
60627+ if (likely(tsk->mm))
60628+ return tsk->mm->pax_flags;
60629+ else
60630+ return 0UL;
60631+}
60632+
60633+/* if tsk != current then task_lock must be held on it */
60634+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60635+{
60636+ if (likely(tsk->mm)) {
60637+ tsk->mm->pax_flags = flags;
60638+ return 0;
60639+ }
60640+ return -EINVAL;
60641+}
60642+#endif
60643+
60644+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60645+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60646+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60647+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60648+#endif
60649+
60650+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60651+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60652+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60653+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60654+
60655 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60656 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60657
60658@@ -2081,7 +2170,9 @@ void yield(void);
60659 extern struct exec_domain default_exec_domain;
60660
60661 union thread_union {
60662+#ifndef CONFIG_X86
60663 struct thread_info thread_info;
60664+#endif
60665 unsigned long stack[THREAD_SIZE/sizeof(long)];
60666 };
60667
60668@@ -2114,6 +2205,7 @@ extern struct pid_namespace init_pid_ns;
60669 */
60670
60671 extern struct task_struct *find_task_by_vpid(pid_t nr);
60672+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60673 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60674 struct pid_namespace *ns);
60675
60676@@ -2251,7 +2343,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60677 extern void exit_itimers(struct signal_struct *);
60678 extern void flush_itimer_signals(void);
60679
60680-extern NORET_TYPE void do_group_exit(int);
60681+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60682
60683 extern void daemonize(const char *, ...);
60684 extern int allow_signal(int);
60685@@ -2416,13 +2508,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60686
60687 #endif
60688
60689-static inline int object_is_on_stack(void *obj)
60690+static inline int object_starts_on_stack(void *obj)
60691 {
60692- void *stack = task_stack_page(current);
60693+ const void *stack = task_stack_page(current);
60694
60695 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60696 }
60697
60698+#ifdef CONFIG_PAX_USERCOPY
60699+extern int object_is_on_stack(const void *obj, unsigned long len);
60700+#endif
60701+
60702 extern void thread_info_cache_init(void);
60703
60704 #ifdef CONFIG_DEBUG_STACK_USAGE
60705diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60706index 899fbb4..1cb4138 100644
60707--- a/include/linux/screen_info.h
60708+++ b/include/linux/screen_info.h
60709@@ -43,7 +43,8 @@ struct screen_info {
60710 __u16 pages; /* 0x32 */
60711 __u16 vesa_attributes; /* 0x34 */
60712 __u32 capabilities; /* 0x36 */
60713- __u8 _reserved[6]; /* 0x3a */
60714+ __u16 vesapm_size; /* 0x3a */
60715+ __u8 _reserved[4]; /* 0x3c */
60716 } __attribute__((packed));
60717
60718 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60719diff --git a/include/linux/security.h b/include/linux/security.h
60720index e8c619d..e0cbd1c 100644
60721--- a/include/linux/security.h
60722+++ b/include/linux/security.h
60723@@ -37,6 +37,7 @@
60724 #include <linux/xfrm.h>
60725 #include <linux/slab.h>
60726 #include <linux/xattr.h>
60727+#include <linux/grsecurity.h>
60728 #include <net/flow.h>
60729
60730 /* Maximum number of letters for an LSM name string */
60731diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60732index 0b69a46..e9e5538 100644
60733--- a/include/linux/seq_file.h
60734+++ b/include/linux/seq_file.h
60735@@ -33,6 +33,7 @@ struct seq_operations {
60736 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60737 int (*show) (struct seq_file *m, void *v);
60738 };
60739+typedef struct seq_operations __no_const seq_operations_no_const;
60740
60741 #define SEQ_SKIP 1
60742
60743diff --git a/include/linux/shm.h b/include/linux/shm.h
60744index 92808b8..c28cac4 100644
60745--- a/include/linux/shm.h
60746+++ b/include/linux/shm.h
60747@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60748
60749 /* The task created the shm object. NULL if the task is dead. */
60750 struct task_struct *shm_creator;
60751+#ifdef CONFIG_GRKERNSEC
60752+ time_t shm_createtime;
60753+ pid_t shm_lapid;
60754+#endif
60755 };
60756
60757 /* shm_mode upper byte flags */
60758diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60759index fe86488..1563c1c 100644
60760--- a/include/linux/skbuff.h
60761+++ b/include/linux/skbuff.h
60762@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60763 */
60764 static inline int skb_queue_empty(const struct sk_buff_head *list)
60765 {
60766- return list->next == (struct sk_buff *)list;
60767+ return list->next == (const struct sk_buff *)list;
60768 }
60769
60770 /**
60771@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60772 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60773 const struct sk_buff *skb)
60774 {
60775- return skb->next == (struct sk_buff *)list;
60776+ return skb->next == (const struct sk_buff *)list;
60777 }
60778
60779 /**
60780@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60781 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60782 const struct sk_buff *skb)
60783 {
60784- return skb->prev == (struct sk_buff *)list;
60785+ return skb->prev == (const struct sk_buff *)list;
60786 }
60787
60788 /**
60789@@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60790 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60791 */
60792 #ifndef NET_SKB_PAD
60793-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60794+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60795 #endif
60796
60797 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60798diff --git a/include/linux/slab.h b/include/linux/slab.h
60799index 573c809..e84c132 100644
60800--- a/include/linux/slab.h
60801+++ b/include/linux/slab.h
60802@@ -11,12 +11,20 @@
60803
60804 #include <linux/gfp.h>
60805 #include <linux/types.h>
60806+#include <linux/err.h>
60807
60808 /*
60809 * Flags to pass to kmem_cache_create().
60810 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60811 */
60812 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60813+
60814+#ifdef CONFIG_PAX_USERCOPY
60815+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60816+#else
60817+#define SLAB_USERCOPY 0x00000000UL
60818+#endif
60819+
60820 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60821 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60822 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60823@@ -87,10 +95,13 @@
60824 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60825 * Both make kfree a no-op.
60826 */
60827-#define ZERO_SIZE_PTR ((void *)16)
60828+#define ZERO_SIZE_PTR \
60829+({ \
60830+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60831+ (void *)(-MAX_ERRNO-1L); \
60832+})
60833
60834-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60835- (unsigned long)ZERO_SIZE_PTR)
60836+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60837
60838 /*
60839 * struct kmem_cache related prototypes
60840@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60841 void kfree(const void *);
60842 void kzfree(const void *);
60843 size_t ksize(const void *);
60844+void check_object_size(const void *ptr, unsigned long n, bool to);
60845
60846 /*
60847 * Allocator specific definitions. These are mainly used to establish optimized
60848@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60849
60850 void __init kmem_cache_init_late(void);
60851
60852+#define kmalloc(x, y) \
60853+({ \
60854+ void *___retval; \
60855+ intoverflow_t ___x = (intoverflow_t)x; \
60856+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60857+ ___retval = NULL; \
60858+ else \
60859+ ___retval = kmalloc((size_t)___x, (y)); \
60860+ ___retval; \
60861+})
60862+
60863+#define kmalloc_node(x, y, z) \
60864+({ \
60865+ void *___retval; \
60866+ intoverflow_t ___x = (intoverflow_t)x; \
60867+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60868+ ___retval = NULL; \
60869+ else \
60870+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60871+ ___retval; \
60872+})
60873+
60874+#define kzalloc(x, y) \
60875+({ \
60876+ void *___retval; \
60877+ intoverflow_t ___x = (intoverflow_t)x; \
60878+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60879+ ___retval = NULL; \
60880+ else \
60881+ ___retval = kzalloc((size_t)___x, (y)); \
60882+ ___retval; \
60883+})
60884+
60885+#define __krealloc(x, y, z) \
60886+({ \
60887+ void *___retval; \
60888+ intoverflow_t ___y = (intoverflow_t)y; \
60889+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60890+ ___retval = NULL; \
60891+ else \
60892+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60893+ ___retval; \
60894+})
60895+
60896+#define krealloc(x, y, z) \
60897+({ \
60898+ void *___retval; \
60899+ intoverflow_t ___y = (intoverflow_t)y; \
60900+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60901+ ___retval = NULL; \
60902+ else \
60903+ ___retval = krealloc((x), (size_t)___y, (z)); \
60904+ ___retval; \
60905+})
60906+
60907 #endif /* _LINUX_SLAB_H */
60908diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60909index d00e0ba..1b3bf7b 100644
60910--- a/include/linux/slab_def.h
60911+++ b/include/linux/slab_def.h
60912@@ -68,10 +68,10 @@ struct kmem_cache {
60913 unsigned long node_allocs;
60914 unsigned long node_frees;
60915 unsigned long node_overflow;
60916- atomic_t allochit;
60917- atomic_t allocmiss;
60918- atomic_t freehit;
60919- atomic_t freemiss;
60920+ atomic_unchecked_t allochit;
60921+ atomic_unchecked_t allocmiss;
60922+ atomic_unchecked_t freehit;
60923+ atomic_unchecked_t freemiss;
60924
60925 /*
60926 * If debugging is enabled, then the allocator can add additional
60927diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60928index a32bcfd..53b71f4 100644
60929--- a/include/linux/slub_def.h
60930+++ b/include/linux/slub_def.h
60931@@ -89,7 +89,7 @@ struct kmem_cache {
60932 struct kmem_cache_order_objects max;
60933 struct kmem_cache_order_objects min;
60934 gfp_t allocflags; /* gfp flags to use on each alloc */
60935- int refcount; /* Refcount for slab cache destroy */
60936+ atomic_t refcount; /* Refcount for slab cache destroy */
60937 void (*ctor)(void *);
60938 int inuse; /* Offset to metadata */
60939 int align; /* Alignment */
60940@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60941 }
60942
60943 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60944-void *__kmalloc(size_t size, gfp_t flags);
60945+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60946
60947 static __always_inline void *
60948 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60949diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60950index de8832d..0147b46 100644
60951--- a/include/linux/sonet.h
60952+++ b/include/linux/sonet.h
60953@@ -61,7 +61,7 @@ struct sonet_stats {
60954 #include <linux/atomic.h>
60955
60956 struct k_sonet_stats {
60957-#define __HANDLE_ITEM(i) atomic_t i
60958+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60959 __SONET_ITEMS
60960 #undef __HANDLE_ITEM
60961 };
60962diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60963index 3d8f9c4..69f1c0a 100644
60964--- a/include/linux/sunrpc/clnt.h
60965+++ b/include/linux/sunrpc/clnt.h
60966@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60967 {
60968 switch (sap->sa_family) {
60969 case AF_INET:
60970- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60971+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60972 case AF_INET6:
60973- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60974+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60975 }
60976 return 0;
60977 }
60978@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60979 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60980 const struct sockaddr *src)
60981 {
60982- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60983+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60984 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60985
60986 dsin->sin_family = ssin->sin_family;
60987@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60988 if (sa->sa_family != AF_INET6)
60989 return 0;
60990
60991- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60992+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60993 }
60994
60995 #endif /* __KERNEL__ */
60996diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60997index e775689..9e206d9 100644
60998--- a/include/linux/sunrpc/sched.h
60999+++ b/include/linux/sunrpc/sched.h
61000@@ -105,6 +105,7 @@ struct rpc_call_ops {
61001 void (*rpc_call_done)(struct rpc_task *, void *);
61002 void (*rpc_release)(void *);
61003 };
61004+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61005
61006 struct rpc_task_setup {
61007 struct rpc_task *task;
61008diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61009index c14fe86..393245e 100644
61010--- a/include/linux/sunrpc/svc_rdma.h
61011+++ b/include/linux/sunrpc/svc_rdma.h
61012@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61013 extern unsigned int svcrdma_max_requests;
61014 extern unsigned int svcrdma_max_req_size;
61015
61016-extern atomic_t rdma_stat_recv;
61017-extern atomic_t rdma_stat_read;
61018-extern atomic_t rdma_stat_write;
61019-extern atomic_t rdma_stat_sq_starve;
61020-extern atomic_t rdma_stat_rq_starve;
61021-extern atomic_t rdma_stat_rq_poll;
61022-extern atomic_t rdma_stat_rq_prod;
61023-extern atomic_t rdma_stat_sq_poll;
61024-extern atomic_t rdma_stat_sq_prod;
61025+extern atomic_unchecked_t rdma_stat_recv;
61026+extern atomic_unchecked_t rdma_stat_read;
61027+extern atomic_unchecked_t rdma_stat_write;
61028+extern atomic_unchecked_t rdma_stat_sq_starve;
61029+extern atomic_unchecked_t rdma_stat_rq_starve;
61030+extern atomic_unchecked_t rdma_stat_rq_poll;
61031+extern atomic_unchecked_t rdma_stat_rq_prod;
61032+extern atomic_unchecked_t rdma_stat_sq_poll;
61033+extern atomic_unchecked_t rdma_stat_sq_prod;
61034
61035 #define RPCRDMA_VERSION 1
61036
61037diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61038index 703cfa3..0b8ca72ac 100644
61039--- a/include/linux/sysctl.h
61040+++ b/include/linux/sysctl.h
61041@@ -155,7 +155,11 @@ enum
61042 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61043 };
61044
61045-
61046+#ifdef CONFIG_PAX_SOFTMODE
61047+enum {
61048+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61049+};
61050+#endif
61051
61052 /* CTL_VM names: */
61053 enum
61054@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61055
61056 extern int proc_dostring(struct ctl_table *, int,
61057 void __user *, size_t *, loff_t *);
61058+extern int proc_dostring_modpriv(struct ctl_table *, int,
61059+ void __user *, size_t *, loff_t *);
61060 extern int proc_dointvec(struct ctl_table *, int,
61061 void __user *, size_t *, loff_t *);
61062 extern int proc_dointvec_minmax(struct ctl_table *, int,
61063diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61064index ff7dc08..893e1bd 100644
61065--- a/include/linux/tty_ldisc.h
61066+++ b/include/linux/tty_ldisc.h
61067@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61068
61069 struct module *owner;
61070
61071- int refcount;
61072+ atomic_t refcount;
61073 };
61074
61075 struct tty_ldisc {
61076diff --git a/include/linux/types.h b/include/linux/types.h
61077index 57a9723..dbe234a 100644
61078--- a/include/linux/types.h
61079+++ b/include/linux/types.h
61080@@ -213,10 +213,26 @@ typedef struct {
61081 int counter;
61082 } atomic_t;
61083
61084+#ifdef CONFIG_PAX_REFCOUNT
61085+typedef struct {
61086+ int counter;
61087+} atomic_unchecked_t;
61088+#else
61089+typedef atomic_t atomic_unchecked_t;
61090+#endif
61091+
61092 #ifdef CONFIG_64BIT
61093 typedef struct {
61094 long counter;
61095 } atomic64_t;
61096+
61097+#ifdef CONFIG_PAX_REFCOUNT
61098+typedef struct {
61099+ long counter;
61100+} atomic64_unchecked_t;
61101+#else
61102+typedef atomic64_t atomic64_unchecked_t;
61103+#endif
61104 #endif
61105
61106 struct list_head {
61107diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61108index 5ca0951..ab496a5 100644
61109--- a/include/linux/uaccess.h
61110+++ b/include/linux/uaccess.h
61111@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61112 long ret; \
61113 mm_segment_t old_fs = get_fs(); \
61114 \
61115- set_fs(KERNEL_DS); \
61116 pagefault_disable(); \
61117- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61118- pagefault_enable(); \
61119+ set_fs(KERNEL_DS); \
61120+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61121 set_fs(old_fs); \
61122+ pagefault_enable(); \
61123 ret; \
61124 })
61125
61126diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61127index 99c1b4d..bb94261 100644
61128--- a/include/linux/unaligned/access_ok.h
61129+++ b/include/linux/unaligned/access_ok.h
61130@@ -6,32 +6,32 @@
61131
61132 static inline u16 get_unaligned_le16(const void *p)
61133 {
61134- return le16_to_cpup((__le16 *)p);
61135+ return le16_to_cpup((const __le16 *)p);
61136 }
61137
61138 static inline u32 get_unaligned_le32(const void *p)
61139 {
61140- return le32_to_cpup((__le32 *)p);
61141+ return le32_to_cpup((const __le32 *)p);
61142 }
61143
61144 static inline u64 get_unaligned_le64(const void *p)
61145 {
61146- return le64_to_cpup((__le64 *)p);
61147+ return le64_to_cpup((const __le64 *)p);
61148 }
61149
61150 static inline u16 get_unaligned_be16(const void *p)
61151 {
61152- return be16_to_cpup((__be16 *)p);
61153+ return be16_to_cpup((const __be16 *)p);
61154 }
61155
61156 static inline u32 get_unaligned_be32(const void *p)
61157 {
61158- return be32_to_cpup((__be32 *)p);
61159+ return be32_to_cpup((const __be32 *)p);
61160 }
61161
61162 static inline u64 get_unaligned_be64(const void *p)
61163 {
61164- return be64_to_cpup((__be64 *)p);
61165+ return be64_to_cpup((const __be64 *)p);
61166 }
61167
61168 static inline void put_unaligned_le16(u16 val, void *p)
61169diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61170index e5a40c3..20ab0f6 100644
61171--- a/include/linux/usb/renesas_usbhs.h
61172+++ b/include/linux/usb/renesas_usbhs.h
61173@@ -39,7 +39,7 @@ enum {
61174 */
61175 struct renesas_usbhs_driver_callback {
61176 int (*notify_hotplug)(struct platform_device *pdev);
61177-};
61178+} __no_const;
61179
61180 /*
61181 * callback functions for platform
61182@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61183 * VBUS control is needed for Host
61184 */
61185 int (*set_vbus)(struct platform_device *pdev, int enable);
61186-};
61187+} __no_const;
61188
61189 /*
61190 * parameters for renesas usbhs
61191diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61192index 6f8fbcf..8259001 100644
61193--- a/include/linux/vermagic.h
61194+++ b/include/linux/vermagic.h
61195@@ -25,9 +25,35 @@
61196 #define MODULE_ARCH_VERMAGIC ""
61197 #endif
61198
61199+#ifdef CONFIG_PAX_REFCOUNT
61200+#define MODULE_PAX_REFCOUNT "REFCOUNT "
61201+#else
61202+#define MODULE_PAX_REFCOUNT ""
61203+#endif
61204+
61205+#ifdef CONSTIFY_PLUGIN
61206+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61207+#else
61208+#define MODULE_CONSTIFY_PLUGIN ""
61209+#endif
61210+
61211+#ifdef STACKLEAK_PLUGIN
61212+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61213+#else
61214+#define MODULE_STACKLEAK_PLUGIN ""
61215+#endif
61216+
61217+#ifdef CONFIG_GRKERNSEC
61218+#define MODULE_GRSEC "GRSEC "
61219+#else
61220+#define MODULE_GRSEC ""
61221+#endif
61222+
61223 #define VERMAGIC_STRING \
61224 UTS_RELEASE " " \
61225 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61226 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61227- MODULE_ARCH_VERMAGIC
61228+ MODULE_ARCH_VERMAGIC \
61229+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61230+ MODULE_GRSEC
61231
61232diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61233index 4bde182..aec92c1 100644
61234--- a/include/linux/vmalloc.h
61235+++ b/include/linux/vmalloc.h
61236@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61237 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61238 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61239 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61240+
61241+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61242+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61243+#endif
61244+
61245 /* bits [20..32] reserved for arch specific ioremap internals */
61246
61247 /*
61248@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61249 # endif
61250 #endif
61251
61252+#define vmalloc(x) \
61253+({ \
61254+ void *___retval; \
61255+ intoverflow_t ___x = (intoverflow_t)x; \
61256+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61257+ ___retval = NULL; \
61258+ else \
61259+ ___retval = vmalloc((unsigned long)___x); \
61260+ ___retval; \
61261+})
61262+
61263+#define vzalloc(x) \
61264+({ \
61265+ void *___retval; \
61266+ intoverflow_t ___x = (intoverflow_t)x; \
61267+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61268+ ___retval = NULL; \
61269+ else \
61270+ ___retval = vzalloc((unsigned long)___x); \
61271+ ___retval; \
61272+})
61273+
61274+#define __vmalloc(x, y, z) \
61275+({ \
61276+ void *___retval; \
61277+ intoverflow_t ___x = (intoverflow_t)x; \
61278+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61279+ ___retval = NULL; \
61280+ else \
61281+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61282+ ___retval; \
61283+})
61284+
61285+#define vmalloc_user(x) \
61286+({ \
61287+ void *___retval; \
61288+ intoverflow_t ___x = (intoverflow_t)x; \
61289+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61290+ ___retval = NULL; \
61291+ else \
61292+ ___retval = vmalloc_user((unsigned long)___x); \
61293+ ___retval; \
61294+})
61295+
61296+#define vmalloc_exec(x) \
61297+({ \
61298+ void *___retval; \
61299+ intoverflow_t ___x = (intoverflow_t)x; \
61300+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61301+ ___retval = NULL; \
61302+ else \
61303+ ___retval = vmalloc_exec((unsigned long)___x); \
61304+ ___retval; \
61305+})
61306+
61307+#define vmalloc_node(x, y) \
61308+({ \
61309+ void *___retval; \
61310+ intoverflow_t ___x = (intoverflow_t)x; \
61311+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61312+ ___retval = NULL; \
61313+ else \
61314+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61315+ ___retval; \
61316+})
61317+
61318+#define vzalloc_node(x, y) \
61319+({ \
61320+ void *___retval; \
61321+ intoverflow_t ___x = (intoverflow_t)x; \
61322+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61323+ ___retval = NULL; \
61324+ else \
61325+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61326+ ___retval; \
61327+})
61328+
61329+#define vmalloc_32(x) \
61330+({ \
61331+ void *___retval; \
61332+ intoverflow_t ___x = (intoverflow_t)x; \
61333+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61334+ ___retval = NULL; \
61335+ else \
61336+ ___retval = vmalloc_32((unsigned long)___x); \
61337+ ___retval; \
61338+})
61339+
61340+#define vmalloc_32_user(x) \
61341+({ \
61342+void *___retval; \
61343+ intoverflow_t ___x = (intoverflow_t)x; \
61344+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61345+ ___retval = NULL; \
61346+ else \
61347+ ___retval = vmalloc_32_user((unsigned long)___x);\
61348+ ___retval; \
61349+})
61350+
61351 #endif /* _LINUX_VMALLOC_H */
61352diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61353index 65efb92..137adbb 100644
61354--- a/include/linux/vmstat.h
61355+++ b/include/linux/vmstat.h
61356@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61357 /*
61358 * Zone based page accounting with per cpu differentials.
61359 */
61360-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61361+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61362
61363 static inline void zone_page_state_add(long x, struct zone *zone,
61364 enum zone_stat_item item)
61365 {
61366- atomic_long_add(x, &zone->vm_stat[item]);
61367- atomic_long_add(x, &vm_stat[item]);
61368+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61369+ atomic_long_add_unchecked(x, &vm_stat[item]);
61370 }
61371
61372 static inline unsigned long global_page_state(enum zone_stat_item item)
61373 {
61374- long x = atomic_long_read(&vm_stat[item]);
61375+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61376 #ifdef CONFIG_SMP
61377 if (x < 0)
61378 x = 0;
61379@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61380 static inline unsigned long zone_page_state(struct zone *zone,
61381 enum zone_stat_item item)
61382 {
61383- long x = atomic_long_read(&zone->vm_stat[item]);
61384+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61385 #ifdef CONFIG_SMP
61386 if (x < 0)
61387 x = 0;
61388@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61389 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61390 enum zone_stat_item item)
61391 {
61392- long x = atomic_long_read(&zone->vm_stat[item]);
61393+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61394
61395 #ifdef CONFIG_SMP
61396 int cpu;
61397@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61398
61399 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61400 {
61401- atomic_long_inc(&zone->vm_stat[item]);
61402- atomic_long_inc(&vm_stat[item]);
61403+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61404+ atomic_long_inc_unchecked(&vm_stat[item]);
61405 }
61406
61407 static inline void __inc_zone_page_state(struct page *page,
61408@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61409
61410 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61411 {
61412- atomic_long_dec(&zone->vm_stat[item]);
61413- atomic_long_dec(&vm_stat[item]);
61414+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61415+ atomic_long_dec_unchecked(&vm_stat[item]);
61416 }
61417
61418 static inline void __dec_zone_page_state(struct page *page,
61419diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61420index e5d1220..ef6e406 100644
61421--- a/include/linux/xattr.h
61422+++ b/include/linux/xattr.h
61423@@ -57,6 +57,11 @@
61424 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61425 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61426
61427+/* User namespace */
61428+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61429+#define XATTR_PAX_FLAGS_SUFFIX "flags"
61430+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61431+
61432 #ifdef __KERNEL__
61433
61434 #include <linux/types.h>
61435diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61436index 4aeff96..b378cdc 100644
61437--- a/include/media/saa7146_vv.h
61438+++ b/include/media/saa7146_vv.h
61439@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61440 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61441
61442 /* the extension can override this */
61443- struct v4l2_ioctl_ops ops;
61444+ v4l2_ioctl_ops_no_const ops;
61445 /* pointer to the saa7146 core ops */
61446 const struct v4l2_ioctl_ops *core_ops;
61447
61448diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61449index c7c40f1..4f01585 100644
61450--- a/include/media/v4l2-dev.h
61451+++ b/include/media/v4l2-dev.h
61452@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61453
61454
61455 struct v4l2_file_operations {
61456- struct module *owner;
61457+ struct module * const owner;
61458 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61459 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61460 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61461@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61462 int (*open) (struct file *);
61463 int (*release) (struct file *);
61464 };
61465+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61466
61467 /*
61468 * Newer version of video_device, handled by videodev2.c
61469diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61470index 4d1c74a..65e1221 100644
61471--- a/include/media/v4l2-ioctl.h
61472+++ b/include/media/v4l2-ioctl.h
61473@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61474 long (*vidioc_default) (struct file *file, void *fh,
61475 bool valid_prio, int cmd, void *arg);
61476 };
61477-
61478+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61479
61480 /* v4l debugging and diagnostics */
61481
61482diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61483index 8d55251..dfe5b0a 100644
61484--- a/include/net/caif/caif_hsi.h
61485+++ b/include/net/caif/caif_hsi.h
61486@@ -98,7 +98,7 @@ struct cfhsi_drv {
61487 void (*rx_done_cb) (struct cfhsi_drv *drv);
61488 void (*wake_up_cb) (struct cfhsi_drv *drv);
61489 void (*wake_down_cb) (struct cfhsi_drv *drv);
61490-};
61491+} __no_const;
61492
61493 /* Structure implemented by HSI device. */
61494 struct cfhsi_dev {
61495diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61496index 9e5425b..8136ffc 100644
61497--- a/include/net/caif/cfctrl.h
61498+++ b/include/net/caif/cfctrl.h
61499@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61500 void (*radioset_rsp)(void);
61501 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61502 struct cflayer *client_layer);
61503-};
61504+} __no_const;
61505
61506 /* Link Setup Parameters for CAIF-Links. */
61507 struct cfctrl_link_param {
61508@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61509 struct cfctrl {
61510 struct cfsrvl serv;
61511 struct cfctrl_rsp res;
61512- atomic_t req_seq_no;
61513- atomic_t rsp_seq_no;
61514+ atomic_unchecked_t req_seq_no;
61515+ atomic_unchecked_t rsp_seq_no;
61516 struct list_head list;
61517 /* Protects from simultaneous access to first_req list */
61518 spinlock_t info_list_lock;
61519diff --git a/include/net/flow.h b/include/net/flow.h
61520index 57f15a7..0de26c6 100644
61521--- a/include/net/flow.h
61522+++ b/include/net/flow.h
61523@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61524
61525 extern void flow_cache_flush(void);
61526 extern void flow_cache_flush_deferred(void);
61527-extern atomic_t flow_cache_genid;
61528+extern atomic_unchecked_t flow_cache_genid;
61529
61530 #endif
61531diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61532index e9ff3fc..9d3e5c7 100644
61533--- a/include/net/inetpeer.h
61534+++ b/include/net/inetpeer.h
61535@@ -48,8 +48,8 @@ struct inet_peer {
61536 */
61537 union {
61538 struct {
61539- atomic_t rid; /* Frag reception counter */
61540- atomic_t ip_id_count; /* IP ID for the next packet */
61541+ atomic_unchecked_t rid; /* Frag reception counter */
61542+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61543 __u32 tcp_ts;
61544 __u32 tcp_ts_stamp;
61545 };
61546@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61547 more++;
61548 inet_peer_refcheck(p);
61549 do {
61550- old = atomic_read(&p->ip_id_count);
61551+ old = atomic_read_unchecked(&p->ip_id_count);
61552 new = old + more;
61553 if (!new)
61554 new = 1;
61555- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61556+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61557 return new;
61558 }
61559
61560diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61561index 10422ef..662570f 100644
61562--- a/include/net/ip_fib.h
61563+++ b/include/net/ip_fib.h
61564@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61565
61566 #define FIB_RES_SADDR(net, res) \
61567 ((FIB_RES_NH(res).nh_saddr_genid == \
61568- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61569+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61570 FIB_RES_NH(res).nh_saddr : \
61571 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61572 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61573diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61574index e5a7b9a..f4fc44b 100644
61575--- a/include/net/ip_vs.h
61576+++ b/include/net/ip_vs.h
61577@@ -509,7 +509,7 @@ struct ip_vs_conn {
61578 struct ip_vs_conn *control; /* Master control connection */
61579 atomic_t n_control; /* Number of controlled ones */
61580 struct ip_vs_dest *dest; /* real server */
61581- atomic_t in_pkts; /* incoming packet counter */
61582+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61583
61584 /* packet transmitter for different forwarding methods. If it
61585 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61586@@ -647,7 +647,7 @@ struct ip_vs_dest {
61587 __be16 port; /* port number of the server */
61588 union nf_inet_addr addr; /* IP address of the server */
61589 volatile unsigned flags; /* dest status flags */
61590- atomic_t conn_flags; /* flags to copy to conn */
61591+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61592 atomic_t weight; /* server weight */
61593
61594 atomic_t refcnt; /* reference counter */
61595diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61596index 69b610a..fe3962c 100644
61597--- a/include/net/irda/ircomm_core.h
61598+++ b/include/net/irda/ircomm_core.h
61599@@ -51,7 +51,7 @@ typedef struct {
61600 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61601 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61602 struct ircomm_info *);
61603-} call_t;
61604+} __no_const call_t;
61605
61606 struct ircomm_cb {
61607 irda_queue_t queue;
61608diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61609index 59ba38bc..d515662 100644
61610--- a/include/net/irda/ircomm_tty.h
61611+++ b/include/net/irda/ircomm_tty.h
61612@@ -35,6 +35,7 @@
61613 #include <linux/termios.h>
61614 #include <linux/timer.h>
61615 #include <linux/tty.h> /* struct tty_struct */
61616+#include <asm/local.h>
61617
61618 #include <net/irda/irias_object.h>
61619 #include <net/irda/ircomm_core.h>
61620@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61621 unsigned short close_delay;
61622 unsigned short closing_wait; /* time to wait before closing */
61623
61624- int open_count;
61625- int blocked_open; /* # of blocked opens */
61626+ local_t open_count;
61627+ local_t blocked_open; /* # of blocked opens */
61628
61629 /* Protect concurent access to :
61630 * o self->open_count
61631diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61632index f2419cf..473679f 100644
61633--- a/include/net/iucv/af_iucv.h
61634+++ b/include/net/iucv/af_iucv.h
61635@@ -139,7 +139,7 @@ struct iucv_sock {
61636 struct iucv_sock_list {
61637 struct hlist_head head;
61638 rwlock_t lock;
61639- atomic_t autobind_name;
61640+ atomic_unchecked_t autobind_name;
61641 };
61642
61643 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61644diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61645index 2720884..3aa5c25 100644
61646--- a/include/net/neighbour.h
61647+++ b/include/net/neighbour.h
61648@@ -122,7 +122,7 @@ struct neigh_ops {
61649 void (*error_report)(struct neighbour *, struct sk_buff *);
61650 int (*output)(struct neighbour *, struct sk_buff *);
61651 int (*connected_output)(struct neighbour *, struct sk_buff *);
61652-};
61653+} __do_const;
61654
61655 struct pneigh_entry {
61656 struct pneigh_entry *next;
61657diff --git a/include/net/netlink.h b/include/net/netlink.h
61658index cb1f350..3279d2c 100644
61659--- a/include/net/netlink.h
61660+++ b/include/net/netlink.h
61661@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61662 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61663 {
61664 if (mark)
61665- skb_trim(skb, (unsigned char *) mark - skb->data);
61666+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61667 }
61668
61669 /**
61670diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61671index d786b4f..4c3dd41 100644
61672--- a/include/net/netns/ipv4.h
61673+++ b/include/net/netns/ipv4.h
61674@@ -56,8 +56,8 @@ struct netns_ipv4 {
61675
61676 unsigned int sysctl_ping_group_range[2];
61677
61678- atomic_t rt_genid;
61679- atomic_t dev_addr_genid;
61680+ atomic_unchecked_t rt_genid;
61681+ atomic_unchecked_t dev_addr_genid;
61682
61683 #ifdef CONFIG_IP_MROUTE
61684 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61685diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61686index 6a72a58..e6a127d 100644
61687--- a/include/net/sctp/sctp.h
61688+++ b/include/net/sctp/sctp.h
61689@@ -318,9 +318,9 @@ do { \
61690
61691 #else /* SCTP_DEBUG */
61692
61693-#define SCTP_DEBUG_PRINTK(whatever...)
61694-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61695-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61696+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61697+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61698+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61699 #define SCTP_ENABLE_DEBUG
61700 #define SCTP_DISABLE_DEBUG
61701 #define SCTP_ASSERT(expr, str, func)
61702diff --git a/include/net/sock.h b/include/net/sock.h
61703index 32e3937..87a1dbc 100644
61704--- a/include/net/sock.h
61705+++ b/include/net/sock.h
61706@@ -277,7 +277,7 @@ struct sock {
61707 #ifdef CONFIG_RPS
61708 __u32 sk_rxhash;
61709 #endif
61710- atomic_t sk_drops;
61711+ atomic_unchecked_t sk_drops;
61712 int sk_rcvbuf;
61713
61714 struct sk_filter __rcu *sk_filter;
61715@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61716 }
61717
61718 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61719- char __user *from, char *to,
61720+ char __user *from, unsigned char *to,
61721 int copy, int offset)
61722 {
61723 if (skb->ip_summed == CHECKSUM_NONE) {
61724diff --git a/include/net/tcp.h b/include/net/tcp.h
61725index bb18c4d..bb87972 100644
61726--- a/include/net/tcp.h
61727+++ b/include/net/tcp.h
61728@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61729 char *name;
61730 sa_family_t family;
61731 const struct file_operations *seq_fops;
61732- struct seq_operations seq_ops;
61733+ seq_operations_no_const seq_ops;
61734 };
61735
61736 struct tcp_iter_state {
61737diff --git a/include/net/udp.h b/include/net/udp.h
61738index 3b285f4..0219639 100644
61739--- a/include/net/udp.h
61740+++ b/include/net/udp.h
61741@@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61742 sa_family_t family;
61743 struct udp_table *udp_table;
61744 const struct file_operations *seq_fops;
61745- struct seq_operations seq_ops;
61746+ seq_operations_no_const seq_ops;
61747 };
61748
61749 struct udp_iter_state {
61750diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61751index b203e14..1df3991 100644
61752--- a/include/net/xfrm.h
61753+++ b/include/net/xfrm.h
61754@@ -505,7 +505,7 @@ struct xfrm_policy {
61755 struct timer_list timer;
61756
61757 struct flow_cache_object flo;
61758- atomic_t genid;
61759+ atomic_unchecked_t genid;
61760 u32 priority;
61761 u32 index;
61762 struct xfrm_mark mark;
61763diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61764index 1a046b1..ee0bef0 100644
61765--- a/include/rdma/iw_cm.h
61766+++ b/include/rdma/iw_cm.h
61767@@ -122,7 +122,7 @@ struct iw_cm_verbs {
61768 int backlog);
61769
61770 int (*destroy_listen)(struct iw_cm_id *cm_id);
61771-};
61772+} __no_const;
61773
61774 /**
61775 * iw_create_cm_id - Create an IW CM identifier.
61776diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61777index 5d1a758..1dbf795 100644
61778--- a/include/scsi/libfc.h
61779+++ b/include/scsi/libfc.h
61780@@ -748,6 +748,7 @@ struct libfc_function_template {
61781 */
61782 void (*disc_stop_final) (struct fc_lport *);
61783 };
61784+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61785
61786 /**
61787 * struct fc_disc - Discovery context
61788@@ -851,7 +852,7 @@ struct fc_lport {
61789 struct fc_vport *vport;
61790
61791 /* Operational Information */
61792- struct libfc_function_template tt;
61793+ libfc_function_template_no_const tt;
61794 u8 link_up;
61795 u8 qfull;
61796 enum fc_lport_state state;
61797diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61798index 5591ed5..13eb457 100644
61799--- a/include/scsi/scsi_device.h
61800+++ b/include/scsi/scsi_device.h
61801@@ -161,9 +161,9 @@ struct scsi_device {
61802 unsigned int max_device_blocked; /* what device_blocked counts down from */
61803 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61804
61805- atomic_t iorequest_cnt;
61806- atomic_t iodone_cnt;
61807- atomic_t ioerr_cnt;
61808+ atomic_unchecked_t iorequest_cnt;
61809+ atomic_unchecked_t iodone_cnt;
61810+ atomic_unchecked_t ioerr_cnt;
61811
61812 struct device sdev_gendev,
61813 sdev_dev;
61814diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61815index 2a65167..91e01f8 100644
61816--- a/include/scsi/scsi_transport_fc.h
61817+++ b/include/scsi/scsi_transport_fc.h
61818@@ -711,7 +711,7 @@ struct fc_function_template {
61819 unsigned long show_host_system_hostname:1;
61820
61821 unsigned long disable_target_scan:1;
61822-};
61823+} __do_const;
61824
61825
61826 /**
61827diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61828index 030b87c..98a6954 100644
61829--- a/include/sound/ak4xxx-adda.h
61830+++ b/include/sound/ak4xxx-adda.h
61831@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61832 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61833 unsigned char val);
61834 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61835-};
61836+} __no_const;
61837
61838 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61839
61840diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61841index 8c05e47..2b5df97 100644
61842--- a/include/sound/hwdep.h
61843+++ b/include/sound/hwdep.h
61844@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61845 struct snd_hwdep_dsp_status *status);
61846 int (*dsp_load)(struct snd_hwdep *hw,
61847 struct snd_hwdep_dsp_image *image);
61848-};
61849+} __no_const;
61850
61851 struct snd_hwdep {
61852 struct snd_card *card;
61853diff --git a/include/sound/info.h b/include/sound/info.h
61854index 5492cc4..1a65278 100644
61855--- a/include/sound/info.h
61856+++ b/include/sound/info.h
61857@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61858 struct snd_info_buffer *buffer);
61859 void (*write)(struct snd_info_entry *entry,
61860 struct snd_info_buffer *buffer);
61861-};
61862+} __no_const;
61863
61864 struct snd_info_entry_ops {
61865 int (*open)(struct snd_info_entry *entry,
61866diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61867index 0cf91b2..b70cae4 100644
61868--- a/include/sound/pcm.h
61869+++ b/include/sound/pcm.h
61870@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61871 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61872 int (*ack)(struct snd_pcm_substream *substream);
61873 };
61874+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61875
61876 /*
61877 *
61878diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61879index af1b49e..a5d55a5 100644
61880--- a/include/sound/sb16_csp.h
61881+++ b/include/sound/sb16_csp.h
61882@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61883 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61884 int (*csp_stop) (struct snd_sb_csp * p);
61885 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61886-};
61887+} __no_const;
61888
61889 /*
61890 * CSP private data
61891diff --git a/include/sound/soc.h b/include/sound/soc.h
61892index 11cfb59..e3f93f4 100644
61893--- a/include/sound/soc.h
61894+++ b/include/sound/soc.h
61895@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61896 /* platform IO - used for platform DAPM */
61897 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61898 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61899-};
61900+} __do_const;
61901
61902 struct snd_soc_platform {
61903 const char *name;
61904diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61905index 444cd6b..3327cc5 100644
61906--- a/include/sound/ymfpci.h
61907+++ b/include/sound/ymfpci.h
61908@@ -358,7 +358,7 @@ struct snd_ymfpci {
61909 spinlock_t reg_lock;
61910 spinlock_t voice_lock;
61911 wait_queue_head_t interrupt_sleep;
61912- atomic_t interrupt_sleep_count;
61913+ atomic_unchecked_t interrupt_sleep_count;
61914 struct snd_info_entry *proc_entry;
61915 const struct firmware *dsp_microcode;
61916 const struct firmware *controller_microcode;
61917diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61918index a79886c..b483af6 100644
61919--- a/include/target/target_core_base.h
61920+++ b/include/target/target_core_base.h
61921@@ -346,7 +346,7 @@ struct t10_reservation_ops {
61922 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61923 int (*t10_pr_register)(struct se_cmd *);
61924 int (*t10_pr_clear)(struct se_cmd *);
61925-};
61926+} __no_const;
61927
61928 struct t10_reservation {
61929 /* Reservation effects all target ports */
61930@@ -465,8 +465,8 @@ struct se_cmd {
61931 atomic_t t_se_count;
61932 atomic_t t_task_cdbs_left;
61933 atomic_t t_task_cdbs_ex_left;
61934- atomic_t t_task_cdbs_sent;
61935- atomic_t t_transport_aborted;
61936+ atomic_unchecked_t t_task_cdbs_sent;
61937+ atomic_unchecked_t t_transport_aborted;
61938 atomic_t t_transport_active;
61939 atomic_t t_transport_complete;
61940 atomic_t t_transport_queue_active;
61941@@ -704,7 +704,7 @@ struct se_device {
61942 /* Active commands on this virtual SE device */
61943 atomic_t simple_cmds;
61944 atomic_t depth_left;
61945- atomic_t dev_ordered_id;
61946+ atomic_unchecked_t dev_ordered_id;
61947 atomic_t execute_tasks;
61948 atomic_t dev_ordered_sync;
61949 atomic_t dev_qf_count;
61950diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61951index 1c09820..7f5ec79 100644
61952--- a/include/trace/events/irq.h
61953+++ b/include/trace/events/irq.h
61954@@ -36,7 +36,7 @@ struct softirq_action;
61955 */
61956 TRACE_EVENT(irq_handler_entry,
61957
61958- TP_PROTO(int irq, struct irqaction *action),
61959+ TP_PROTO(int irq, const struct irqaction *action),
61960
61961 TP_ARGS(irq, action),
61962
61963@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61964 */
61965 TRACE_EVENT(irq_handler_exit,
61966
61967- TP_PROTO(int irq, struct irqaction *action, int ret),
61968+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61969
61970 TP_ARGS(irq, action, ret),
61971
61972diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61973index c41f308..6918de3 100644
61974--- a/include/video/udlfb.h
61975+++ b/include/video/udlfb.h
61976@@ -52,10 +52,10 @@ struct dlfb_data {
61977 u32 pseudo_palette[256];
61978 int blank_mode; /*one of FB_BLANK_ */
61979 /* blit-only rendering path metrics, exposed through sysfs */
61980- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61981- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61982- atomic_t bytes_sent; /* to usb, after compression including overhead */
61983- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61984+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61985+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61986+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61987+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61988 };
61989
61990 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61991diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
61992index 0993a22..32ba2fe 100644
61993--- a/include/video/uvesafb.h
61994+++ b/include/video/uvesafb.h
61995@@ -177,6 +177,7 @@ struct uvesafb_par {
61996 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61997 u8 pmi_setpal; /* PMI for palette changes */
61998 u16 *pmi_base; /* protected mode interface location */
61999+ u8 *pmi_code; /* protected mode code location */
62000 void *pmi_start;
62001 void *pmi_pal;
62002 u8 *vbe_state_orig; /*
62003diff --git a/init/Kconfig b/init/Kconfig
62004index 43298f9..2f56c12 100644
62005--- a/init/Kconfig
62006+++ b/init/Kconfig
62007@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62008
62009 config COMPAT_BRK
62010 bool "Disable heap randomization"
62011- default y
62012+ default n
62013 help
62014 Randomizing heap placement makes heap exploits harder, but it
62015 also breaks ancient binaries (including anything libc5 based).
62016diff --git a/init/do_mounts.c b/init/do_mounts.c
62017index db6e5ee..7677ff7 100644
62018--- a/init/do_mounts.c
62019+++ b/init/do_mounts.c
62020@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62021
62022 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62023 {
62024- int err = sys_mount(name, "/root", fs, flags, data);
62025+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62026 if (err)
62027 return err;
62028
62029- sys_chdir((const char __user __force *)"/root");
62030+ sys_chdir((const char __force_user*)"/root");
62031 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62032 printk(KERN_INFO
62033 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62034@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62035 va_start(args, fmt);
62036 vsprintf(buf, fmt, args);
62037 va_end(args);
62038- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62039+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62040 if (fd >= 0) {
62041 sys_ioctl(fd, FDEJECT, 0);
62042 sys_close(fd);
62043 }
62044 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62045- fd = sys_open("/dev/console", O_RDWR, 0);
62046+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62047 if (fd >= 0) {
62048 sys_ioctl(fd, TCGETS, (long)&termios);
62049 termios.c_lflag &= ~ICANON;
62050 sys_ioctl(fd, TCSETSF, (long)&termios);
62051- sys_read(fd, &c, 1);
62052+ sys_read(fd, (char __user *)&c, 1);
62053 termios.c_lflag |= ICANON;
62054 sys_ioctl(fd, TCSETSF, (long)&termios);
62055 sys_close(fd);
62056@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62057 mount_root();
62058 out:
62059 devtmpfs_mount("dev");
62060- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62061- sys_chroot((const char __user __force *)".");
62062+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62063+ sys_chroot((const char __force_user *)".");
62064 }
62065diff --git a/init/do_mounts.h b/init/do_mounts.h
62066index f5b978a..69dbfe8 100644
62067--- a/init/do_mounts.h
62068+++ b/init/do_mounts.h
62069@@ -15,15 +15,15 @@ extern int root_mountflags;
62070
62071 static inline int create_dev(char *name, dev_t dev)
62072 {
62073- sys_unlink(name);
62074- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62075+ sys_unlink((char __force_user *)name);
62076+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62077 }
62078
62079 #if BITS_PER_LONG == 32
62080 static inline u32 bstat(char *name)
62081 {
62082 struct stat64 stat;
62083- if (sys_stat64(name, &stat) != 0)
62084+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62085 return 0;
62086 if (!S_ISBLK(stat.st_mode))
62087 return 0;
62088@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62089 static inline u32 bstat(char *name)
62090 {
62091 struct stat stat;
62092- if (sys_newstat(name, &stat) != 0)
62093+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62094 return 0;
62095 if (!S_ISBLK(stat.st_mode))
62096 return 0;
62097diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62098index 3098a38..253064e 100644
62099--- a/init/do_mounts_initrd.c
62100+++ b/init/do_mounts_initrd.c
62101@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62102 create_dev("/dev/root.old", Root_RAM0);
62103 /* mount initrd on rootfs' /root */
62104 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62105- sys_mkdir("/old", 0700);
62106- root_fd = sys_open("/", 0, 0);
62107- old_fd = sys_open("/old", 0, 0);
62108+ sys_mkdir((const char __force_user *)"/old", 0700);
62109+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
62110+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62111 /* move initrd over / and chdir/chroot in initrd root */
62112- sys_chdir("/root");
62113- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62114- sys_chroot(".");
62115+ sys_chdir((const char __force_user *)"/root");
62116+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62117+ sys_chroot((const char __force_user *)".");
62118
62119 /*
62120 * In case that a resume from disk is carried out by linuxrc or one of
62121@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62122
62123 /* move initrd to rootfs' /old */
62124 sys_fchdir(old_fd);
62125- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62126+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62127 /* switch root and cwd back to / of rootfs */
62128 sys_fchdir(root_fd);
62129- sys_chroot(".");
62130+ sys_chroot((const char __force_user *)".");
62131 sys_close(old_fd);
62132 sys_close(root_fd);
62133
62134 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62135- sys_chdir("/old");
62136+ sys_chdir((const char __force_user *)"/old");
62137 return;
62138 }
62139
62140@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62141 mount_root();
62142
62143 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62144- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62145+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62146 if (!error)
62147 printk("okay\n");
62148 else {
62149- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62150+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62151 if (error == -ENOENT)
62152 printk("/initrd does not exist. Ignored.\n");
62153 else
62154 printk("failed\n");
62155 printk(KERN_NOTICE "Unmounting old root\n");
62156- sys_umount("/old", MNT_DETACH);
62157+ sys_umount((char __force_user *)"/old", MNT_DETACH);
62158 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62159 if (fd < 0) {
62160 error = fd;
62161@@ -116,11 +116,11 @@ int __init initrd_load(void)
62162 * mounted in the normal path.
62163 */
62164 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62165- sys_unlink("/initrd.image");
62166+ sys_unlink((const char __force_user *)"/initrd.image");
62167 handle_initrd();
62168 return 1;
62169 }
62170 }
62171- sys_unlink("/initrd.image");
62172+ sys_unlink((const char __force_user *)"/initrd.image");
62173 return 0;
62174 }
62175diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62176index 32c4799..c27ee74 100644
62177--- a/init/do_mounts_md.c
62178+++ b/init/do_mounts_md.c
62179@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62180 partitioned ? "_d" : "", minor,
62181 md_setup_args[ent].device_names);
62182
62183- fd = sys_open(name, 0, 0);
62184+ fd = sys_open((char __force_user *)name, 0, 0);
62185 if (fd < 0) {
62186 printk(KERN_ERR "md: open failed - cannot start "
62187 "array %s\n", name);
62188@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62189 * array without it
62190 */
62191 sys_close(fd);
62192- fd = sys_open(name, 0, 0);
62193+ fd = sys_open((char __force_user *)name, 0, 0);
62194 sys_ioctl(fd, BLKRRPART, 0);
62195 }
62196 sys_close(fd);
62197@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62198
62199 wait_for_device_probe();
62200
62201- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62202+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62203 if (fd >= 0) {
62204 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62205 sys_close(fd);
62206diff --git a/init/initramfs.c b/init/initramfs.c
62207index 2531811..040d4d4 100644
62208--- a/init/initramfs.c
62209+++ b/init/initramfs.c
62210@@ -74,7 +74,7 @@ static void __init free_hash(void)
62211 }
62212 }
62213
62214-static long __init do_utime(char __user *filename, time_t mtime)
62215+static long __init do_utime(__force char __user *filename, time_t mtime)
62216 {
62217 struct timespec t[2];
62218
62219@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62220 struct dir_entry *de, *tmp;
62221 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62222 list_del(&de->list);
62223- do_utime(de->name, de->mtime);
62224+ do_utime((char __force_user *)de->name, de->mtime);
62225 kfree(de->name);
62226 kfree(de);
62227 }
62228@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62229 if (nlink >= 2) {
62230 char *old = find_link(major, minor, ino, mode, collected);
62231 if (old)
62232- return (sys_link(old, collected) < 0) ? -1 : 1;
62233+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62234 }
62235 return 0;
62236 }
62237@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62238 {
62239 struct stat st;
62240
62241- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62242+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62243 if (S_ISDIR(st.st_mode))
62244- sys_rmdir(path);
62245+ sys_rmdir((char __force_user *)path);
62246 else
62247- sys_unlink(path);
62248+ sys_unlink((char __force_user *)path);
62249 }
62250 }
62251
62252@@ -305,7 +305,7 @@ static int __init do_name(void)
62253 int openflags = O_WRONLY|O_CREAT;
62254 if (ml != 1)
62255 openflags |= O_TRUNC;
62256- wfd = sys_open(collected, openflags, mode);
62257+ wfd = sys_open((char __force_user *)collected, openflags, mode);
62258
62259 if (wfd >= 0) {
62260 sys_fchown(wfd, uid, gid);
62261@@ -317,17 +317,17 @@ static int __init do_name(void)
62262 }
62263 }
62264 } else if (S_ISDIR(mode)) {
62265- sys_mkdir(collected, mode);
62266- sys_chown(collected, uid, gid);
62267- sys_chmod(collected, mode);
62268+ sys_mkdir((char __force_user *)collected, mode);
62269+ sys_chown((char __force_user *)collected, uid, gid);
62270+ sys_chmod((char __force_user *)collected, mode);
62271 dir_add(collected, mtime);
62272 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62273 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62274 if (maybe_link() == 0) {
62275- sys_mknod(collected, mode, rdev);
62276- sys_chown(collected, uid, gid);
62277- sys_chmod(collected, mode);
62278- do_utime(collected, mtime);
62279+ sys_mknod((char __force_user *)collected, mode, rdev);
62280+ sys_chown((char __force_user *)collected, uid, gid);
62281+ sys_chmod((char __force_user *)collected, mode);
62282+ do_utime((char __force_user *)collected, mtime);
62283 }
62284 }
62285 return 0;
62286@@ -336,15 +336,15 @@ static int __init do_name(void)
62287 static int __init do_copy(void)
62288 {
62289 if (count >= body_len) {
62290- sys_write(wfd, victim, body_len);
62291+ sys_write(wfd, (char __force_user *)victim, body_len);
62292 sys_close(wfd);
62293- do_utime(vcollected, mtime);
62294+ do_utime((char __force_user *)vcollected, mtime);
62295 kfree(vcollected);
62296 eat(body_len);
62297 state = SkipIt;
62298 return 0;
62299 } else {
62300- sys_write(wfd, victim, count);
62301+ sys_write(wfd, (char __force_user *)victim, count);
62302 body_len -= count;
62303 eat(count);
62304 return 1;
62305@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62306 {
62307 collected[N_ALIGN(name_len) + body_len] = '\0';
62308 clean_path(collected, 0);
62309- sys_symlink(collected + N_ALIGN(name_len), collected);
62310- sys_lchown(collected, uid, gid);
62311- do_utime(collected, mtime);
62312+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62313+ sys_lchown((char __force_user *)collected, uid, gid);
62314+ do_utime((char __force_user *)collected, mtime);
62315 state = SkipIt;
62316 next_state = Reset;
62317 return 0;
62318diff --git a/init/main.c b/init/main.c
62319index 217ed23..32e5731 100644
62320--- a/init/main.c
62321+++ b/init/main.c
62322@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62323 extern void tc_init(void);
62324 #endif
62325
62326+extern void grsecurity_init(void);
62327+
62328 /*
62329 * Debug helper: via this flag we know that we are in 'early bootup code'
62330 * where only the boot processor is running with IRQ disabled. This means
62331@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62332
62333 __setup("reset_devices", set_reset_devices);
62334
62335+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62336+extern char pax_enter_kernel_user[];
62337+extern char pax_exit_kernel_user[];
62338+extern pgdval_t clone_pgd_mask;
62339+#endif
62340+
62341+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62342+static int __init setup_pax_nouderef(char *str)
62343+{
62344+#ifdef CONFIG_X86_32
62345+ unsigned int cpu;
62346+ struct desc_struct *gdt;
62347+
62348+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
62349+ gdt = get_cpu_gdt_table(cpu);
62350+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62351+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62352+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62353+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62354+ }
62355+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62356+#else
62357+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62358+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62359+ clone_pgd_mask = ~(pgdval_t)0UL;
62360+#endif
62361+
62362+ return 0;
62363+}
62364+early_param("pax_nouderef", setup_pax_nouderef);
62365+#endif
62366+
62367+#ifdef CONFIG_PAX_SOFTMODE
62368+int pax_softmode;
62369+
62370+static int __init setup_pax_softmode(char *str)
62371+{
62372+ get_option(&str, &pax_softmode);
62373+ return 1;
62374+}
62375+__setup("pax_softmode=", setup_pax_softmode);
62376+#endif
62377+
62378 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62379 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62380 static const char *panic_later, *panic_param;
62381@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62382 {
62383 int count = preempt_count();
62384 int ret;
62385+ const char *msg1 = "", *msg2 = "";
62386
62387 if (initcall_debug)
62388 ret = do_one_initcall_debug(fn);
62389@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62390 sprintf(msgbuf, "error code %d ", ret);
62391
62392 if (preempt_count() != count) {
62393- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62394+ msg1 = " preemption imbalance";
62395 preempt_count() = count;
62396 }
62397 if (irqs_disabled()) {
62398- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62399+ msg2 = " disabled interrupts";
62400 local_irq_enable();
62401 }
62402- if (msgbuf[0]) {
62403- printk("initcall %pF returned with %s\n", fn, msgbuf);
62404+ if (msgbuf[0] || *msg1 || *msg2) {
62405+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62406 }
62407
62408 return ret;
62409@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62410 do_basic_setup();
62411
62412 /* Open the /dev/console on the rootfs, this should never fail */
62413- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62414+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62415 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62416
62417 (void) sys_dup(0);
62418@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62419 if (!ramdisk_execute_command)
62420 ramdisk_execute_command = "/init";
62421
62422- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62423+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62424 ramdisk_execute_command = NULL;
62425 prepare_namespace();
62426 }
62427
62428+ grsecurity_init();
62429+
62430 /*
62431 * Ok, we have completed the initial bootup, and
62432 * we're essentially up and running. Get rid of the
62433diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62434index 5b4293d..f179875 100644
62435--- a/ipc/mqueue.c
62436+++ b/ipc/mqueue.c
62437@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62438 mq_bytes = (mq_msg_tblsz +
62439 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62440
62441+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62442 spin_lock(&mq_lock);
62443 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62444 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62445diff --git a/ipc/msg.c b/ipc/msg.c
62446index 7385de2..a8180e0 100644
62447--- a/ipc/msg.c
62448+++ b/ipc/msg.c
62449@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62450 return security_msg_queue_associate(msq, msgflg);
62451 }
62452
62453+static struct ipc_ops msg_ops = {
62454+ .getnew = newque,
62455+ .associate = msg_security,
62456+ .more_checks = NULL
62457+};
62458+
62459 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62460 {
62461 struct ipc_namespace *ns;
62462- struct ipc_ops msg_ops;
62463 struct ipc_params msg_params;
62464
62465 ns = current->nsproxy->ipc_ns;
62466
62467- msg_ops.getnew = newque;
62468- msg_ops.associate = msg_security;
62469- msg_ops.more_checks = NULL;
62470-
62471 msg_params.key = key;
62472 msg_params.flg = msgflg;
62473
62474diff --git a/ipc/sem.c b/ipc/sem.c
62475index 5215a81..cfc0cac 100644
62476--- a/ipc/sem.c
62477+++ b/ipc/sem.c
62478@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62479 return 0;
62480 }
62481
62482+static struct ipc_ops sem_ops = {
62483+ .getnew = newary,
62484+ .associate = sem_security,
62485+ .more_checks = sem_more_checks
62486+};
62487+
62488 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62489 {
62490 struct ipc_namespace *ns;
62491- struct ipc_ops sem_ops;
62492 struct ipc_params sem_params;
62493
62494 ns = current->nsproxy->ipc_ns;
62495@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62496 if (nsems < 0 || nsems > ns->sc_semmsl)
62497 return -EINVAL;
62498
62499- sem_ops.getnew = newary;
62500- sem_ops.associate = sem_security;
62501- sem_ops.more_checks = sem_more_checks;
62502-
62503 sem_params.key = key;
62504 sem_params.flg = semflg;
62505 sem_params.u.nsems = nsems;
62506diff --git a/ipc/shm.c b/ipc/shm.c
62507index b76be5b..859e750 100644
62508--- a/ipc/shm.c
62509+++ b/ipc/shm.c
62510@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62511 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62512 #endif
62513
62514+#ifdef CONFIG_GRKERNSEC
62515+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62516+ const time_t shm_createtime, const uid_t cuid,
62517+ const int shmid);
62518+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62519+ const time_t shm_createtime);
62520+#endif
62521+
62522 void shm_init_ns(struct ipc_namespace *ns)
62523 {
62524 ns->shm_ctlmax = SHMMAX;
62525@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62526 shp->shm_lprid = 0;
62527 shp->shm_atim = shp->shm_dtim = 0;
62528 shp->shm_ctim = get_seconds();
62529+#ifdef CONFIG_GRKERNSEC
62530+ {
62531+ struct timespec timeval;
62532+ do_posix_clock_monotonic_gettime(&timeval);
62533+
62534+ shp->shm_createtime = timeval.tv_sec;
62535+ }
62536+#endif
62537 shp->shm_segsz = size;
62538 shp->shm_nattch = 0;
62539 shp->shm_file = file;
62540@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62541 return 0;
62542 }
62543
62544+static struct ipc_ops shm_ops = {
62545+ .getnew = newseg,
62546+ .associate = shm_security,
62547+ .more_checks = shm_more_checks
62548+};
62549+
62550 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62551 {
62552 struct ipc_namespace *ns;
62553- struct ipc_ops shm_ops;
62554 struct ipc_params shm_params;
62555
62556 ns = current->nsproxy->ipc_ns;
62557
62558- shm_ops.getnew = newseg;
62559- shm_ops.associate = shm_security;
62560- shm_ops.more_checks = shm_more_checks;
62561-
62562 shm_params.key = key;
62563 shm_params.flg = shmflg;
62564 shm_params.u.size = size;
62565@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62566 f_mode = FMODE_READ | FMODE_WRITE;
62567 }
62568 if (shmflg & SHM_EXEC) {
62569+
62570+#ifdef CONFIG_PAX_MPROTECT
62571+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
62572+ goto out;
62573+#endif
62574+
62575 prot |= PROT_EXEC;
62576 acc_mode |= S_IXUGO;
62577 }
62578@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62579 if (err)
62580 goto out_unlock;
62581
62582+#ifdef CONFIG_GRKERNSEC
62583+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62584+ shp->shm_perm.cuid, shmid) ||
62585+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62586+ err = -EACCES;
62587+ goto out_unlock;
62588+ }
62589+#endif
62590+
62591 path = shp->shm_file->f_path;
62592 path_get(&path);
62593 shp->shm_nattch++;
62594+#ifdef CONFIG_GRKERNSEC
62595+ shp->shm_lapid = current->pid;
62596+#endif
62597 size = i_size_read(path.dentry->d_inode);
62598 shm_unlock(shp);
62599
62600diff --git a/kernel/acct.c b/kernel/acct.c
62601index fa7eb3d..7faf116 100644
62602--- a/kernel/acct.c
62603+++ b/kernel/acct.c
62604@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62605 */
62606 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62607 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62608- file->f_op->write(file, (char *)&ac,
62609+ file->f_op->write(file, (char __force_user *)&ac,
62610 sizeof(acct_t), &file->f_pos);
62611 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62612 set_fs(fs);
62613diff --git a/kernel/audit.c b/kernel/audit.c
62614index 09fae26..ed71d5b 100644
62615--- a/kernel/audit.c
62616+++ b/kernel/audit.c
62617@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62618 3) suppressed due to audit_rate_limit
62619 4) suppressed due to audit_backlog_limit
62620 */
62621-static atomic_t audit_lost = ATOMIC_INIT(0);
62622+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62623
62624 /* The netlink socket. */
62625 static struct sock *audit_sock;
62626@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62627 unsigned long now;
62628 int print;
62629
62630- atomic_inc(&audit_lost);
62631+ atomic_inc_unchecked(&audit_lost);
62632
62633 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62634
62635@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62636 printk(KERN_WARNING
62637 "audit: audit_lost=%d audit_rate_limit=%d "
62638 "audit_backlog_limit=%d\n",
62639- atomic_read(&audit_lost),
62640+ atomic_read_unchecked(&audit_lost),
62641 audit_rate_limit,
62642 audit_backlog_limit);
62643 audit_panic(message);
62644@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62645 status_set.pid = audit_pid;
62646 status_set.rate_limit = audit_rate_limit;
62647 status_set.backlog_limit = audit_backlog_limit;
62648- status_set.lost = atomic_read(&audit_lost);
62649+ status_set.lost = atomic_read_unchecked(&audit_lost);
62650 status_set.backlog = skb_queue_len(&audit_skb_queue);
62651 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62652 &status_set, sizeof(status_set));
62653@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62654 avail = audit_expand(ab,
62655 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62656 if (!avail)
62657- goto out;
62658+ goto out_va_end;
62659 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62660 }
62661- va_end(args2);
62662 if (len > 0)
62663 skb_put(skb, len);
62664+out_va_end:
62665+ va_end(args2);
62666 out:
62667 return;
62668 }
62669diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62670index 47b7fc1..c003c33 100644
62671--- a/kernel/auditsc.c
62672+++ b/kernel/auditsc.c
62673@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62674 struct audit_buffer **ab,
62675 struct audit_aux_data_execve *axi)
62676 {
62677- int i;
62678- size_t len, len_sent = 0;
62679+ int i, len;
62680+ size_t len_sent = 0;
62681 const char __user *p;
62682 char *buf;
62683
62684@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62685 }
62686
62687 /* global counter which is incremented every time something logs in */
62688-static atomic_t session_id = ATOMIC_INIT(0);
62689+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62690
62691 /**
62692 * audit_set_loginuid - set a task's audit_context loginuid
62693@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62694 */
62695 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62696 {
62697- unsigned int sessionid = atomic_inc_return(&session_id);
62698+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62699 struct audit_context *context = task->audit_context;
62700
62701 if (context && context->in_syscall) {
62702diff --git a/kernel/capability.c b/kernel/capability.c
62703index b463871..fa3ea1f 100644
62704--- a/kernel/capability.c
62705+++ b/kernel/capability.c
62706@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62707 * before modification is attempted and the application
62708 * fails.
62709 */
62710+ if (tocopy > ARRAY_SIZE(kdata))
62711+ return -EFAULT;
62712+
62713 if (copy_to_user(dataptr, kdata, tocopy
62714 * sizeof(struct __user_cap_data_struct))) {
62715 return -EFAULT;
62716@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62717 BUG();
62718 }
62719
62720- if (security_capable(ns, current_cred(), cap) == 0) {
62721+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62722 current->flags |= PF_SUPERPRIV;
62723 return true;
62724 }
62725@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62726 }
62727 EXPORT_SYMBOL(ns_capable);
62728
62729+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62730+{
62731+ if (unlikely(!cap_valid(cap))) {
62732+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62733+ BUG();
62734+ }
62735+
62736+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62737+ current->flags |= PF_SUPERPRIV;
62738+ return true;
62739+ }
62740+ return false;
62741+}
62742+EXPORT_SYMBOL(ns_capable_nolog);
62743+
62744+bool capable_nolog(int cap)
62745+{
62746+ return ns_capable_nolog(&init_user_ns, cap);
62747+}
62748+EXPORT_SYMBOL(capable_nolog);
62749+
62750 /**
62751 * task_ns_capable - Determine whether current task has a superior
62752 * capability targeted at a specific task's user namespace.
62753@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62754 }
62755 EXPORT_SYMBOL(task_ns_capable);
62756
62757+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62758+{
62759+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62760+}
62761+EXPORT_SYMBOL(task_ns_capable_nolog);
62762+
62763 /**
62764 * nsown_capable - Check superior capability to one's own user_ns
62765 * @cap: The capability in question
62766diff --git a/kernel/compat.c b/kernel/compat.c
62767index f346ced..aa2b1f4 100644
62768--- a/kernel/compat.c
62769+++ b/kernel/compat.c
62770@@ -13,6 +13,7 @@
62771
62772 #include <linux/linkage.h>
62773 #include <linux/compat.h>
62774+#include <linux/module.h>
62775 #include <linux/errno.h>
62776 #include <linux/time.h>
62777 #include <linux/signal.h>
62778@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62779 mm_segment_t oldfs;
62780 long ret;
62781
62782- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62783+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62784 oldfs = get_fs();
62785 set_fs(KERNEL_DS);
62786 ret = hrtimer_nanosleep_restart(restart);
62787@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62788 oldfs = get_fs();
62789 set_fs(KERNEL_DS);
62790 ret = hrtimer_nanosleep(&tu,
62791- rmtp ? (struct timespec __user *)&rmt : NULL,
62792+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62793 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62794 set_fs(oldfs);
62795
62796@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62797 mm_segment_t old_fs = get_fs();
62798
62799 set_fs(KERNEL_DS);
62800- ret = sys_sigpending((old_sigset_t __user *) &s);
62801+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62802 set_fs(old_fs);
62803 if (ret == 0)
62804 ret = put_user(s, set);
62805@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62806 old_fs = get_fs();
62807 set_fs(KERNEL_DS);
62808 ret = sys_sigprocmask(how,
62809- set ? (old_sigset_t __user *) &s : NULL,
62810- oset ? (old_sigset_t __user *) &s : NULL);
62811+ set ? (old_sigset_t __force_user *) &s : NULL,
62812+ oset ? (old_sigset_t __force_user *) &s : NULL);
62813 set_fs(old_fs);
62814 if (ret == 0)
62815 if (oset)
62816@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62817 mm_segment_t old_fs = get_fs();
62818
62819 set_fs(KERNEL_DS);
62820- ret = sys_old_getrlimit(resource, &r);
62821+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62822 set_fs(old_fs);
62823
62824 if (!ret) {
62825@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62826 mm_segment_t old_fs = get_fs();
62827
62828 set_fs(KERNEL_DS);
62829- ret = sys_getrusage(who, (struct rusage __user *) &r);
62830+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62831 set_fs(old_fs);
62832
62833 if (ret)
62834@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62835 set_fs (KERNEL_DS);
62836 ret = sys_wait4(pid,
62837 (stat_addr ?
62838- (unsigned int __user *) &status : NULL),
62839- options, (struct rusage __user *) &r);
62840+ (unsigned int __force_user *) &status : NULL),
62841+ options, (struct rusage __force_user *) &r);
62842 set_fs (old_fs);
62843
62844 if (ret > 0) {
62845@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62846 memset(&info, 0, sizeof(info));
62847
62848 set_fs(KERNEL_DS);
62849- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62850- uru ? (struct rusage __user *)&ru : NULL);
62851+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62852+ uru ? (struct rusage __force_user *)&ru : NULL);
62853 set_fs(old_fs);
62854
62855 if ((ret < 0) || (info.si_signo == 0))
62856@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62857 oldfs = get_fs();
62858 set_fs(KERNEL_DS);
62859 err = sys_timer_settime(timer_id, flags,
62860- (struct itimerspec __user *) &newts,
62861- (struct itimerspec __user *) &oldts);
62862+ (struct itimerspec __force_user *) &newts,
62863+ (struct itimerspec __force_user *) &oldts);
62864 set_fs(oldfs);
62865 if (!err && old && put_compat_itimerspec(old, &oldts))
62866 return -EFAULT;
62867@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62868 oldfs = get_fs();
62869 set_fs(KERNEL_DS);
62870 err = sys_timer_gettime(timer_id,
62871- (struct itimerspec __user *) &ts);
62872+ (struct itimerspec __force_user *) &ts);
62873 set_fs(oldfs);
62874 if (!err && put_compat_itimerspec(setting, &ts))
62875 return -EFAULT;
62876@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62877 oldfs = get_fs();
62878 set_fs(KERNEL_DS);
62879 err = sys_clock_settime(which_clock,
62880- (struct timespec __user *) &ts);
62881+ (struct timespec __force_user *) &ts);
62882 set_fs(oldfs);
62883 return err;
62884 }
62885@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62886 oldfs = get_fs();
62887 set_fs(KERNEL_DS);
62888 err = sys_clock_gettime(which_clock,
62889- (struct timespec __user *) &ts);
62890+ (struct timespec __force_user *) &ts);
62891 set_fs(oldfs);
62892 if (!err && put_compat_timespec(&ts, tp))
62893 return -EFAULT;
62894@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62895
62896 oldfs = get_fs();
62897 set_fs(KERNEL_DS);
62898- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62899+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62900 set_fs(oldfs);
62901
62902 err = compat_put_timex(utp, &txc);
62903@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62904 oldfs = get_fs();
62905 set_fs(KERNEL_DS);
62906 err = sys_clock_getres(which_clock,
62907- (struct timespec __user *) &ts);
62908+ (struct timespec __force_user *) &ts);
62909 set_fs(oldfs);
62910 if (!err && tp && put_compat_timespec(&ts, tp))
62911 return -EFAULT;
62912@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62913 long err;
62914 mm_segment_t oldfs;
62915 struct timespec tu;
62916- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62917+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62918
62919- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62920+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62921 oldfs = get_fs();
62922 set_fs(KERNEL_DS);
62923 err = clock_nanosleep_restart(restart);
62924@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62925 oldfs = get_fs();
62926 set_fs(KERNEL_DS);
62927 err = sys_clock_nanosleep(which_clock, flags,
62928- (struct timespec __user *) &in,
62929- (struct timespec __user *) &out);
62930+ (struct timespec __force_user *) &in,
62931+ (struct timespec __force_user *) &out);
62932 set_fs(oldfs);
62933
62934 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62935diff --git a/kernel/configs.c b/kernel/configs.c
62936index 42e8fa0..9e7406b 100644
62937--- a/kernel/configs.c
62938+++ b/kernel/configs.c
62939@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62940 struct proc_dir_entry *entry;
62941
62942 /* create the current config file */
62943+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62944+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62945+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62946+ &ikconfig_file_ops);
62947+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62948+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62949+ &ikconfig_file_ops);
62950+#endif
62951+#else
62952 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62953 &ikconfig_file_ops);
62954+#endif
62955+
62956 if (!entry)
62957 return -ENOMEM;
62958
62959diff --git a/kernel/cred.c b/kernel/cred.c
62960index 5791612..a3c04dc 100644
62961--- a/kernel/cred.c
62962+++ b/kernel/cred.c
62963@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62964 validate_creds(cred);
62965 put_cred(cred);
62966 }
62967+
62968+#ifdef CONFIG_GRKERNSEC_SETXID
62969+ cred = (struct cred *) tsk->delayed_cred;
62970+ if (cred) {
62971+ tsk->delayed_cred = NULL;
62972+ validate_creds(cred);
62973+ put_cred(cred);
62974+ }
62975+#endif
62976 }
62977
62978 /**
62979@@ -470,7 +479,7 @@ error_put:
62980 * Always returns 0 thus allowing this function to be tail-called at the end
62981 * of, say, sys_setgid().
62982 */
62983-int commit_creds(struct cred *new)
62984+static int __commit_creds(struct cred *new)
62985 {
62986 struct task_struct *task = current;
62987 const struct cred *old = task->real_cred;
62988@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
62989
62990 get_cred(new); /* we will require a ref for the subj creds too */
62991
62992+ gr_set_role_label(task, new->uid, new->gid);
62993+
62994 /* dumpability changes */
62995 if (old->euid != new->euid ||
62996 old->egid != new->egid ||
62997@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
62998 put_cred(old);
62999 return 0;
63000 }
63001+#ifdef CONFIG_GRKERNSEC_SETXID
63002+extern int set_user(struct cred *new);
63003+
63004+void gr_delayed_cred_worker(void)
63005+{
63006+ const struct cred *new = current->delayed_cred;
63007+ struct cred *ncred;
63008+
63009+ current->delayed_cred = NULL;
63010+
63011+ if (current_uid() && new != NULL) {
63012+ // from doing get_cred on it when queueing this
63013+ put_cred(new);
63014+ return;
63015+ } else if (new == NULL)
63016+ return;
63017+
63018+ ncred = prepare_creds();
63019+ if (!ncred)
63020+ goto die;
63021+ // uids
63022+ ncred->uid = new->uid;
63023+ ncred->euid = new->euid;
63024+ ncred->suid = new->suid;
63025+ ncred->fsuid = new->fsuid;
63026+ // gids
63027+ ncred->gid = new->gid;
63028+ ncred->egid = new->egid;
63029+ ncred->sgid = new->sgid;
63030+ ncred->fsgid = new->fsgid;
63031+ // groups
63032+ if (set_groups(ncred, new->group_info) < 0) {
63033+ abort_creds(ncred);
63034+ goto die;
63035+ }
63036+ // caps
63037+ ncred->securebits = new->securebits;
63038+ ncred->cap_inheritable = new->cap_inheritable;
63039+ ncred->cap_permitted = new->cap_permitted;
63040+ ncred->cap_effective = new->cap_effective;
63041+ ncred->cap_bset = new->cap_bset;
63042+
63043+ if (set_user(ncred)) {
63044+ abort_creds(ncred);
63045+ goto die;
63046+ }
63047+
63048+ // from doing get_cred on it when queueing this
63049+ put_cred(new);
63050+
63051+ __commit_creds(ncred);
63052+ return;
63053+die:
63054+ // from doing get_cred on it when queueing this
63055+ put_cred(new);
63056+ do_group_exit(SIGKILL);
63057+}
63058+#endif
63059+
63060+int commit_creds(struct cred *new)
63061+{
63062+#ifdef CONFIG_GRKERNSEC_SETXID
63063+ struct task_struct *t;
63064+
63065+ /* we won't get called with tasklist_lock held for writing
63066+ and interrupts disabled as the cred struct in that case is
63067+ init_cred
63068+ */
63069+ if (grsec_enable_setxid && !current_is_single_threaded() &&
63070+ !current_uid() && new->uid) {
63071+ rcu_read_lock();
63072+ read_lock(&tasklist_lock);
63073+ for (t = next_thread(current); t != current;
63074+ t = next_thread(t)) {
63075+ if (t->delayed_cred == NULL) {
63076+ t->delayed_cred = get_cred(new);
63077+ set_tsk_need_resched(t);
63078+ }
63079+ }
63080+ read_unlock(&tasklist_lock);
63081+ rcu_read_unlock();
63082+ }
63083+#endif
63084+ return __commit_creds(new);
63085+}
63086+
63087 EXPORT_SYMBOL(commit_creds);
63088
63089 /**
63090diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63091index 0d7c087..01b8cef 100644
63092--- a/kernel/debug/debug_core.c
63093+++ b/kernel/debug/debug_core.c
63094@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63095 */
63096 static atomic_t masters_in_kgdb;
63097 static atomic_t slaves_in_kgdb;
63098-static atomic_t kgdb_break_tasklet_var;
63099+static atomic_unchecked_t kgdb_break_tasklet_var;
63100 atomic_t kgdb_setting_breakpoint;
63101
63102 struct task_struct *kgdb_usethread;
63103@@ -129,7 +129,7 @@ int kgdb_single_step;
63104 static pid_t kgdb_sstep_pid;
63105
63106 /* to keep track of the CPU which is doing the single stepping*/
63107-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63108+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63109
63110 /*
63111 * If you are debugging a problem where roundup (the collection of
63112@@ -542,7 +542,7 @@ return_normal:
63113 * kernel will only try for the value of sstep_tries before
63114 * giving up and continuing on.
63115 */
63116- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63117+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63118 (kgdb_info[cpu].task &&
63119 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63120 atomic_set(&kgdb_active, -1);
63121@@ -636,8 +636,8 @@ cpu_master_loop:
63122 }
63123
63124 kgdb_restore:
63125- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63126- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63127+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63128+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63129 if (kgdb_info[sstep_cpu].task)
63130 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63131 else
63132@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63133 static void kgdb_tasklet_bpt(unsigned long ing)
63134 {
63135 kgdb_breakpoint();
63136- atomic_set(&kgdb_break_tasklet_var, 0);
63137+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63138 }
63139
63140 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63141
63142 void kgdb_schedule_breakpoint(void)
63143 {
63144- if (atomic_read(&kgdb_break_tasklet_var) ||
63145+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63146 atomic_read(&kgdb_active) != -1 ||
63147 atomic_read(&kgdb_setting_breakpoint))
63148 return;
63149- atomic_inc(&kgdb_break_tasklet_var);
63150+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
63151 tasklet_schedule(&kgdb_tasklet_breakpoint);
63152 }
63153 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63154diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63155index 63786e7..0780cac 100644
63156--- a/kernel/debug/kdb/kdb_main.c
63157+++ b/kernel/debug/kdb/kdb_main.c
63158@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63159 list_for_each_entry(mod, kdb_modules, list) {
63160
63161 kdb_printf("%-20s%8u 0x%p ", mod->name,
63162- mod->core_size, (void *)mod);
63163+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
63164 #ifdef CONFIG_MODULE_UNLOAD
63165 kdb_printf("%4d ", module_refcount(mod));
63166 #endif
63167@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63168 kdb_printf(" (Loading)");
63169 else
63170 kdb_printf(" (Live)");
63171- kdb_printf(" 0x%p", mod->module_core);
63172+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63173
63174 #ifdef CONFIG_MODULE_UNLOAD
63175 {
63176diff --git a/kernel/events/core.c b/kernel/events/core.c
63177index 58690af..d903d75 100644
63178--- a/kernel/events/core.c
63179+++ b/kernel/events/core.c
63180@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63181 return 0;
63182 }
63183
63184-static atomic64_t perf_event_id;
63185+static atomic64_unchecked_t perf_event_id;
63186
63187 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63188 enum event_type_t event_type);
63189@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63190
63191 static inline u64 perf_event_count(struct perf_event *event)
63192 {
63193- return local64_read(&event->count) + atomic64_read(&event->child_count);
63194+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63195 }
63196
63197 static u64 perf_event_read(struct perf_event *event)
63198@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63199 mutex_lock(&event->child_mutex);
63200 total += perf_event_read(event);
63201 *enabled += event->total_time_enabled +
63202- atomic64_read(&event->child_total_time_enabled);
63203+ atomic64_read_unchecked(&event->child_total_time_enabled);
63204 *running += event->total_time_running +
63205- atomic64_read(&event->child_total_time_running);
63206+ atomic64_read_unchecked(&event->child_total_time_running);
63207
63208 list_for_each_entry(child, &event->child_list, child_list) {
63209 total += perf_event_read(child);
63210@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63211 userpg->offset -= local64_read(&event->hw.prev_count);
63212
63213 userpg->time_enabled = enabled +
63214- atomic64_read(&event->child_total_time_enabled);
63215+ atomic64_read_unchecked(&event->child_total_time_enabled);
63216
63217 userpg->time_running = running +
63218- atomic64_read(&event->child_total_time_running);
63219+ atomic64_read_unchecked(&event->child_total_time_running);
63220
63221 barrier();
63222 ++userpg->lock;
63223@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63224 values[n++] = perf_event_count(event);
63225 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63226 values[n++] = enabled +
63227- atomic64_read(&event->child_total_time_enabled);
63228+ atomic64_read_unchecked(&event->child_total_time_enabled);
63229 }
63230 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63231 values[n++] = running +
63232- atomic64_read(&event->child_total_time_running);
63233+ atomic64_read_unchecked(&event->child_total_time_running);
63234 }
63235 if (read_format & PERF_FORMAT_ID)
63236 values[n++] = primary_event_id(event);
63237@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63238 * need to add enough zero bytes after the string to handle
63239 * the 64bit alignment we do later.
63240 */
63241- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63242+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
63243 if (!buf) {
63244 name = strncpy(tmp, "//enomem", sizeof(tmp));
63245 goto got_name;
63246 }
63247- name = d_path(&file->f_path, buf, PATH_MAX);
63248+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63249 if (IS_ERR(name)) {
63250 name = strncpy(tmp, "//toolong", sizeof(tmp));
63251 goto got_name;
63252@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63253 event->parent = parent_event;
63254
63255 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63256- event->id = atomic64_inc_return(&perf_event_id);
63257+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
63258
63259 event->state = PERF_EVENT_STATE_INACTIVE;
63260
63261@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63262 /*
63263 * Add back the child's count to the parent's count:
63264 */
63265- atomic64_add(child_val, &parent_event->child_count);
63266- atomic64_add(child_event->total_time_enabled,
63267+ atomic64_add_unchecked(child_val, &parent_event->child_count);
63268+ atomic64_add_unchecked(child_event->total_time_enabled,
63269 &parent_event->child_total_time_enabled);
63270- atomic64_add(child_event->total_time_running,
63271+ atomic64_add_unchecked(child_event->total_time_running,
63272 &parent_event->child_total_time_running);
63273
63274 /*
63275diff --git a/kernel/exit.c b/kernel/exit.c
63276index e6e01b9..619f837 100644
63277--- a/kernel/exit.c
63278+++ b/kernel/exit.c
63279@@ -57,6 +57,10 @@
63280 #include <asm/pgtable.h>
63281 #include <asm/mmu_context.h>
63282
63283+#ifdef CONFIG_GRKERNSEC
63284+extern rwlock_t grsec_exec_file_lock;
63285+#endif
63286+
63287 static void exit_mm(struct task_struct * tsk);
63288
63289 static void __unhash_process(struct task_struct *p, bool group_dead)
63290@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63291 struct task_struct *leader;
63292 int zap_leader;
63293 repeat:
63294+#ifdef CONFIG_NET
63295+ gr_del_task_from_ip_table(p);
63296+#endif
63297+
63298 /* don't need to get the RCU readlock here - the process is dead and
63299 * can't be modifying its own credentials. But shut RCU-lockdep up */
63300 rcu_read_lock();
63301@@ -380,7 +388,7 @@ int allow_signal(int sig)
63302 * know it'll be handled, so that they don't get converted to
63303 * SIGKILL or just silently dropped.
63304 */
63305- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63306+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63307 recalc_sigpending();
63308 spin_unlock_irq(&current->sighand->siglock);
63309 return 0;
63310@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63311 vsnprintf(current->comm, sizeof(current->comm), name, args);
63312 va_end(args);
63313
63314+#ifdef CONFIG_GRKERNSEC
63315+ write_lock(&grsec_exec_file_lock);
63316+ if (current->exec_file) {
63317+ fput(current->exec_file);
63318+ current->exec_file = NULL;
63319+ }
63320+ write_unlock(&grsec_exec_file_lock);
63321+#endif
63322+
63323+ gr_set_kernel_label(current);
63324+
63325 /*
63326 * If we were started as result of loading a module, close all of the
63327 * user space pages. We don't need them, and if we didn't close them
63328@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63329 struct task_struct *tsk = current;
63330 int group_dead;
63331
63332+ set_fs(USER_DS);
63333+
63334 profile_task_exit(tsk);
63335
63336 WARN_ON(blk_needs_flush_plug(tsk));
63337@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63338 * mm_release()->clear_child_tid() from writing to a user-controlled
63339 * kernel address.
63340 */
63341- set_fs(USER_DS);
63342
63343 ptrace_event(PTRACE_EVENT_EXIT, code);
63344
63345@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63346 tsk->exit_code = code;
63347 taskstats_exit(tsk, group_dead);
63348
63349+ gr_acl_handle_psacct(tsk, code);
63350+ gr_acl_handle_exit();
63351+
63352 exit_mm(tsk);
63353
63354 if (group_dead)
63355diff --git a/kernel/fork.c b/kernel/fork.c
63356index da4a6a1..c04943c 100644
63357--- a/kernel/fork.c
63358+++ b/kernel/fork.c
63359@@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63360 *stackend = STACK_END_MAGIC; /* for overflow detection */
63361
63362 #ifdef CONFIG_CC_STACKPROTECTOR
63363- tsk->stack_canary = get_random_int();
63364+ tsk->stack_canary = pax_get_random_long();
63365 #endif
63366
63367 /*
63368@@ -304,13 +304,77 @@ out:
63369 }
63370
63371 #ifdef CONFIG_MMU
63372+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63373+{
63374+ struct vm_area_struct *tmp;
63375+ unsigned long charge;
63376+ struct mempolicy *pol;
63377+ struct file *file;
63378+
63379+ charge = 0;
63380+ if (mpnt->vm_flags & VM_ACCOUNT) {
63381+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63382+ if (security_vm_enough_memory(len))
63383+ goto fail_nomem;
63384+ charge = len;
63385+ }
63386+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63387+ if (!tmp)
63388+ goto fail_nomem;
63389+ *tmp = *mpnt;
63390+ tmp->vm_mm = mm;
63391+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63392+ pol = mpol_dup(vma_policy(mpnt));
63393+ if (IS_ERR(pol))
63394+ goto fail_nomem_policy;
63395+ vma_set_policy(tmp, pol);
63396+ if (anon_vma_fork(tmp, mpnt))
63397+ goto fail_nomem_anon_vma_fork;
63398+ tmp->vm_flags &= ~VM_LOCKED;
63399+ tmp->vm_next = tmp->vm_prev = NULL;
63400+ tmp->vm_mirror = NULL;
63401+ file = tmp->vm_file;
63402+ if (file) {
63403+ struct inode *inode = file->f_path.dentry->d_inode;
63404+ struct address_space *mapping = file->f_mapping;
63405+
63406+ get_file(file);
63407+ if (tmp->vm_flags & VM_DENYWRITE)
63408+ atomic_dec(&inode->i_writecount);
63409+ mutex_lock(&mapping->i_mmap_mutex);
63410+ if (tmp->vm_flags & VM_SHARED)
63411+ mapping->i_mmap_writable++;
63412+ flush_dcache_mmap_lock(mapping);
63413+ /* insert tmp into the share list, just after mpnt */
63414+ vma_prio_tree_add(tmp, mpnt);
63415+ flush_dcache_mmap_unlock(mapping);
63416+ mutex_unlock(&mapping->i_mmap_mutex);
63417+ }
63418+
63419+ /*
63420+ * Clear hugetlb-related page reserves for children. This only
63421+ * affects MAP_PRIVATE mappings. Faults generated by the child
63422+ * are not guaranteed to succeed, even if read-only
63423+ */
63424+ if (is_vm_hugetlb_page(tmp))
63425+ reset_vma_resv_huge_pages(tmp);
63426+
63427+ return tmp;
63428+
63429+fail_nomem_anon_vma_fork:
63430+ mpol_put(pol);
63431+fail_nomem_policy:
63432+ kmem_cache_free(vm_area_cachep, tmp);
63433+fail_nomem:
63434+ vm_unacct_memory(charge);
63435+ return NULL;
63436+}
63437+
63438 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63439 {
63440 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63441 struct rb_node **rb_link, *rb_parent;
63442 int retval;
63443- unsigned long charge;
63444- struct mempolicy *pol;
63445
63446 down_write(&oldmm->mmap_sem);
63447 flush_cache_dup_mm(oldmm);
63448@@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63449 mm->locked_vm = 0;
63450 mm->mmap = NULL;
63451 mm->mmap_cache = NULL;
63452- mm->free_area_cache = oldmm->mmap_base;
63453- mm->cached_hole_size = ~0UL;
63454+ mm->free_area_cache = oldmm->free_area_cache;
63455+ mm->cached_hole_size = oldmm->cached_hole_size;
63456 mm->map_count = 0;
63457 cpumask_clear(mm_cpumask(mm));
63458 mm->mm_rb = RB_ROOT;
63459@@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63460
63461 prev = NULL;
63462 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63463- struct file *file;
63464-
63465 if (mpnt->vm_flags & VM_DONTCOPY) {
63466 long pages = vma_pages(mpnt);
63467 mm->total_vm -= pages;
63468@@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63469 -pages);
63470 continue;
63471 }
63472- charge = 0;
63473- if (mpnt->vm_flags & VM_ACCOUNT) {
63474- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63475- if (security_vm_enough_memory(len))
63476- goto fail_nomem;
63477- charge = len;
63478+ tmp = dup_vma(mm, mpnt);
63479+ if (!tmp) {
63480+ retval = -ENOMEM;
63481+ goto out;
63482 }
63483- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63484- if (!tmp)
63485- goto fail_nomem;
63486- *tmp = *mpnt;
63487- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63488- pol = mpol_dup(vma_policy(mpnt));
63489- retval = PTR_ERR(pol);
63490- if (IS_ERR(pol))
63491- goto fail_nomem_policy;
63492- vma_set_policy(tmp, pol);
63493- tmp->vm_mm = mm;
63494- if (anon_vma_fork(tmp, mpnt))
63495- goto fail_nomem_anon_vma_fork;
63496- tmp->vm_flags &= ~VM_LOCKED;
63497- tmp->vm_next = tmp->vm_prev = NULL;
63498- file = tmp->vm_file;
63499- if (file) {
63500- struct inode *inode = file->f_path.dentry->d_inode;
63501- struct address_space *mapping = file->f_mapping;
63502-
63503- get_file(file);
63504- if (tmp->vm_flags & VM_DENYWRITE)
63505- atomic_dec(&inode->i_writecount);
63506- mutex_lock(&mapping->i_mmap_mutex);
63507- if (tmp->vm_flags & VM_SHARED)
63508- mapping->i_mmap_writable++;
63509- flush_dcache_mmap_lock(mapping);
63510- /* insert tmp into the share list, just after mpnt */
63511- vma_prio_tree_add(tmp, mpnt);
63512- flush_dcache_mmap_unlock(mapping);
63513- mutex_unlock(&mapping->i_mmap_mutex);
63514- }
63515-
63516- /*
63517- * Clear hugetlb-related page reserves for children. This only
63518- * affects MAP_PRIVATE mappings. Faults generated by the child
63519- * are not guaranteed to succeed, even if read-only
63520- */
63521- if (is_vm_hugetlb_page(tmp))
63522- reset_vma_resv_huge_pages(tmp);
63523
63524 /*
63525 * Link in the new vma and copy the page table entries.
63526@@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63527 if (retval)
63528 goto out;
63529 }
63530+
63531+#ifdef CONFIG_PAX_SEGMEXEC
63532+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63533+ struct vm_area_struct *mpnt_m;
63534+
63535+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63536+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63537+
63538+ if (!mpnt->vm_mirror)
63539+ continue;
63540+
63541+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63542+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63543+ mpnt->vm_mirror = mpnt_m;
63544+ } else {
63545+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63546+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63547+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63548+ mpnt->vm_mirror->vm_mirror = mpnt;
63549+ }
63550+ }
63551+ BUG_ON(mpnt_m);
63552+ }
63553+#endif
63554+
63555 /* a new mm has just been created */
63556 arch_dup_mmap(oldmm, mm);
63557 retval = 0;
63558@@ -425,14 +470,6 @@ out:
63559 flush_tlb_mm(oldmm);
63560 up_write(&oldmm->mmap_sem);
63561 return retval;
63562-fail_nomem_anon_vma_fork:
63563- mpol_put(pol);
63564-fail_nomem_policy:
63565- kmem_cache_free(vm_area_cachep, tmp);
63566-fail_nomem:
63567- retval = -ENOMEM;
63568- vm_unacct_memory(charge);
63569- goto out;
63570 }
63571
63572 static inline int mm_alloc_pgd(struct mm_struct *mm)
63573@@ -829,13 +866,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63574 spin_unlock(&fs->lock);
63575 return -EAGAIN;
63576 }
63577- fs->users++;
63578+ atomic_inc(&fs->users);
63579 spin_unlock(&fs->lock);
63580 return 0;
63581 }
63582 tsk->fs = copy_fs_struct(fs);
63583 if (!tsk->fs)
63584 return -ENOMEM;
63585+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63586 return 0;
63587 }
63588
63589@@ -1097,6 +1135,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63590 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63591 #endif
63592 retval = -EAGAIN;
63593+
63594+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63595+
63596 if (atomic_read(&p->real_cred->user->processes) >=
63597 task_rlimit(p, RLIMIT_NPROC)) {
63598 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63599@@ -1256,6 +1297,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63600 if (clone_flags & CLONE_THREAD)
63601 p->tgid = current->tgid;
63602
63603+ gr_copy_label(p);
63604+
63605 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63606 /*
63607 * Clear TID on mm_release()?
63608@@ -1418,6 +1461,8 @@ bad_fork_cleanup_count:
63609 bad_fork_free:
63610 free_task(p);
63611 fork_out:
63612+ gr_log_forkfail(retval);
63613+
63614 return ERR_PTR(retval);
63615 }
63616
63617@@ -1518,6 +1563,8 @@ long do_fork(unsigned long clone_flags,
63618 if (clone_flags & CLONE_PARENT_SETTID)
63619 put_user(nr, parent_tidptr);
63620
63621+ gr_handle_brute_check();
63622+
63623 if (clone_flags & CLONE_VFORK) {
63624 p->vfork_done = &vfork;
63625 init_completion(&vfork);
63626@@ -1627,7 +1674,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63627 return 0;
63628
63629 /* don't need lock here; in the worst case we'll do useless copy */
63630- if (fs->users == 1)
63631+ if (atomic_read(&fs->users) == 1)
63632 return 0;
63633
63634 *new_fsp = copy_fs_struct(fs);
63635@@ -1716,7 +1763,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63636 fs = current->fs;
63637 spin_lock(&fs->lock);
63638 current->fs = new_fs;
63639- if (--fs->users)
63640+ gr_set_chroot_entries(current, &current->fs->root);
63641+ if (atomic_dec_return(&fs->users))
63642 new_fs = NULL;
63643 else
63644 new_fs = fs;
63645diff --git a/kernel/futex.c b/kernel/futex.c
63646index 1614be2..37abc7e 100644
63647--- a/kernel/futex.c
63648+++ b/kernel/futex.c
63649@@ -54,6 +54,7 @@
63650 #include <linux/mount.h>
63651 #include <linux/pagemap.h>
63652 #include <linux/syscalls.h>
63653+#include <linux/ptrace.h>
63654 #include <linux/signal.h>
63655 #include <linux/export.h>
63656 #include <linux/magic.h>
63657@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63658 struct page *page, *page_head;
63659 int err, ro = 0;
63660
63661+#ifdef CONFIG_PAX_SEGMEXEC
63662+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63663+ return -EFAULT;
63664+#endif
63665+
63666 /*
63667 * The futex address must be "naturally" aligned.
63668 */
63669@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63670 if (!p)
63671 goto err_unlock;
63672 ret = -EPERM;
63673+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63674+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63675+ goto err_unlock;
63676+#endif
63677 pcred = __task_cred(p);
63678 /* If victim is in different user_ns, then uids are not
63679 comparable, so we must have CAP_SYS_PTRACE */
63680@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63681 {
63682 u32 curval;
63683 int i;
63684+ mm_segment_t oldfs;
63685
63686 /*
63687 * This will fail and we want it. Some arch implementations do
63688@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63689 * implementation, the non-functional ones will return
63690 * -ENOSYS.
63691 */
63692+ oldfs = get_fs();
63693+ set_fs(USER_DS);
63694 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63695 futex_cmpxchg_enabled = 1;
63696+ set_fs(oldfs);
63697
63698 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63699 plist_head_init(&futex_queues[i].chain);
63700diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63701index 5f9e689..582d46d 100644
63702--- a/kernel/futex_compat.c
63703+++ b/kernel/futex_compat.c
63704@@ -10,6 +10,7 @@
63705 #include <linux/compat.h>
63706 #include <linux/nsproxy.h>
63707 #include <linux/futex.h>
63708+#include <linux/ptrace.h>
63709
63710 #include <asm/uaccess.h>
63711
63712@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63713 {
63714 struct compat_robust_list_head __user *head;
63715 unsigned long ret;
63716- const struct cred *cred = current_cred(), *pcred;
63717+ const struct cred *cred = current_cred();
63718+ const struct cred *pcred;
63719
63720 if (!futex_cmpxchg_enabled)
63721 return -ENOSYS;
63722@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63723 if (!p)
63724 goto err_unlock;
63725 ret = -EPERM;
63726+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63727+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63728+ goto err_unlock;
63729+#endif
63730 pcred = __task_cred(p);
63731 /* If victim is in different user_ns, then uids are not
63732 comparable, so we must have CAP_SYS_PTRACE */
63733diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63734index 9b22d03..6295b62 100644
63735--- a/kernel/gcov/base.c
63736+++ b/kernel/gcov/base.c
63737@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63738 }
63739
63740 #ifdef CONFIG_MODULES
63741-static inline int within(void *addr, void *start, unsigned long size)
63742-{
63743- return ((addr >= start) && (addr < start + size));
63744-}
63745-
63746 /* Update list and generate events when modules are unloaded. */
63747 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63748 void *data)
63749@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63750 prev = NULL;
63751 /* Remove entries located in module from linked list. */
63752 for (info = gcov_info_head; info; info = info->next) {
63753- if (within(info, mod->module_core, mod->core_size)) {
63754+ if (within_module_core_rw((unsigned long)info, mod)) {
63755 if (prev)
63756 prev->next = info->next;
63757 else
63758diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63759index ae34bf5..4e2f3d0 100644
63760--- a/kernel/hrtimer.c
63761+++ b/kernel/hrtimer.c
63762@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63763 local_irq_restore(flags);
63764 }
63765
63766-static void run_hrtimer_softirq(struct softirq_action *h)
63767+static void run_hrtimer_softirq(void)
63768 {
63769 hrtimer_peek_ahead_timers();
63770 }
63771diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63772index 66ff710..05a5128 100644
63773--- a/kernel/jump_label.c
63774+++ b/kernel/jump_label.c
63775@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63776
63777 size = (((unsigned long)stop - (unsigned long)start)
63778 / sizeof(struct jump_entry));
63779+ pax_open_kernel();
63780 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63781+ pax_close_kernel();
63782 }
63783
63784 static void jump_label_update(struct jump_label_key *key, int enable);
63785@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63786 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63787 struct jump_entry *iter;
63788
63789+ pax_open_kernel();
63790 for (iter = iter_start; iter < iter_stop; iter++) {
63791 if (within_module_init(iter->code, mod))
63792 iter->code = 0;
63793 }
63794+ pax_close_kernel();
63795 }
63796
63797 static int
63798diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63799index 079f1d3..a407562 100644
63800--- a/kernel/kallsyms.c
63801+++ b/kernel/kallsyms.c
63802@@ -11,6 +11,9 @@
63803 * Changed the compression method from stem compression to "table lookup"
63804 * compression (see scripts/kallsyms.c for a more complete description)
63805 */
63806+#ifdef CONFIG_GRKERNSEC_HIDESYM
63807+#define __INCLUDED_BY_HIDESYM 1
63808+#endif
63809 #include <linux/kallsyms.h>
63810 #include <linux/module.h>
63811 #include <linux/init.h>
63812@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63813
63814 static inline int is_kernel_inittext(unsigned long addr)
63815 {
63816+ if (system_state != SYSTEM_BOOTING)
63817+ return 0;
63818+
63819 if (addr >= (unsigned long)_sinittext
63820 && addr <= (unsigned long)_einittext)
63821 return 1;
63822 return 0;
63823 }
63824
63825+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63826+#ifdef CONFIG_MODULES
63827+static inline int is_module_text(unsigned long addr)
63828+{
63829+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63830+ return 1;
63831+
63832+ addr = ktla_ktva(addr);
63833+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63834+}
63835+#else
63836+static inline int is_module_text(unsigned long addr)
63837+{
63838+ return 0;
63839+}
63840+#endif
63841+#endif
63842+
63843 static inline int is_kernel_text(unsigned long addr)
63844 {
63845 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63846@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63847
63848 static inline int is_kernel(unsigned long addr)
63849 {
63850+
63851+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63852+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63853+ return 1;
63854+
63855+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63856+#else
63857 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63858+#endif
63859+
63860 return 1;
63861 return in_gate_area_no_mm(addr);
63862 }
63863
63864 static int is_ksym_addr(unsigned long addr)
63865 {
63866+
63867+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63868+ if (is_module_text(addr))
63869+ return 0;
63870+#endif
63871+
63872 if (all_var)
63873 return is_kernel(addr);
63874
63875@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63876
63877 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63878 {
63879- iter->name[0] = '\0';
63880 iter->nameoff = get_symbol_offset(new_pos);
63881 iter->pos = new_pos;
63882 }
63883@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63884 {
63885 struct kallsym_iter *iter = m->private;
63886
63887+#ifdef CONFIG_GRKERNSEC_HIDESYM
63888+ if (current_uid())
63889+ return 0;
63890+#endif
63891+
63892 /* Some debugging symbols have no name. Ignore them. */
63893 if (!iter->name[0])
63894 return 0;
63895@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63896 struct kallsym_iter *iter;
63897 int ret;
63898
63899- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63900+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63901 if (!iter)
63902 return -ENOMEM;
63903 reset_iter(iter, 0);
63904diff --git a/kernel/kexec.c b/kernel/kexec.c
63905index dc7bc08..4601964 100644
63906--- a/kernel/kexec.c
63907+++ b/kernel/kexec.c
63908@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63909 unsigned long flags)
63910 {
63911 struct compat_kexec_segment in;
63912- struct kexec_segment out, __user *ksegments;
63913+ struct kexec_segment out;
63914+ struct kexec_segment __user *ksegments;
63915 unsigned long i, result;
63916
63917 /* Don't allow clients that don't understand the native
63918diff --git a/kernel/kmod.c b/kernel/kmod.c
63919index a4bea97..7a1ae9a 100644
63920--- a/kernel/kmod.c
63921+++ b/kernel/kmod.c
63922@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63923 * If module auto-loading support is disabled then this function
63924 * becomes a no-operation.
63925 */
63926-int __request_module(bool wait, const char *fmt, ...)
63927+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63928 {
63929- va_list args;
63930 char module_name[MODULE_NAME_LEN];
63931 unsigned int max_modprobes;
63932 int ret;
63933- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63934+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63935 static char *envp[] = { "HOME=/",
63936 "TERM=linux",
63937 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63938@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63939 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63940 static int kmod_loop_msg;
63941
63942- va_start(args, fmt);
63943- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63944- va_end(args);
63945+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63946 if (ret >= MODULE_NAME_LEN)
63947 return -ENAMETOOLONG;
63948
63949@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63950 if (ret)
63951 return ret;
63952
63953+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63954+ if (!current_uid()) {
63955+ /* hack to workaround consolekit/udisks stupidity */
63956+ read_lock(&tasklist_lock);
63957+ if (!strcmp(current->comm, "mount") &&
63958+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63959+ read_unlock(&tasklist_lock);
63960+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63961+ return -EPERM;
63962+ }
63963+ read_unlock(&tasklist_lock);
63964+ }
63965+#endif
63966+
63967 /* If modprobe needs a service that is in a module, we get a recursive
63968 * loop. Limit the number of running kmod threads to max_threads/2 or
63969 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63970@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
63971 atomic_dec(&kmod_concurrent);
63972 return ret;
63973 }
63974+
63975+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63976+{
63977+ va_list args;
63978+ int ret;
63979+
63980+ va_start(args, fmt);
63981+ ret = ____request_module(wait, module_param, fmt, args);
63982+ va_end(args);
63983+
63984+ return ret;
63985+}
63986+
63987+int __request_module(bool wait, const char *fmt, ...)
63988+{
63989+ va_list args;
63990+ int ret;
63991+
63992+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63993+ if (current_uid()) {
63994+ char module_param[MODULE_NAME_LEN];
63995+
63996+ memset(module_param, 0, sizeof(module_param));
63997+
63998+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63999+
64000+ va_start(args, fmt);
64001+ ret = ____request_module(wait, module_param, fmt, args);
64002+ va_end(args);
64003+
64004+ return ret;
64005+ }
64006+#endif
64007+
64008+ va_start(args, fmt);
64009+ ret = ____request_module(wait, NULL, fmt, args);
64010+ va_end(args);
64011+
64012+ return ret;
64013+}
64014+
64015 EXPORT_SYMBOL(__request_module);
64016 #endif /* CONFIG_MODULES */
64017
64018@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64019 *
64020 * Thus the __user pointer cast is valid here.
64021 */
64022- sys_wait4(pid, (int __user *)&ret, 0, NULL);
64023+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64024
64025 /*
64026 * If ret is 0, either ____call_usermodehelper failed and the
64027diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64028index 52fd049..3def6a8 100644
64029--- a/kernel/kprobes.c
64030+++ b/kernel/kprobes.c
64031@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64032 * kernel image and loaded module images reside. This is required
64033 * so x86_64 can correctly handle the %rip-relative fixups.
64034 */
64035- kip->insns = module_alloc(PAGE_SIZE);
64036+ kip->insns = module_alloc_exec(PAGE_SIZE);
64037 if (!kip->insns) {
64038 kfree(kip);
64039 return NULL;
64040@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64041 */
64042 if (!list_is_singular(&kip->list)) {
64043 list_del(&kip->list);
64044- module_free(NULL, kip->insns);
64045+ module_free_exec(NULL, kip->insns);
64046 kfree(kip);
64047 }
64048 return 1;
64049@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
64050 {
64051 int i, err = 0;
64052 unsigned long offset = 0, size = 0;
64053- char *modname, namebuf[128];
64054+ char *modname, namebuf[KSYM_NAME_LEN];
64055 const char *symbol_name;
64056 void *addr;
64057 struct kprobe_blackpoint *kb;
64058@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64059 const char *sym = NULL;
64060 unsigned int i = *(loff_t *) v;
64061 unsigned long offset = 0;
64062- char *modname, namebuf[128];
64063+ char *modname, namebuf[KSYM_NAME_LEN];
64064
64065 head = &kprobe_table[i];
64066 preempt_disable();
64067diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64068index b2e08c9..01d8049 100644
64069--- a/kernel/lockdep.c
64070+++ b/kernel/lockdep.c
64071@@ -592,6 +592,10 @@ static int static_obj(void *obj)
64072 end = (unsigned long) &_end,
64073 addr = (unsigned long) obj;
64074
64075+#ifdef CONFIG_PAX_KERNEXEC
64076+ start = ktla_ktva(start);
64077+#endif
64078+
64079 /*
64080 * static variable?
64081 */
64082@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64083 if (!static_obj(lock->key)) {
64084 debug_locks_off();
64085 printk("INFO: trying to register non-static key.\n");
64086+ printk("lock:%pS key:%pS.\n", lock, lock->key);
64087 printk("the code is fine but needs lockdep annotation.\n");
64088 printk("turning off the locking correctness validator.\n");
64089 dump_stack();
64090@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64091 if (!class)
64092 return 0;
64093 }
64094- atomic_inc((atomic_t *)&class->ops);
64095+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64096 if (very_verbose(class)) {
64097 printk("\nacquire class [%p] %s", class->key, class->name);
64098 if (class->name_version > 1)
64099diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64100index 91c32a0..b2c71c5 100644
64101--- a/kernel/lockdep_proc.c
64102+++ b/kernel/lockdep_proc.c
64103@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64104
64105 static void print_name(struct seq_file *m, struct lock_class *class)
64106 {
64107- char str[128];
64108+ char str[KSYM_NAME_LEN];
64109 const char *name = class->name;
64110
64111 if (!name) {
64112diff --git a/kernel/module.c b/kernel/module.c
64113index 178333c..04e3408 100644
64114--- a/kernel/module.c
64115+++ b/kernel/module.c
64116@@ -58,6 +58,7 @@
64117 #include <linux/jump_label.h>
64118 #include <linux/pfn.h>
64119 #include <linux/bsearch.h>
64120+#include <linux/grsecurity.h>
64121
64122 #define CREATE_TRACE_POINTS
64123 #include <trace/events/module.h>
64124@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64125
64126 /* Bounds of module allocation, for speeding __module_address.
64127 * Protected by module_mutex. */
64128-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64129+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64130+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64131
64132 int register_module_notifier(struct notifier_block * nb)
64133 {
64134@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64135 return true;
64136
64137 list_for_each_entry_rcu(mod, &modules, list) {
64138- struct symsearch arr[] = {
64139+ struct symsearch modarr[] = {
64140 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64141 NOT_GPL_ONLY, false },
64142 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64143@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64144 #endif
64145 };
64146
64147- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64148+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64149 return true;
64150 }
64151 return false;
64152@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64153 static int percpu_modalloc(struct module *mod,
64154 unsigned long size, unsigned long align)
64155 {
64156- if (align > PAGE_SIZE) {
64157+ if (align-1 >= PAGE_SIZE) {
64158 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64159 mod->name, align, PAGE_SIZE);
64160 align = PAGE_SIZE;
64161@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64162 */
64163 #ifdef CONFIG_SYSFS
64164
64165-#ifdef CONFIG_KALLSYMS
64166+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64167 static inline bool sect_empty(const Elf_Shdr *sect)
64168 {
64169 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64170@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64171
64172 static void unset_module_core_ro_nx(struct module *mod)
64173 {
64174- set_page_attributes(mod->module_core + mod->core_text_size,
64175- mod->module_core + mod->core_size,
64176+ set_page_attributes(mod->module_core_rw,
64177+ mod->module_core_rw + mod->core_size_rw,
64178 set_memory_x);
64179- set_page_attributes(mod->module_core,
64180- mod->module_core + mod->core_ro_size,
64181+ set_page_attributes(mod->module_core_rx,
64182+ mod->module_core_rx + mod->core_size_rx,
64183 set_memory_rw);
64184 }
64185
64186 static void unset_module_init_ro_nx(struct module *mod)
64187 {
64188- set_page_attributes(mod->module_init + mod->init_text_size,
64189- mod->module_init + mod->init_size,
64190+ set_page_attributes(mod->module_init_rw,
64191+ mod->module_init_rw + mod->init_size_rw,
64192 set_memory_x);
64193- set_page_attributes(mod->module_init,
64194- mod->module_init + mod->init_ro_size,
64195+ set_page_attributes(mod->module_init_rx,
64196+ mod->module_init_rx + mod->init_size_rx,
64197 set_memory_rw);
64198 }
64199
64200@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64201
64202 mutex_lock(&module_mutex);
64203 list_for_each_entry_rcu(mod, &modules, list) {
64204- if ((mod->module_core) && (mod->core_text_size)) {
64205- set_page_attributes(mod->module_core,
64206- mod->module_core + mod->core_text_size,
64207+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64208+ set_page_attributes(mod->module_core_rx,
64209+ mod->module_core_rx + mod->core_size_rx,
64210 set_memory_rw);
64211 }
64212- if ((mod->module_init) && (mod->init_text_size)) {
64213- set_page_attributes(mod->module_init,
64214- mod->module_init + mod->init_text_size,
64215+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64216+ set_page_attributes(mod->module_init_rx,
64217+ mod->module_init_rx + mod->init_size_rx,
64218 set_memory_rw);
64219 }
64220 }
64221@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64222
64223 mutex_lock(&module_mutex);
64224 list_for_each_entry_rcu(mod, &modules, list) {
64225- if ((mod->module_core) && (mod->core_text_size)) {
64226- set_page_attributes(mod->module_core,
64227- mod->module_core + mod->core_text_size,
64228+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64229+ set_page_attributes(mod->module_core_rx,
64230+ mod->module_core_rx + mod->core_size_rx,
64231 set_memory_ro);
64232 }
64233- if ((mod->module_init) && (mod->init_text_size)) {
64234- set_page_attributes(mod->module_init,
64235- mod->module_init + mod->init_text_size,
64236+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64237+ set_page_attributes(mod->module_init_rx,
64238+ mod->module_init_rx + mod->init_size_rx,
64239 set_memory_ro);
64240 }
64241 }
64242@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64243
64244 /* This may be NULL, but that's OK */
64245 unset_module_init_ro_nx(mod);
64246- module_free(mod, mod->module_init);
64247+ module_free(mod, mod->module_init_rw);
64248+ module_free_exec(mod, mod->module_init_rx);
64249 kfree(mod->args);
64250 percpu_modfree(mod);
64251
64252 /* Free lock-classes: */
64253- lockdep_free_key_range(mod->module_core, mod->core_size);
64254+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64255+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64256
64257 /* Finally, free the core (containing the module structure) */
64258 unset_module_core_ro_nx(mod);
64259- module_free(mod, mod->module_core);
64260+ module_free_exec(mod, mod->module_core_rx);
64261+ module_free(mod, mod->module_core_rw);
64262
64263 #ifdef CONFIG_MPU
64264 update_protections(current->mm);
64265@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64266 unsigned int i;
64267 int ret = 0;
64268 const struct kernel_symbol *ksym;
64269+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64270+ int is_fs_load = 0;
64271+ int register_filesystem_found = 0;
64272+ char *p;
64273+
64274+ p = strstr(mod->args, "grsec_modharden_fs");
64275+ if (p) {
64276+ char *endptr = p + strlen("grsec_modharden_fs");
64277+ /* copy \0 as well */
64278+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64279+ is_fs_load = 1;
64280+ }
64281+#endif
64282
64283 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64284 const char *name = info->strtab + sym[i].st_name;
64285
64286+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64287+ /* it's a real shame this will never get ripped and copied
64288+ upstream! ;(
64289+ */
64290+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64291+ register_filesystem_found = 1;
64292+#endif
64293+
64294 switch (sym[i].st_shndx) {
64295 case SHN_COMMON:
64296 /* We compiled with -fno-common. These are not
64297@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64298 ksym = resolve_symbol_wait(mod, info, name);
64299 /* Ok if resolved. */
64300 if (ksym && !IS_ERR(ksym)) {
64301+ pax_open_kernel();
64302 sym[i].st_value = ksym->value;
64303+ pax_close_kernel();
64304 break;
64305 }
64306
64307@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64308 secbase = (unsigned long)mod_percpu(mod);
64309 else
64310 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64311+ pax_open_kernel();
64312 sym[i].st_value += secbase;
64313+ pax_close_kernel();
64314 break;
64315 }
64316 }
64317
64318+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64319+ if (is_fs_load && !register_filesystem_found) {
64320+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64321+ ret = -EPERM;
64322+ }
64323+#endif
64324+
64325 return ret;
64326 }
64327
64328@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64329 || s->sh_entsize != ~0UL
64330 || strstarts(sname, ".init"))
64331 continue;
64332- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64333+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64334+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64335+ else
64336+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64337 DEBUGP("\t%s\n", name);
64338 }
64339- switch (m) {
64340- case 0: /* executable */
64341- mod->core_size = debug_align(mod->core_size);
64342- mod->core_text_size = mod->core_size;
64343- break;
64344- case 1: /* RO: text and ro-data */
64345- mod->core_size = debug_align(mod->core_size);
64346- mod->core_ro_size = mod->core_size;
64347- break;
64348- case 3: /* whole core */
64349- mod->core_size = debug_align(mod->core_size);
64350- break;
64351- }
64352 }
64353
64354 DEBUGP("Init section allocation order:\n");
64355@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64356 || s->sh_entsize != ~0UL
64357 || !strstarts(sname, ".init"))
64358 continue;
64359- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64360- | INIT_OFFSET_MASK);
64361+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64362+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64363+ else
64364+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64365+ s->sh_entsize |= INIT_OFFSET_MASK;
64366 DEBUGP("\t%s\n", sname);
64367 }
64368- switch (m) {
64369- case 0: /* executable */
64370- mod->init_size = debug_align(mod->init_size);
64371- mod->init_text_size = mod->init_size;
64372- break;
64373- case 1: /* RO: text and ro-data */
64374- mod->init_size = debug_align(mod->init_size);
64375- mod->init_ro_size = mod->init_size;
64376- break;
64377- case 3: /* whole init */
64378- mod->init_size = debug_align(mod->init_size);
64379- break;
64380- }
64381 }
64382 }
64383
64384@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64385
64386 /* Put symbol section at end of init part of module. */
64387 symsect->sh_flags |= SHF_ALLOC;
64388- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64389+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64390 info->index.sym) | INIT_OFFSET_MASK;
64391 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64392
64393@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64394 }
64395
64396 /* Append room for core symbols at end of core part. */
64397- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64398- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64399+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64400+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64401
64402 /* Put string table section at end of init part of module. */
64403 strsect->sh_flags |= SHF_ALLOC;
64404- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64405+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64406 info->index.str) | INIT_OFFSET_MASK;
64407 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64408
64409 /* Append room for core symbols' strings at end of core part. */
64410- info->stroffs = mod->core_size;
64411+ info->stroffs = mod->core_size_rx;
64412 __set_bit(0, info->strmap);
64413- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64414+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64415 }
64416
64417 static void add_kallsyms(struct module *mod, const struct load_info *info)
64418@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64419 /* Make sure we get permanent strtab: don't use info->strtab. */
64420 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64421
64422+ pax_open_kernel();
64423+
64424 /* Set types up while we still have access to sections. */
64425 for (i = 0; i < mod->num_symtab; i++)
64426 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64427
64428- mod->core_symtab = dst = mod->module_core + info->symoffs;
64429+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64430 src = mod->symtab;
64431 *dst = *src;
64432 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64433@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64434 }
64435 mod->core_num_syms = ndst;
64436
64437- mod->core_strtab = s = mod->module_core + info->stroffs;
64438+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64439 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64440 if (test_bit(i, info->strmap))
64441 *++s = mod->strtab[i];
64442+
64443+ pax_close_kernel();
64444 }
64445 #else
64446 static inline void layout_symtab(struct module *mod, struct load_info *info)
64447@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64448 return size == 0 ? NULL : vmalloc_exec(size);
64449 }
64450
64451-static void *module_alloc_update_bounds(unsigned long size)
64452+static void *module_alloc_update_bounds_rw(unsigned long size)
64453 {
64454 void *ret = module_alloc(size);
64455
64456 if (ret) {
64457 mutex_lock(&module_mutex);
64458 /* Update module bounds. */
64459- if ((unsigned long)ret < module_addr_min)
64460- module_addr_min = (unsigned long)ret;
64461- if ((unsigned long)ret + size > module_addr_max)
64462- module_addr_max = (unsigned long)ret + size;
64463+ if ((unsigned long)ret < module_addr_min_rw)
64464+ module_addr_min_rw = (unsigned long)ret;
64465+ if ((unsigned long)ret + size > module_addr_max_rw)
64466+ module_addr_max_rw = (unsigned long)ret + size;
64467+ mutex_unlock(&module_mutex);
64468+ }
64469+ return ret;
64470+}
64471+
64472+static void *module_alloc_update_bounds_rx(unsigned long size)
64473+{
64474+ void *ret = module_alloc_exec(size);
64475+
64476+ if (ret) {
64477+ mutex_lock(&module_mutex);
64478+ /* Update module bounds. */
64479+ if ((unsigned long)ret < module_addr_min_rx)
64480+ module_addr_min_rx = (unsigned long)ret;
64481+ if ((unsigned long)ret + size > module_addr_max_rx)
64482+ module_addr_max_rx = (unsigned long)ret + size;
64483 mutex_unlock(&module_mutex);
64484 }
64485 return ret;
64486@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64487 static int check_modinfo(struct module *mod, struct load_info *info)
64488 {
64489 const char *modmagic = get_modinfo(info, "vermagic");
64490+ const char *license = get_modinfo(info, "license");
64491 int err;
64492
64493+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64494+ if (!license || !license_is_gpl_compatible(license))
64495+ return -ENOEXEC;
64496+#endif
64497+
64498 /* This is allowed: modprobe --force will invalidate it. */
64499 if (!modmagic) {
64500 err = try_to_force_load(mod, "bad vermagic");
64501@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64502 }
64503
64504 /* Set up license info based on the info section */
64505- set_license(mod, get_modinfo(info, "license"));
64506+ set_license(mod, license);
64507
64508 return 0;
64509 }
64510@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64511 void *ptr;
64512
64513 /* Do the allocs. */
64514- ptr = module_alloc_update_bounds(mod->core_size);
64515+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64516 /*
64517 * The pointer to this block is stored in the module structure
64518 * which is inside the block. Just mark it as not being a
64519@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64520 if (!ptr)
64521 return -ENOMEM;
64522
64523- memset(ptr, 0, mod->core_size);
64524- mod->module_core = ptr;
64525+ memset(ptr, 0, mod->core_size_rw);
64526+ mod->module_core_rw = ptr;
64527
64528- ptr = module_alloc_update_bounds(mod->init_size);
64529+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64530 /*
64531 * The pointer to this block is stored in the module structure
64532 * which is inside the block. This block doesn't need to be
64533 * scanned as it contains data and code that will be freed
64534 * after the module is initialized.
64535 */
64536- kmemleak_ignore(ptr);
64537- if (!ptr && mod->init_size) {
64538- module_free(mod, mod->module_core);
64539+ kmemleak_not_leak(ptr);
64540+ if (!ptr && mod->init_size_rw) {
64541+ module_free(mod, mod->module_core_rw);
64542 return -ENOMEM;
64543 }
64544- memset(ptr, 0, mod->init_size);
64545- mod->module_init = ptr;
64546+ memset(ptr, 0, mod->init_size_rw);
64547+ mod->module_init_rw = ptr;
64548+
64549+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64550+ kmemleak_not_leak(ptr);
64551+ if (!ptr) {
64552+ module_free(mod, mod->module_init_rw);
64553+ module_free(mod, mod->module_core_rw);
64554+ return -ENOMEM;
64555+ }
64556+
64557+ pax_open_kernel();
64558+ memset(ptr, 0, mod->core_size_rx);
64559+ pax_close_kernel();
64560+ mod->module_core_rx = ptr;
64561+
64562+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64563+ kmemleak_not_leak(ptr);
64564+ if (!ptr && mod->init_size_rx) {
64565+ module_free_exec(mod, mod->module_core_rx);
64566+ module_free(mod, mod->module_init_rw);
64567+ module_free(mod, mod->module_core_rw);
64568+ return -ENOMEM;
64569+ }
64570+
64571+ pax_open_kernel();
64572+ memset(ptr, 0, mod->init_size_rx);
64573+ pax_close_kernel();
64574+ mod->module_init_rx = ptr;
64575
64576 /* Transfer each section which specifies SHF_ALLOC */
64577 DEBUGP("final section addresses:\n");
64578@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64579 if (!(shdr->sh_flags & SHF_ALLOC))
64580 continue;
64581
64582- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64583- dest = mod->module_init
64584- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64585- else
64586- dest = mod->module_core + shdr->sh_entsize;
64587+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64588+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64589+ dest = mod->module_init_rw
64590+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64591+ else
64592+ dest = mod->module_init_rx
64593+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64594+ } else {
64595+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64596+ dest = mod->module_core_rw + shdr->sh_entsize;
64597+ else
64598+ dest = mod->module_core_rx + shdr->sh_entsize;
64599+ }
64600+
64601+ if (shdr->sh_type != SHT_NOBITS) {
64602+
64603+#ifdef CONFIG_PAX_KERNEXEC
64604+#ifdef CONFIG_X86_64
64605+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64606+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64607+#endif
64608+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64609+ pax_open_kernel();
64610+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64611+ pax_close_kernel();
64612+ } else
64613+#endif
64614
64615- if (shdr->sh_type != SHT_NOBITS)
64616 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64617+ }
64618 /* Update sh_addr to point to copy in image. */
64619- shdr->sh_addr = (unsigned long)dest;
64620+
64621+#ifdef CONFIG_PAX_KERNEXEC
64622+ if (shdr->sh_flags & SHF_EXECINSTR)
64623+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64624+ else
64625+#endif
64626+
64627+ shdr->sh_addr = (unsigned long)dest;
64628 DEBUGP("\t0x%lx %s\n",
64629 shdr->sh_addr, info->secstrings + shdr->sh_name);
64630 }
64631@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64632 * Do it before processing of module parameters, so the module
64633 * can provide parameter accessor functions of its own.
64634 */
64635- if (mod->module_init)
64636- flush_icache_range((unsigned long)mod->module_init,
64637- (unsigned long)mod->module_init
64638- + mod->init_size);
64639- flush_icache_range((unsigned long)mod->module_core,
64640- (unsigned long)mod->module_core + mod->core_size);
64641+ if (mod->module_init_rx)
64642+ flush_icache_range((unsigned long)mod->module_init_rx,
64643+ (unsigned long)mod->module_init_rx
64644+ + mod->init_size_rx);
64645+ flush_icache_range((unsigned long)mod->module_core_rx,
64646+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64647
64648 set_fs(old_fs);
64649 }
64650@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64651 {
64652 kfree(info->strmap);
64653 percpu_modfree(mod);
64654- module_free(mod, mod->module_init);
64655- module_free(mod, mod->module_core);
64656+ module_free_exec(mod, mod->module_init_rx);
64657+ module_free_exec(mod, mod->module_core_rx);
64658+ module_free(mod, mod->module_init_rw);
64659+ module_free(mod, mod->module_core_rw);
64660 }
64661
64662 int __weak module_finalize(const Elf_Ehdr *hdr,
64663@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64664 if (err)
64665 goto free_unload;
64666
64667+ /* Now copy in args */
64668+ mod->args = strndup_user(uargs, ~0UL >> 1);
64669+ if (IS_ERR(mod->args)) {
64670+ err = PTR_ERR(mod->args);
64671+ goto free_unload;
64672+ }
64673+
64674 /* Set up MODINFO_ATTR fields */
64675 setup_modinfo(mod, &info);
64676
64677+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64678+ {
64679+ char *p, *p2;
64680+
64681+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64682+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64683+ err = -EPERM;
64684+ goto free_modinfo;
64685+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64686+ p += strlen("grsec_modharden_normal");
64687+ p2 = strstr(p, "_");
64688+ if (p2) {
64689+ *p2 = '\0';
64690+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64691+ *p2 = '_';
64692+ }
64693+ err = -EPERM;
64694+ goto free_modinfo;
64695+ }
64696+ }
64697+#endif
64698+
64699 /* Fix up syms, so that st_value is a pointer to location. */
64700 err = simplify_symbols(mod, &info);
64701 if (err < 0)
64702@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64703
64704 flush_module_icache(mod);
64705
64706- /* Now copy in args */
64707- mod->args = strndup_user(uargs, ~0UL >> 1);
64708- if (IS_ERR(mod->args)) {
64709- err = PTR_ERR(mod->args);
64710- goto free_arch_cleanup;
64711- }
64712-
64713 /* Mark state as coming so strong_try_module_get() ignores us. */
64714 mod->state = MODULE_STATE_COMING;
64715
64716@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64717 unlock:
64718 mutex_unlock(&module_mutex);
64719 synchronize_sched();
64720- kfree(mod->args);
64721- free_arch_cleanup:
64722 module_arch_cleanup(mod);
64723 free_modinfo:
64724 free_modinfo(mod);
64725+ kfree(mod->args);
64726 free_unload:
64727 module_unload_free(mod);
64728 free_module:
64729@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64730 MODULE_STATE_COMING, mod);
64731
64732 /* Set RO and NX regions for core */
64733- set_section_ro_nx(mod->module_core,
64734- mod->core_text_size,
64735- mod->core_ro_size,
64736- mod->core_size);
64737+ set_section_ro_nx(mod->module_core_rx,
64738+ mod->core_size_rx,
64739+ mod->core_size_rx,
64740+ mod->core_size_rx);
64741
64742 /* Set RO and NX regions for init */
64743- set_section_ro_nx(mod->module_init,
64744- mod->init_text_size,
64745- mod->init_ro_size,
64746- mod->init_size);
64747+ set_section_ro_nx(mod->module_init_rx,
64748+ mod->init_size_rx,
64749+ mod->init_size_rx,
64750+ mod->init_size_rx);
64751
64752 do_mod_ctors(mod);
64753 /* Start the module */
64754@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64755 mod->strtab = mod->core_strtab;
64756 #endif
64757 unset_module_init_ro_nx(mod);
64758- module_free(mod, mod->module_init);
64759- mod->module_init = NULL;
64760- mod->init_size = 0;
64761- mod->init_ro_size = 0;
64762- mod->init_text_size = 0;
64763+ module_free(mod, mod->module_init_rw);
64764+ module_free_exec(mod, mod->module_init_rx);
64765+ mod->module_init_rw = NULL;
64766+ mod->module_init_rx = NULL;
64767+ mod->init_size_rw = 0;
64768+ mod->init_size_rx = 0;
64769 mutex_unlock(&module_mutex);
64770
64771 return 0;
64772@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64773 unsigned long nextval;
64774
64775 /* At worse, next value is at end of module */
64776- if (within_module_init(addr, mod))
64777- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64778+ if (within_module_init_rx(addr, mod))
64779+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64780+ else if (within_module_init_rw(addr, mod))
64781+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64782+ else if (within_module_core_rx(addr, mod))
64783+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64784+ else if (within_module_core_rw(addr, mod))
64785+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64786 else
64787- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64788+ return NULL;
64789
64790 /* Scan for closest preceding symbol, and next symbol. (ELF
64791 starts real symbols at 1). */
64792@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64793 char buf[8];
64794
64795 seq_printf(m, "%s %u",
64796- mod->name, mod->init_size + mod->core_size);
64797+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64798 print_unload_info(m, mod);
64799
64800 /* Informative for users. */
64801@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64802 mod->state == MODULE_STATE_COMING ? "Loading":
64803 "Live");
64804 /* Used by oprofile and other similar tools. */
64805- seq_printf(m, " 0x%pK", mod->module_core);
64806+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64807
64808 /* Taints info */
64809 if (mod->taints)
64810@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64811
64812 static int __init proc_modules_init(void)
64813 {
64814+#ifndef CONFIG_GRKERNSEC_HIDESYM
64815+#ifdef CONFIG_GRKERNSEC_PROC_USER
64816+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64817+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64818+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64819+#else
64820 proc_create("modules", 0, NULL, &proc_modules_operations);
64821+#endif
64822+#else
64823+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64824+#endif
64825 return 0;
64826 }
64827 module_init(proc_modules_init);
64828@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64829 {
64830 struct module *mod;
64831
64832- if (addr < module_addr_min || addr > module_addr_max)
64833+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64834+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64835 return NULL;
64836
64837 list_for_each_entry_rcu(mod, &modules, list)
64838- if (within_module_core(addr, mod)
64839- || within_module_init(addr, mod))
64840+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64841 return mod;
64842 return NULL;
64843 }
64844@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64845 */
64846 struct module *__module_text_address(unsigned long addr)
64847 {
64848- struct module *mod = __module_address(addr);
64849+ struct module *mod;
64850+
64851+#ifdef CONFIG_X86_32
64852+ addr = ktla_ktva(addr);
64853+#endif
64854+
64855+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64856+ return NULL;
64857+
64858+ mod = __module_address(addr);
64859+
64860 if (mod) {
64861 /* Make sure it's within the text section. */
64862- if (!within(addr, mod->module_init, mod->init_text_size)
64863- && !within(addr, mod->module_core, mod->core_text_size))
64864+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64865 mod = NULL;
64866 }
64867 return mod;
64868diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64869index 7e3443f..b2a1e6b 100644
64870--- a/kernel/mutex-debug.c
64871+++ b/kernel/mutex-debug.c
64872@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64873 }
64874
64875 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64876- struct thread_info *ti)
64877+ struct task_struct *task)
64878 {
64879 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64880
64881 /* Mark the current thread as blocked on the lock: */
64882- ti->task->blocked_on = waiter;
64883+ task->blocked_on = waiter;
64884 }
64885
64886 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64887- struct thread_info *ti)
64888+ struct task_struct *task)
64889 {
64890 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64891- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64892- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64893- ti->task->blocked_on = NULL;
64894+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64895+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64896+ task->blocked_on = NULL;
64897
64898 list_del_init(&waiter->list);
64899 waiter->task = NULL;
64900diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64901index 0799fd3..d06ae3b 100644
64902--- a/kernel/mutex-debug.h
64903+++ b/kernel/mutex-debug.h
64904@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64905 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64906 extern void debug_mutex_add_waiter(struct mutex *lock,
64907 struct mutex_waiter *waiter,
64908- struct thread_info *ti);
64909+ struct task_struct *task);
64910 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64911- struct thread_info *ti);
64912+ struct task_struct *task);
64913 extern void debug_mutex_unlock(struct mutex *lock);
64914 extern void debug_mutex_init(struct mutex *lock, const char *name,
64915 struct lock_class_key *key);
64916diff --git a/kernel/mutex.c b/kernel/mutex.c
64917index 89096dd..f91ebc5 100644
64918--- a/kernel/mutex.c
64919+++ b/kernel/mutex.c
64920@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64921 spin_lock_mutex(&lock->wait_lock, flags);
64922
64923 debug_mutex_lock_common(lock, &waiter);
64924- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64925+ debug_mutex_add_waiter(lock, &waiter, task);
64926
64927 /* add waiting tasks to the end of the waitqueue (FIFO): */
64928 list_add_tail(&waiter.list, &lock->wait_list);
64929@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64930 * TASK_UNINTERRUPTIBLE case.)
64931 */
64932 if (unlikely(signal_pending_state(state, task))) {
64933- mutex_remove_waiter(lock, &waiter,
64934- task_thread_info(task));
64935+ mutex_remove_waiter(lock, &waiter, task);
64936 mutex_release(&lock->dep_map, 1, ip);
64937 spin_unlock_mutex(&lock->wait_lock, flags);
64938
64939@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64940 done:
64941 lock_acquired(&lock->dep_map, ip);
64942 /* got the lock - rejoice! */
64943- mutex_remove_waiter(lock, &waiter, current_thread_info());
64944+ mutex_remove_waiter(lock, &waiter, task);
64945 mutex_set_owner(lock);
64946
64947 /* set it to 0 if there are no waiters left: */
64948diff --git a/kernel/padata.c b/kernel/padata.c
64949index b452599..5d68f4e 100644
64950--- a/kernel/padata.c
64951+++ b/kernel/padata.c
64952@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64953 padata->pd = pd;
64954 padata->cb_cpu = cb_cpu;
64955
64956- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64957- atomic_set(&pd->seq_nr, -1);
64958+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64959+ atomic_set_unchecked(&pd->seq_nr, -1);
64960
64961- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64962+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64963
64964 target_cpu = padata_cpu_hash(padata);
64965 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64966@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
64967 padata_init_pqueues(pd);
64968 padata_init_squeues(pd);
64969 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64970- atomic_set(&pd->seq_nr, -1);
64971+ atomic_set_unchecked(&pd->seq_nr, -1);
64972 atomic_set(&pd->reorder_objects, 0);
64973 atomic_set(&pd->refcnt, 0);
64974 pd->pinst = pinst;
64975diff --git a/kernel/panic.c b/kernel/panic.c
64976index b2659360..5972a0f 100644
64977--- a/kernel/panic.c
64978+++ b/kernel/panic.c
64979@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
64980 va_end(args);
64981 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
64982 #ifdef CONFIG_DEBUG_BUGVERBOSE
64983- dump_stack();
64984+ /*
64985+ * Avoid nested stack-dumping if a panic occurs during oops processing
64986+ */
64987+ if (!oops_in_progress)
64988+ dump_stack();
64989 #endif
64990
64991 /*
64992@@ -373,7 +377,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
64993 const char *board;
64994
64995 printk(KERN_WARNING "------------[ cut here ]------------\n");
64996- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64997+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64998 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64999 if (board)
65000 printk(KERN_WARNING "Hardware name: %s\n", board);
65001@@ -428,7 +432,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65002 */
65003 void __stack_chk_fail(void)
65004 {
65005- panic("stack-protector: Kernel stack is corrupted in: %p\n",
65006+ dump_stack();
65007+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65008 __builtin_return_address(0));
65009 }
65010 EXPORT_SYMBOL(__stack_chk_fail);
65011diff --git a/kernel/pid.c b/kernel/pid.c
65012index fa5f722..0c93e57 100644
65013--- a/kernel/pid.c
65014+++ b/kernel/pid.c
65015@@ -33,6 +33,7 @@
65016 #include <linux/rculist.h>
65017 #include <linux/bootmem.h>
65018 #include <linux/hash.h>
65019+#include <linux/security.h>
65020 #include <linux/pid_namespace.h>
65021 #include <linux/init_task.h>
65022 #include <linux/syscalls.h>
65023@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65024
65025 int pid_max = PID_MAX_DEFAULT;
65026
65027-#define RESERVED_PIDS 300
65028+#define RESERVED_PIDS 500
65029
65030 int pid_max_min = RESERVED_PIDS + 1;
65031 int pid_max_max = PID_MAX_LIMIT;
65032@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65033 */
65034 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65035 {
65036+ struct task_struct *task;
65037+
65038 rcu_lockdep_assert(rcu_read_lock_held(),
65039 "find_task_by_pid_ns() needs rcu_read_lock()"
65040 " protection");
65041- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65042+
65043+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65044+
65045+ if (gr_pid_is_chrooted(task))
65046+ return NULL;
65047+
65048+ return task;
65049 }
65050
65051 struct task_struct *find_task_by_vpid(pid_t vnr)
65052@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65053 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65054 }
65055
65056+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65057+{
65058+ rcu_lockdep_assert(rcu_read_lock_held(),
65059+ "find_task_by_pid_ns() needs rcu_read_lock()"
65060+ " protection");
65061+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65062+}
65063+
65064 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65065 {
65066 struct pid *pid;
65067diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65068index e7cb76d..75eceb3 100644
65069--- a/kernel/posix-cpu-timers.c
65070+++ b/kernel/posix-cpu-timers.c
65071@@ -6,6 +6,7 @@
65072 #include <linux/posix-timers.h>
65073 #include <linux/errno.h>
65074 #include <linux/math64.h>
65075+#include <linux/security.h>
65076 #include <asm/uaccess.h>
65077 #include <linux/kernel_stat.h>
65078 #include <trace/events/timer.h>
65079@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65080
65081 static __init int init_posix_cpu_timers(void)
65082 {
65083- struct k_clock process = {
65084+ static struct k_clock process = {
65085 .clock_getres = process_cpu_clock_getres,
65086 .clock_get = process_cpu_clock_get,
65087 .timer_create = process_cpu_timer_create,
65088 .nsleep = process_cpu_nsleep,
65089 .nsleep_restart = process_cpu_nsleep_restart,
65090 };
65091- struct k_clock thread = {
65092+ static struct k_clock thread = {
65093 .clock_getres = thread_cpu_clock_getres,
65094 .clock_get = thread_cpu_clock_get,
65095 .timer_create = thread_cpu_timer_create,
65096diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65097index 69185ae..cc2847a 100644
65098--- a/kernel/posix-timers.c
65099+++ b/kernel/posix-timers.c
65100@@ -43,6 +43,7 @@
65101 #include <linux/idr.h>
65102 #include <linux/posix-clock.h>
65103 #include <linux/posix-timers.h>
65104+#include <linux/grsecurity.h>
65105 #include <linux/syscalls.h>
65106 #include <linux/wait.h>
65107 #include <linux/workqueue.h>
65108@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65109 * which we beg off on and pass to do_sys_settimeofday().
65110 */
65111
65112-static struct k_clock posix_clocks[MAX_CLOCKS];
65113+static struct k_clock *posix_clocks[MAX_CLOCKS];
65114
65115 /*
65116 * These ones are defined below.
65117@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65118 */
65119 static __init int init_posix_timers(void)
65120 {
65121- struct k_clock clock_realtime = {
65122+ static struct k_clock clock_realtime = {
65123 .clock_getres = hrtimer_get_res,
65124 .clock_get = posix_clock_realtime_get,
65125 .clock_set = posix_clock_realtime_set,
65126@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65127 .timer_get = common_timer_get,
65128 .timer_del = common_timer_del,
65129 };
65130- struct k_clock clock_monotonic = {
65131+ static struct k_clock clock_monotonic = {
65132 .clock_getres = hrtimer_get_res,
65133 .clock_get = posix_ktime_get_ts,
65134 .nsleep = common_nsleep,
65135@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65136 .timer_get = common_timer_get,
65137 .timer_del = common_timer_del,
65138 };
65139- struct k_clock clock_monotonic_raw = {
65140+ static struct k_clock clock_monotonic_raw = {
65141 .clock_getres = hrtimer_get_res,
65142 .clock_get = posix_get_monotonic_raw,
65143 };
65144- struct k_clock clock_realtime_coarse = {
65145+ static struct k_clock clock_realtime_coarse = {
65146 .clock_getres = posix_get_coarse_res,
65147 .clock_get = posix_get_realtime_coarse,
65148 };
65149- struct k_clock clock_monotonic_coarse = {
65150+ static struct k_clock clock_monotonic_coarse = {
65151 .clock_getres = posix_get_coarse_res,
65152 .clock_get = posix_get_monotonic_coarse,
65153 };
65154- struct k_clock clock_boottime = {
65155+ static struct k_clock clock_boottime = {
65156 .clock_getres = hrtimer_get_res,
65157 .clock_get = posix_get_boottime,
65158 .nsleep = common_nsleep,
65159@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65160 return;
65161 }
65162
65163- posix_clocks[clock_id] = *new_clock;
65164+ posix_clocks[clock_id] = new_clock;
65165 }
65166 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65167
65168@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65169 return (id & CLOCKFD_MASK) == CLOCKFD ?
65170 &clock_posix_dynamic : &clock_posix_cpu;
65171
65172- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65173+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65174 return NULL;
65175- return &posix_clocks[id];
65176+ return posix_clocks[id];
65177 }
65178
65179 static int common_timer_create(struct k_itimer *new_timer)
65180@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65181 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65182 return -EFAULT;
65183
65184+ /* only the CLOCK_REALTIME clock can be set, all other clocks
65185+ have their clock_set fptr set to a nosettime dummy function
65186+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65187+ call common_clock_set, which calls do_sys_settimeofday, which
65188+ we hook
65189+ */
65190+
65191 return kc->clock_set(which_clock, &new_tp);
65192 }
65193
65194diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65195index d523593..68197a4 100644
65196--- a/kernel/power/poweroff.c
65197+++ b/kernel/power/poweroff.c
65198@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65199 .enable_mask = SYSRQ_ENABLE_BOOT,
65200 };
65201
65202-static int pm_sysrq_init(void)
65203+static int __init pm_sysrq_init(void)
65204 {
65205 register_sysrq_key('o', &sysrq_poweroff_op);
65206 return 0;
65207diff --git a/kernel/power/process.c b/kernel/power/process.c
65208index addbbe5..f9e32e0 100644
65209--- a/kernel/power/process.c
65210+++ b/kernel/power/process.c
65211@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65212 u64 elapsed_csecs64;
65213 unsigned int elapsed_csecs;
65214 bool wakeup = false;
65215+ bool timedout = false;
65216
65217 do_gettimeofday(&start);
65218
65219@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65220
65221 while (true) {
65222 todo = 0;
65223+ if (time_after(jiffies, end_time))
65224+ timedout = true;
65225 read_lock(&tasklist_lock);
65226 do_each_thread(g, p) {
65227 if (frozen(p) || !freezable(p))
65228@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65229 * try_to_stop() after schedule() in ptrace/signal
65230 * stop sees TIF_FREEZE.
65231 */
65232- if (!task_is_stopped_or_traced(p) &&
65233- !freezer_should_skip(p))
65234+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65235 todo++;
65236+ if (timedout) {
65237+ printk(KERN_ERR "Task refusing to freeze:\n");
65238+ sched_show_task(p);
65239+ }
65240+ }
65241 } while_each_thread(g, p);
65242 read_unlock(&tasklist_lock);
65243
65244@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65245 todo += wq_busy;
65246 }
65247
65248- if (!todo || time_after(jiffies, end_time))
65249+ if (!todo || timedout)
65250 break;
65251
65252 if (pm_wakeup_pending()) {
65253diff --git a/kernel/printk.c b/kernel/printk.c
65254index 7982a0a..2095fdc 100644
65255--- a/kernel/printk.c
65256+++ b/kernel/printk.c
65257@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65258 if (from_file && type != SYSLOG_ACTION_OPEN)
65259 return 0;
65260
65261+#ifdef CONFIG_GRKERNSEC_DMESG
65262+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65263+ return -EPERM;
65264+#endif
65265+
65266 if (syslog_action_restricted(type)) {
65267 if (capable(CAP_SYSLOG))
65268 return 0;
65269diff --git a/kernel/profile.c b/kernel/profile.c
65270index 76b8e77..a2930e8 100644
65271--- a/kernel/profile.c
65272+++ b/kernel/profile.c
65273@@ -39,7 +39,7 @@ struct profile_hit {
65274 /* Oprofile timer tick hook */
65275 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65276
65277-static atomic_t *prof_buffer;
65278+static atomic_unchecked_t *prof_buffer;
65279 static unsigned long prof_len, prof_shift;
65280
65281 int prof_on __read_mostly;
65282@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65283 hits[i].pc = 0;
65284 continue;
65285 }
65286- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65287+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65288 hits[i].hits = hits[i].pc = 0;
65289 }
65290 }
65291@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65292 * Add the current hit(s) and flush the write-queue out
65293 * to the global buffer:
65294 */
65295- atomic_add(nr_hits, &prof_buffer[pc]);
65296+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65297 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65298- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65299+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65300 hits[i].pc = hits[i].hits = 0;
65301 }
65302 out:
65303@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65304 {
65305 unsigned long pc;
65306 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65307- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65308+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65309 }
65310 #endif /* !CONFIG_SMP */
65311
65312@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65313 return -EFAULT;
65314 buf++; p++; count--; read++;
65315 }
65316- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65317+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65318 if (copy_to_user(buf, (void *)pnt, count))
65319 return -EFAULT;
65320 read += count;
65321@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65322 }
65323 #endif
65324 profile_discard_flip_buffers();
65325- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65326+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65327 return count;
65328 }
65329
65330diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65331index 78ab24a..332c915 100644
65332--- a/kernel/ptrace.c
65333+++ b/kernel/ptrace.c
65334@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65335 return ret;
65336 }
65337
65338-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65339+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65340+ unsigned int log)
65341 {
65342 const struct cred *cred = current_cred(), *tcred;
65343
65344@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65345 cred->gid == tcred->sgid &&
65346 cred->gid == tcred->gid))
65347 goto ok;
65348- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65349+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65350+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65351 goto ok;
65352 rcu_read_unlock();
65353 return -EPERM;
65354@@ -207,7 +209,9 @@ ok:
65355 smp_rmb();
65356 if (task->mm)
65357 dumpable = get_dumpable(task->mm);
65358- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65359+ if (!dumpable &&
65360+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65361+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65362 return -EPERM;
65363
65364 return security_ptrace_access_check(task, mode);
65365@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65366 {
65367 int err;
65368 task_lock(task);
65369- err = __ptrace_may_access(task, mode);
65370+ err = __ptrace_may_access(task, mode, 0);
65371+ task_unlock(task);
65372+ return !err;
65373+}
65374+
65375+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65376+{
65377+ return __ptrace_may_access(task, mode, 0);
65378+}
65379+
65380+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65381+{
65382+ int err;
65383+ task_lock(task);
65384+ err = __ptrace_may_access(task, mode, 1);
65385 task_unlock(task);
65386 return !err;
65387 }
65388@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65389 goto out;
65390
65391 task_lock(task);
65392- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65393+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65394 task_unlock(task);
65395 if (retval)
65396 goto unlock_creds;
65397@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65398 task->ptrace = PT_PTRACED;
65399 if (seize)
65400 task->ptrace |= PT_SEIZED;
65401- if (task_ns_capable(task, CAP_SYS_PTRACE))
65402+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65403 task->ptrace |= PT_PTRACE_CAP;
65404
65405 __ptrace_link(task, current);
65406@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65407 break;
65408 return -EIO;
65409 }
65410- if (copy_to_user(dst, buf, retval))
65411+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65412 return -EFAULT;
65413 copied += retval;
65414 src += retval;
65415@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65416 bool seized = child->ptrace & PT_SEIZED;
65417 int ret = -EIO;
65418 siginfo_t siginfo, *si;
65419- void __user *datavp = (void __user *) data;
65420+ void __user *datavp = (__force void __user *) data;
65421 unsigned long __user *datalp = datavp;
65422 unsigned long flags;
65423
65424@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65425 goto out;
65426 }
65427
65428+ if (gr_handle_ptrace(child, request)) {
65429+ ret = -EPERM;
65430+ goto out_put_task_struct;
65431+ }
65432+
65433 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65434 ret = ptrace_attach(child, request, data);
65435 /*
65436 * Some architectures need to do book-keeping after
65437 * a ptrace attach.
65438 */
65439- if (!ret)
65440+ if (!ret) {
65441 arch_ptrace_attach(child);
65442+ gr_audit_ptrace(child);
65443+ }
65444 goto out_put_task_struct;
65445 }
65446
65447@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65448 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65449 if (copied != sizeof(tmp))
65450 return -EIO;
65451- return put_user(tmp, (unsigned long __user *)data);
65452+ return put_user(tmp, (__force unsigned long __user *)data);
65453 }
65454
65455 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65456@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65457 goto out;
65458 }
65459
65460+ if (gr_handle_ptrace(child, request)) {
65461+ ret = -EPERM;
65462+ goto out_put_task_struct;
65463+ }
65464+
65465 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65466 ret = ptrace_attach(child, request, data);
65467 /*
65468 * Some architectures need to do book-keeping after
65469 * a ptrace attach.
65470 */
65471- if (!ret)
65472+ if (!ret) {
65473 arch_ptrace_attach(child);
65474+ gr_audit_ptrace(child);
65475+ }
65476 goto out_put_task_struct;
65477 }
65478
65479diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65480index 764825c..3aa6ac4 100644
65481--- a/kernel/rcutorture.c
65482+++ b/kernel/rcutorture.c
65483@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65484 { 0 };
65485 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65486 { 0 };
65487-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65488-static atomic_t n_rcu_torture_alloc;
65489-static atomic_t n_rcu_torture_alloc_fail;
65490-static atomic_t n_rcu_torture_free;
65491-static atomic_t n_rcu_torture_mberror;
65492-static atomic_t n_rcu_torture_error;
65493+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65494+static atomic_unchecked_t n_rcu_torture_alloc;
65495+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65496+static atomic_unchecked_t n_rcu_torture_free;
65497+static atomic_unchecked_t n_rcu_torture_mberror;
65498+static atomic_unchecked_t n_rcu_torture_error;
65499 static long n_rcu_torture_boost_ktrerror;
65500 static long n_rcu_torture_boost_rterror;
65501 static long n_rcu_torture_boost_failure;
65502@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65503
65504 spin_lock_bh(&rcu_torture_lock);
65505 if (list_empty(&rcu_torture_freelist)) {
65506- atomic_inc(&n_rcu_torture_alloc_fail);
65507+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65508 spin_unlock_bh(&rcu_torture_lock);
65509 return NULL;
65510 }
65511- atomic_inc(&n_rcu_torture_alloc);
65512+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65513 p = rcu_torture_freelist.next;
65514 list_del_init(p);
65515 spin_unlock_bh(&rcu_torture_lock);
65516@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65517 static void
65518 rcu_torture_free(struct rcu_torture *p)
65519 {
65520- atomic_inc(&n_rcu_torture_free);
65521+ atomic_inc_unchecked(&n_rcu_torture_free);
65522 spin_lock_bh(&rcu_torture_lock);
65523 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65524 spin_unlock_bh(&rcu_torture_lock);
65525@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65526 i = rp->rtort_pipe_count;
65527 if (i > RCU_TORTURE_PIPE_LEN)
65528 i = RCU_TORTURE_PIPE_LEN;
65529- atomic_inc(&rcu_torture_wcount[i]);
65530+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65531 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65532 rp->rtort_mbtest = 0;
65533 rcu_torture_free(rp);
65534@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65535 i = rp->rtort_pipe_count;
65536 if (i > RCU_TORTURE_PIPE_LEN)
65537 i = RCU_TORTURE_PIPE_LEN;
65538- atomic_inc(&rcu_torture_wcount[i]);
65539+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65540 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65541 rp->rtort_mbtest = 0;
65542 list_del(&rp->rtort_free);
65543@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65544 i = old_rp->rtort_pipe_count;
65545 if (i > RCU_TORTURE_PIPE_LEN)
65546 i = RCU_TORTURE_PIPE_LEN;
65547- atomic_inc(&rcu_torture_wcount[i]);
65548+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65549 old_rp->rtort_pipe_count++;
65550 cur_ops->deferred_free(old_rp);
65551 }
65552@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65553 return;
65554 }
65555 if (p->rtort_mbtest == 0)
65556- atomic_inc(&n_rcu_torture_mberror);
65557+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65558 spin_lock(&rand_lock);
65559 cur_ops->read_delay(&rand);
65560 n_rcu_torture_timers++;
65561@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65562 continue;
65563 }
65564 if (p->rtort_mbtest == 0)
65565- atomic_inc(&n_rcu_torture_mberror);
65566+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65567 cur_ops->read_delay(&rand);
65568 preempt_disable();
65569 pipe_count = p->rtort_pipe_count;
65570@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65571 rcu_torture_current,
65572 rcu_torture_current_version,
65573 list_empty(&rcu_torture_freelist),
65574- atomic_read(&n_rcu_torture_alloc),
65575- atomic_read(&n_rcu_torture_alloc_fail),
65576- atomic_read(&n_rcu_torture_free),
65577- atomic_read(&n_rcu_torture_mberror),
65578+ atomic_read_unchecked(&n_rcu_torture_alloc),
65579+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65580+ atomic_read_unchecked(&n_rcu_torture_free),
65581+ atomic_read_unchecked(&n_rcu_torture_mberror),
65582 n_rcu_torture_boost_ktrerror,
65583 n_rcu_torture_boost_rterror,
65584 n_rcu_torture_boost_failure,
65585 n_rcu_torture_boosts,
65586 n_rcu_torture_timers);
65587- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65588+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65589 n_rcu_torture_boost_ktrerror != 0 ||
65590 n_rcu_torture_boost_rterror != 0 ||
65591 n_rcu_torture_boost_failure != 0)
65592@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65593 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65594 if (i > 1) {
65595 cnt += sprintf(&page[cnt], "!!! ");
65596- atomic_inc(&n_rcu_torture_error);
65597+ atomic_inc_unchecked(&n_rcu_torture_error);
65598 WARN_ON_ONCE(1);
65599 }
65600 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65601@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65602 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65603 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65604 cnt += sprintf(&page[cnt], " %d",
65605- atomic_read(&rcu_torture_wcount[i]));
65606+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65607 }
65608 cnt += sprintf(&page[cnt], "\n");
65609 if (cur_ops->stats)
65610@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65611
65612 if (cur_ops->cleanup)
65613 cur_ops->cleanup();
65614- if (atomic_read(&n_rcu_torture_error))
65615+ if (atomic_read_unchecked(&n_rcu_torture_error))
65616 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65617 else
65618 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65619@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65620
65621 rcu_torture_current = NULL;
65622 rcu_torture_current_version = 0;
65623- atomic_set(&n_rcu_torture_alloc, 0);
65624- atomic_set(&n_rcu_torture_alloc_fail, 0);
65625- atomic_set(&n_rcu_torture_free, 0);
65626- atomic_set(&n_rcu_torture_mberror, 0);
65627- atomic_set(&n_rcu_torture_error, 0);
65628+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65629+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65630+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65631+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65632+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65633 n_rcu_torture_boost_ktrerror = 0;
65634 n_rcu_torture_boost_rterror = 0;
65635 n_rcu_torture_boost_failure = 0;
65636 n_rcu_torture_boosts = 0;
65637 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65638- atomic_set(&rcu_torture_wcount[i], 0);
65639+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65640 for_each_possible_cpu(cpu) {
65641 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65642 per_cpu(rcu_torture_count, cpu)[i] = 0;
65643diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65644index 6b76d81..7afc1b3 100644
65645--- a/kernel/rcutree.c
65646+++ b/kernel/rcutree.c
65647@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65648 trace_rcu_dyntick("Start");
65649 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65650 smp_mb__before_atomic_inc(); /* See above. */
65651- atomic_inc(&rdtp->dynticks);
65652+ atomic_inc_unchecked(&rdtp->dynticks);
65653 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65654- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65655+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65656 local_irq_restore(flags);
65657 }
65658
65659@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65660 return;
65661 }
65662 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65663- atomic_inc(&rdtp->dynticks);
65664+ atomic_inc_unchecked(&rdtp->dynticks);
65665 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65666 smp_mb__after_atomic_inc(); /* See above. */
65667- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65668+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65669 trace_rcu_dyntick("End");
65670 local_irq_restore(flags);
65671 }
65672@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65673 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65674
65675 if (rdtp->dynticks_nmi_nesting == 0 &&
65676- (atomic_read(&rdtp->dynticks) & 0x1))
65677+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65678 return;
65679 rdtp->dynticks_nmi_nesting++;
65680 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65681- atomic_inc(&rdtp->dynticks);
65682+ atomic_inc_unchecked(&rdtp->dynticks);
65683 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65684 smp_mb__after_atomic_inc(); /* See above. */
65685- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65686+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65687 }
65688
65689 /**
65690@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65691 return;
65692 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65693 smp_mb__before_atomic_inc(); /* See above. */
65694- atomic_inc(&rdtp->dynticks);
65695+ atomic_inc_unchecked(&rdtp->dynticks);
65696 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65697- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65698+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65699 }
65700
65701 /**
65702@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65703 */
65704 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65705 {
65706- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65707+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65708 return 0;
65709 }
65710
65711@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65712 unsigned int curr;
65713 unsigned int snap;
65714
65715- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65716+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65717 snap = (unsigned int)rdp->dynticks_snap;
65718
65719 /*
65720@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65721 /*
65722 * Do RCU core processing for the current CPU.
65723 */
65724-static void rcu_process_callbacks(struct softirq_action *unused)
65725+static void rcu_process_callbacks(void)
65726 {
65727 trace_rcu_utilization("Start RCU core");
65728 __rcu_process_callbacks(&rcu_sched_state,
65729diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65730index 849ce9e..74bc9de 100644
65731--- a/kernel/rcutree.h
65732+++ b/kernel/rcutree.h
65733@@ -86,7 +86,7 @@
65734 struct rcu_dynticks {
65735 int dynticks_nesting; /* Track irq/process nesting level. */
65736 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65737- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65738+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65739 };
65740
65741 /* RCU's kthread states for tracing. */
65742diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65743index 4b9b9f8..2326053 100644
65744--- a/kernel/rcutree_plugin.h
65745+++ b/kernel/rcutree_plugin.h
65746@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65747
65748 /* Clean up and exit. */
65749 smp_mb(); /* ensure expedited GP seen before counter increment. */
65750- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65751+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65752 unlock_mb_ret:
65753 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65754 mb_ret:
65755@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65756
65757 #else /* #ifndef CONFIG_SMP */
65758
65759-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65760-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65761+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65762+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65763
65764 static int synchronize_sched_expedited_cpu_stop(void *data)
65765 {
65766@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65767 int firstsnap, s, snap, trycount = 0;
65768
65769 /* Note that atomic_inc_return() implies full memory barrier. */
65770- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65771+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65772 get_online_cpus();
65773
65774 /*
65775@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65776 }
65777
65778 /* Check to see if someone else did our work for us. */
65779- s = atomic_read(&sync_sched_expedited_done);
65780+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65781 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65782 smp_mb(); /* ensure test happens before caller kfree */
65783 return;
65784@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65785 * grace period works for us.
65786 */
65787 get_online_cpus();
65788- snap = atomic_read(&sync_sched_expedited_started) - 1;
65789+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65790 smp_mb(); /* ensure read is before try_stop_cpus(). */
65791 }
65792
65793@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65794 * than we did beat us to the punch.
65795 */
65796 do {
65797- s = atomic_read(&sync_sched_expedited_done);
65798+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65799 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65800 smp_mb(); /* ensure test happens before caller kfree */
65801 break;
65802 }
65803- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65804+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65805
65806 put_online_cpus();
65807 }
65808@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65809 for_each_online_cpu(thatcpu) {
65810 if (thatcpu == cpu)
65811 continue;
65812- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65813+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65814 thatcpu).dynticks);
65815 smp_mb(); /* Order sampling of snap with end of grace period. */
65816 if ((snap & 0x1) != 0) {
65817diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65818index 9feffa4..54058df 100644
65819--- a/kernel/rcutree_trace.c
65820+++ b/kernel/rcutree_trace.c
65821@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65822 rdp->qs_pending);
65823 #ifdef CONFIG_NO_HZ
65824 seq_printf(m, " dt=%d/%d/%d df=%lu",
65825- atomic_read(&rdp->dynticks->dynticks),
65826+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65827 rdp->dynticks->dynticks_nesting,
65828 rdp->dynticks->dynticks_nmi_nesting,
65829 rdp->dynticks_fqs);
65830@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65831 rdp->qs_pending);
65832 #ifdef CONFIG_NO_HZ
65833 seq_printf(m, ",%d,%d,%d,%lu",
65834- atomic_read(&rdp->dynticks->dynticks),
65835+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65836 rdp->dynticks->dynticks_nesting,
65837 rdp->dynticks->dynticks_nmi_nesting,
65838 rdp->dynticks_fqs);
65839diff --git a/kernel/resource.c b/kernel/resource.c
65840index 7640b3a..5879283 100644
65841--- a/kernel/resource.c
65842+++ b/kernel/resource.c
65843@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65844
65845 static int __init ioresources_init(void)
65846 {
65847+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65848+#ifdef CONFIG_GRKERNSEC_PROC_USER
65849+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65850+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65851+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65852+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65853+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65854+#endif
65855+#else
65856 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65857 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65858+#endif
65859 return 0;
65860 }
65861 __initcall(ioresources_init);
65862diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65863index 3d9f31c..7fefc9e 100644
65864--- a/kernel/rtmutex-tester.c
65865+++ b/kernel/rtmutex-tester.c
65866@@ -20,7 +20,7 @@
65867 #define MAX_RT_TEST_MUTEXES 8
65868
65869 static spinlock_t rttest_lock;
65870-static atomic_t rttest_event;
65871+static atomic_unchecked_t rttest_event;
65872
65873 struct test_thread_data {
65874 int opcode;
65875@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65876
65877 case RTTEST_LOCKCONT:
65878 td->mutexes[td->opdata] = 1;
65879- td->event = atomic_add_return(1, &rttest_event);
65880+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65881 return 0;
65882
65883 case RTTEST_RESET:
65884@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65885 return 0;
65886
65887 case RTTEST_RESETEVENT:
65888- atomic_set(&rttest_event, 0);
65889+ atomic_set_unchecked(&rttest_event, 0);
65890 return 0;
65891
65892 default:
65893@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65894 return ret;
65895
65896 td->mutexes[id] = 1;
65897- td->event = atomic_add_return(1, &rttest_event);
65898+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65899 rt_mutex_lock(&mutexes[id]);
65900- td->event = atomic_add_return(1, &rttest_event);
65901+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65902 td->mutexes[id] = 4;
65903 return 0;
65904
65905@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65906 return ret;
65907
65908 td->mutexes[id] = 1;
65909- td->event = atomic_add_return(1, &rttest_event);
65910+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65911 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65912- td->event = atomic_add_return(1, &rttest_event);
65913+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65914 td->mutexes[id] = ret ? 0 : 4;
65915 return ret ? -EINTR : 0;
65916
65917@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65918 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65919 return ret;
65920
65921- td->event = atomic_add_return(1, &rttest_event);
65922+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65923 rt_mutex_unlock(&mutexes[id]);
65924- td->event = atomic_add_return(1, &rttest_event);
65925+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65926 td->mutexes[id] = 0;
65927 return 0;
65928
65929@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65930 break;
65931
65932 td->mutexes[dat] = 2;
65933- td->event = atomic_add_return(1, &rttest_event);
65934+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65935 break;
65936
65937 default:
65938@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65939 return;
65940
65941 td->mutexes[dat] = 3;
65942- td->event = atomic_add_return(1, &rttest_event);
65943+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65944 break;
65945
65946 case RTTEST_LOCKNOWAIT:
65947@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65948 return;
65949
65950 td->mutexes[dat] = 1;
65951- td->event = atomic_add_return(1, &rttest_event);
65952+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65953 return;
65954
65955 default:
65956diff --git a/kernel/sched.c b/kernel/sched.c
65957index d6b149c..896cbb8 100644
65958--- a/kernel/sched.c
65959+++ b/kernel/sched.c
65960@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65961 BUG(); /* the idle class will always have a runnable task */
65962 }
65963
65964+#ifdef CONFIG_GRKERNSEC_SETXID
65965+extern void gr_delayed_cred_worker(void);
65966+static inline void gr_cred_schedule(void)
65967+{
65968+ if (unlikely(current->delayed_cred))
65969+ gr_delayed_cred_worker();
65970+}
65971+#else
65972+static inline void gr_cred_schedule(void)
65973+{
65974+}
65975+#endif
65976+
65977 /*
65978 * __schedule() is the main scheduler function.
65979 */
65980@@ -4408,6 +4421,8 @@ need_resched:
65981
65982 schedule_debug(prev);
65983
65984+ gr_cred_schedule();
65985+
65986 if (sched_feat(HRTICK))
65987 hrtick_clear(rq);
65988
65989@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
65990 /* convert nice value [19,-20] to rlimit style value [1,40] */
65991 int nice_rlim = 20 - nice;
65992
65993+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65994+
65995 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65996 capable(CAP_SYS_NICE));
65997 }
65998@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65999 if (nice > 19)
66000 nice = 19;
66001
66002- if (increment < 0 && !can_nice(current, nice))
66003+ if (increment < 0 && (!can_nice(current, nice) ||
66004+ gr_handle_chroot_nice()))
66005 return -EPERM;
66006
66007 retval = security_task_setnice(current, nice);
66008@@ -5288,6 +5306,7 @@ recheck:
66009 unsigned long rlim_rtprio =
66010 task_rlimit(p, RLIMIT_RTPRIO);
66011
66012+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66013 /* can't set/change the rt policy */
66014 if (policy != p->policy && !rlim_rtprio)
66015 return -EPERM;
66016diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66017index 429242f..d7cca82 100644
66018--- a/kernel/sched_autogroup.c
66019+++ b/kernel/sched_autogroup.c
66020@@ -7,7 +7,7 @@
66021
66022 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66023 static struct autogroup autogroup_default;
66024-static atomic_t autogroup_seq_nr;
66025+static atomic_unchecked_t autogroup_seq_nr;
66026
66027 static void __init autogroup_init(struct task_struct *init_task)
66028 {
66029@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66030
66031 kref_init(&ag->kref);
66032 init_rwsem(&ag->lock);
66033- ag->id = atomic_inc_return(&autogroup_seq_nr);
66034+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66035 ag->tg = tg;
66036 #ifdef CONFIG_RT_GROUP_SCHED
66037 /*
66038diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66039index 8a39fa3..34f3dbc 100644
66040--- a/kernel/sched_fair.c
66041+++ b/kernel/sched_fair.c
66042@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66043 * run_rebalance_domains is triggered when needed from the scheduler tick.
66044 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66045 */
66046-static void run_rebalance_domains(struct softirq_action *h)
66047+static void run_rebalance_domains(void)
66048 {
66049 int this_cpu = smp_processor_id();
66050 struct rq *this_rq = cpu_rq(this_cpu);
66051diff --git a/kernel/signal.c b/kernel/signal.c
66052index 2065515..aed2987 100644
66053--- a/kernel/signal.c
66054+++ b/kernel/signal.c
66055@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66056
66057 int print_fatal_signals __read_mostly;
66058
66059-static void __user *sig_handler(struct task_struct *t, int sig)
66060+static __sighandler_t sig_handler(struct task_struct *t, int sig)
66061 {
66062 return t->sighand->action[sig - 1].sa.sa_handler;
66063 }
66064
66065-static int sig_handler_ignored(void __user *handler, int sig)
66066+static int sig_handler_ignored(__sighandler_t handler, int sig)
66067 {
66068 /* Is it explicitly or implicitly ignored? */
66069 return handler == SIG_IGN ||
66070@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66071 static int sig_task_ignored(struct task_struct *t, int sig,
66072 int from_ancestor_ns)
66073 {
66074- void __user *handler;
66075+ __sighandler_t handler;
66076
66077 handler = sig_handler(t, sig);
66078
66079@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66080 atomic_inc(&user->sigpending);
66081 rcu_read_unlock();
66082
66083+ if (!override_rlimit)
66084+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66085+
66086 if (override_rlimit ||
66087 atomic_read(&user->sigpending) <=
66088 task_rlimit(t, RLIMIT_SIGPENDING)) {
66089@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66090
66091 int unhandled_signal(struct task_struct *tsk, int sig)
66092 {
66093- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66094+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66095 if (is_global_init(tsk))
66096 return 1;
66097 if (handler != SIG_IGN && handler != SIG_DFL)
66098@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66099 }
66100 }
66101
66102+ /* allow glibc communication via tgkill to other threads in our
66103+ thread group */
66104+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66105+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66106+ && gr_handle_signal(t, sig))
66107+ return -EPERM;
66108+
66109 return security_task_kill(t, info, sig, 0);
66110 }
66111
66112@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66113 return send_signal(sig, info, p, 1);
66114 }
66115
66116-static int
66117+int
66118 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66119 {
66120 return send_signal(sig, info, t, 0);
66121@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66122 unsigned long int flags;
66123 int ret, blocked, ignored;
66124 struct k_sigaction *action;
66125+ int is_unhandled = 0;
66126
66127 spin_lock_irqsave(&t->sighand->siglock, flags);
66128 action = &t->sighand->action[sig-1];
66129@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66130 }
66131 if (action->sa.sa_handler == SIG_DFL)
66132 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66133+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66134+ is_unhandled = 1;
66135 ret = specific_send_sig_info(sig, info, t);
66136 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66137
66138+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66139+ normal operation */
66140+ if (is_unhandled) {
66141+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66142+ gr_handle_crash(t, sig);
66143+ }
66144+
66145 return ret;
66146 }
66147
66148@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66149 ret = check_kill_permission(sig, info, p);
66150 rcu_read_unlock();
66151
66152- if (!ret && sig)
66153+ if (!ret && sig) {
66154 ret = do_send_sig_info(sig, info, p, true);
66155+ if (!ret)
66156+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66157+ }
66158
66159 return ret;
66160 }
66161@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66162 int error = -ESRCH;
66163
66164 rcu_read_lock();
66165- p = find_task_by_vpid(pid);
66166+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66167+ /* allow glibc communication via tgkill to other threads in our
66168+ thread group */
66169+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66170+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66171+ p = find_task_by_vpid_unrestricted(pid);
66172+ else
66173+#endif
66174+ p = find_task_by_vpid(pid);
66175 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66176 error = check_kill_permission(sig, info, p);
66177 /*
66178diff --git a/kernel/smp.c b/kernel/smp.c
66179index db197d6..17aef0b 100644
66180--- a/kernel/smp.c
66181+++ b/kernel/smp.c
66182@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66183 }
66184 EXPORT_SYMBOL(smp_call_function);
66185
66186-void ipi_call_lock(void)
66187+void ipi_call_lock(void) __acquires(call_function.lock)
66188 {
66189 raw_spin_lock(&call_function.lock);
66190 }
66191
66192-void ipi_call_unlock(void)
66193+void ipi_call_unlock(void) __releases(call_function.lock)
66194 {
66195 raw_spin_unlock(&call_function.lock);
66196 }
66197
66198-void ipi_call_lock_irq(void)
66199+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66200 {
66201 raw_spin_lock_irq(&call_function.lock);
66202 }
66203
66204-void ipi_call_unlock_irq(void)
66205+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66206 {
66207 raw_spin_unlock_irq(&call_function.lock);
66208 }
66209diff --git a/kernel/softirq.c b/kernel/softirq.c
66210index 2c71d91..1021f81 100644
66211--- a/kernel/softirq.c
66212+++ b/kernel/softirq.c
66213@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66214
66215 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66216
66217-char *softirq_to_name[NR_SOFTIRQS] = {
66218+const char * const softirq_to_name[NR_SOFTIRQS] = {
66219 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66220 "TASKLET", "SCHED", "HRTIMER", "RCU"
66221 };
66222@@ -235,7 +235,7 @@ restart:
66223 kstat_incr_softirqs_this_cpu(vec_nr);
66224
66225 trace_softirq_entry(vec_nr);
66226- h->action(h);
66227+ h->action();
66228 trace_softirq_exit(vec_nr);
66229 if (unlikely(prev_count != preempt_count())) {
66230 printk(KERN_ERR "huh, entered softirq %u %s %p"
66231@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66232 local_irq_restore(flags);
66233 }
66234
66235-void open_softirq(int nr, void (*action)(struct softirq_action *))
66236+void open_softirq(int nr, void (*action)(void))
66237 {
66238- softirq_vec[nr].action = action;
66239+ pax_open_kernel();
66240+ *(void **)&softirq_vec[nr].action = action;
66241+ pax_close_kernel();
66242 }
66243
66244 /*
66245@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66246
66247 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66248
66249-static void tasklet_action(struct softirq_action *a)
66250+static void tasklet_action(void)
66251 {
66252 struct tasklet_struct *list;
66253
66254@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66255 }
66256 }
66257
66258-static void tasklet_hi_action(struct softirq_action *a)
66259+static void tasklet_hi_action(void)
66260 {
66261 struct tasklet_struct *list;
66262
66263diff --git a/kernel/sys.c b/kernel/sys.c
66264index 481611f..0754d86 100644
66265--- a/kernel/sys.c
66266+++ b/kernel/sys.c
66267@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66268 error = -EACCES;
66269 goto out;
66270 }
66271+
66272+ if (gr_handle_chroot_setpriority(p, niceval)) {
66273+ error = -EACCES;
66274+ goto out;
66275+ }
66276+
66277 no_nice = security_task_setnice(p, niceval);
66278 if (no_nice) {
66279 error = no_nice;
66280@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66281 goto error;
66282 }
66283
66284+ if (gr_check_group_change(new->gid, new->egid, -1))
66285+ goto error;
66286+
66287 if (rgid != (gid_t) -1 ||
66288 (egid != (gid_t) -1 && egid != old->gid))
66289 new->sgid = new->egid;
66290@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66291 old = current_cred();
66292
66293 retval = -EPERM;
66294+
66295+ if (gr_check_group_change(gid, gid, gid))
66296+ goto error;
66297+
66298 if (nsown_capable(CAP_SETGID))
66299 new->gid = new->egid = new->sgid = new->fsgid = gid;
66300 else if (gid == old->gid || gid == old->sgid)
66301@@ -618,7 +631,7 @@ error:
66302 /*
66303 * change the user struct in a credentials set to match the new UID
66304 */
66305-static int set_user(struct cred *new)
66306+int set_user(struct cred *new)
66307 {
66308 struct user_struct *new_user;
66309
66310@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66311 goto error;
66312 }
66313
66314+ if (gr_check_user_change(new->uid, new->euid, -1))
66315+ goto error;
66316+
66317 if (new->uid != old->uid) {
66318 retval = set_user(new);
66319 if (retval < 0)
66320@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66321 old = current_cred();
66322
66323 retval = -EPERM;
66324+
66325+ if (gr_check_crash_uid(uid))
66326+ goto error;
66327+ if (gr_check_user_change(uid, uid, uid))
66328+ goto error;
66329+
66330 if (nsown_capable(CAP_SETUID)) {
66331 new->suid = new->uid = uid;
66332 if (uid != old->uid) {
66333@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66334 goto error;
66335 }
66336
66337+ if (gr_check_user_change(ruid, euid, -1))
66338+ goto error;
66339+
66340 if (ruid != (uid_t) -1) {
66341 new->uid = ruid;
66342 if (ruid != old->uid) {
66343@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66344 goto error;
66345 }
66346
66347+ if (gr_check_group_change(rgid, egid, -1))
66348+ goto error;
66349+
66350 if (rgid != (gid_t) -1)
66351 new->gid = rgid;
66352 if (egid != (gid_t) -1)
66353@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66354 old = current_cred();
66355 old_fsuid = old->fsuid;
66356
66357+ if (gr_check_user_change(-1, -1, uid))
66358+ goto error;
66359+
66360 if (uid == old->uid || uid == old->euid ||
66361 uid == old->suid || uid == old->fsuid ||
66362 nsown_capable(CAP_SETUID)) {
66363@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66364 }
66365 }
66366
66367+error:
66368 abort_creds(new);
66369 return old_fsuid;
66370
66371@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66372 if (gid == old->gid || gid == old->egid ||
66373 gid == old->sgid || gid == old->fsgid ||
66374 nsown_capable(CAP_SETGID)) {
66375+ if (gr_check_group_change(-1, -1, gid))
66376+ goto error;
66377+
66378 if (gid != old_fsgid) {
66379 new->fsgid = gid;
66380 goto change_okay;
66381 }
66382 }
66383
66384+error:
66385 abort_creds(new);
66386 return old_fsgid;
66387
66388@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66389 }
66390 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66391 snprintf(buf, len, "2.6.%u%s", v, rest);
66392- ret = copy_to_user(release, buf, len);
66393+ if (len > sizeof(buf))
66394+ ret = -EFAULT;
66395+ else
66396+ ret = copy_to_user(release, buf, len);
66397 }
66398 return ret;
66399 }
66400@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66401 return -EFAULT;
66402
66403 down_read(&uts_sem);
66404- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66405+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66406 __OLD_UTS_LEN);
66407 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66408- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66409+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66410 __OLD_UTS_LEN);
66411 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66412- error |= __copy_to_user(&name->release, &utsname()->release,
66413+ error |= __copy_to_user(name->release, &utsname()->release,
66414 __OLD_UTS_LEN);
66415 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66416- error |= __copy_to_user(&name->version, &utsname()->version,
66417+ error |= __copy_to_user(name->version, &utsname()->version,
66418 __OLD_UTS_LEN);
66419 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66420- error |= __copy_to_user(&name->machine, &utsname()->machine,
66421+ error |= __copy_to_user(name->machine, &utsname()->machine,
66422 __OLD_UTS_LEN);
66423 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66424 up_read(&uts_sem);
66425@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66426 error = get_dumpable(me->mm);
66427 break;
66428 case PR_SET_DUMPABLE:
66429- if (arg2 < 0 || arg2 > 1) {
66430+ if (arg2 > 1) {
66431 error = -EINVAL;
66432 break;
66433 }
66434diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66435index ae27196..7506d69 100644
66436--- a/kernel/sysctl.c
66437+++ b/kernel/sysctl.c
66438@@ -86,6 +86,13 @@
66439
66440
66441 #if defined(CONFIG_SYSCTL)
66442+#include <linux/grsecurity.h>
66443+#include <linux/grinternal.h>
66444+
66445+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66446+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66447+ const int op);
66448+extern int gr_handle_chroot_sysctl(const int op);
66449
66450 /* External variables not in a header file. */
66451 extern int sysctl_overcommit_memory;
66452@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66453 }
66454
66455 #endif
66456+extern struct ctl_table grsecurity_table[];
66457
66458 static struct ctl_table root_table[];
66459 static struct ctl_table_root sysctl_table_root;
66460@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66461 int sysctl_legacy_va_layout;
66462 #endif
66463
66464+#ifdef CONFIG_PAX_SOFTMODE
66465+static ctl_table pax_table[] = {
66466+ {
66467+ .procname = "softmode",
66468+ .data = &pax_softmode,
66469+ .maxlen = sizeof(unsigned int),
66470+ .mode = 0600,
66471+ .proc_handler = &proc_dointvec,
66472+ },
66473+
66474+ { }
66475+};
66476+#endif
66477+
66478 /* The default sysctl tables: */
66479
66480 static struct ctl_table root_table[] = {
66481@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66482 #endif
66483
66484 static struct ctl_table kern_table[] = {
66485+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66486+ {
66487+ .procname = "grsecurity",
66488+ .mode = 0500,
66489+ .child = grsecurity_table,
66490+ },
66491+#endif
66492+
66493+#ifdef CONFIG_PAX_SOFTMODE
66494+ {
66495+ .procname = "pax",
66496+ .mode = 0500,
66497+ .child = pax_table,
66498+ },
66499+#endif
66500+
66501 {
66502 .procname = "sched_child_runs_first",
66503 .data = &sysctl_sched_child_runs_first,
66504@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66505 .data = &modprobe_path,
66506 .maxlen = KMOD_PATH_LEN,
66507 .mode = 0644,
66508- .proc_handler = proc_dostring,
66509+ .proc_handler = proc_dostring_modpriv,
66510 },
66511 {
66512 .procname = "modules_disabled",
66513@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66514 .extra1 = &zero,
66515 .extra2 = &one,
66516 },
66517+#endif
66518 {
66519 .procname = "kptr_restrict",
66520 .data = &kptr_restrict,
66521 .maxlen = sizeof(int),
66522 .mode = 0644,
66523 .proc_handler = proc_dmesg_restrict,
66524+#ifdef CONFIG_GRKERNSEC_HIDESYM
66525+ .extra1 = &two,
66526+#else
66527 .extra1 = &zero,
66528+#endif
66529 .extra2 = &two,
66530 },
66531-#endif
66532 {
66533 .procname = "ngroups_max",
66534 .data = &ngroups_max,
66535@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66536 .proc_handler = proc_dointvec_minmax,
66537 .extra1 = &zero,
66538 },
66539+ {
66540+ .procname = "heap_stack_gap",
66541+ .data = &sysctl_heap_stack_gap,
66542+ .maxlen = sizeof(sysctl_heap_stack_gap),
66543+ .mode = 0644,
66544+ .proc_handler = proc_doulongvec_minmax,
66545+ },
66546 #else
66547 {
66548 .procname = "nr_trim_pages",
66549@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66550 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66551 {
66552 int mode;
66553+ int error;
66554+
66555+ if (table->parent != NULL && table->parent->procname != NULL &&
66556+ table->procname != NULL &&
66557+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66558+ return -EACCES;
66559+ if (gr_handle_chroot_sysctl(op))
66560+ return -EACCES;
66561+ error = gr_handle_sysctl(table, op);
66562+ if (error)
66563+ return error;
66564
66565 if (root->permissions)
66566 mode = root->permissions(root, current->nsproxy, table);
66567@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66568 buffer, lenp, ppos);
66569 }
66570
66571+int proc_dostring_modpriv(struct ctl_table *table, int write,
66572+ void __user *buffer, size_t *lenp, loff_t *ppos)
66573+{
66574+ if (write && !capable(CAP_SYS_MODULE))
66575+ return -EPERM;
66576+
66577+ return _proc_do_string(table->data, table->maxlen, write,
66578+ buffer, lenp, ppos);
66579+}
66580+
66581 static size_t proc_skip_spaces(char **buf)
66582 {
66583 size_t ret;
66584@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66585 len = strlen(tmp);
66586 if (len > *size)
66587 len = *size;
66588+ if (len > sizeof(tmp))
66589+ len = sizeof(tmp);
66590 if (copy_to_user(*buf, tmp, len))
66591 return -EFAULT;
66592 *size -= len;
66593@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66594 *i = val;
66595 } else {
66596 val = convdiv * (*i) / convmul;
66597- if (!first)
66598+ if (!first) {
66599 err = proc_put_char(&buffer, &left, '\t');
66600+ if (err)
66601+ break;
66602+ }
66603 err = proc_put_long(&buffer, &left, val, false);
66604 if (err)
66605 break;
66606@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66607 return -ENOSYS;
66608 }
66609
66610+int proc_dostring_modpriv(struct ctl_table *table, int write,
66611+ void __user *buffer, size_t *lenp, loff_t *ppos)
66612+{
66613+ return -ENOSYS;
66614+}
66615+
66616 int proc_dointvec(struct ctl_table *table, int write,
66617 void __user *buffer, size_t *lenp, loff_t *ppos)
66618 {
66619@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66620 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66621 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66622 EXPORT_SYMBOL(proc_dostring);
66623+EXPORT_SYMBOL(proc_dostring_modpriv);
66624 EXPORT_SYMBOL(proc_doulongvec_minmax);
66625 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66626 EXPORT_SYMBOL(register_sysctl_table);
66627diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66628index a650694..aaeeb20 100644
66629--- a/kernel/sysctl_binary.c
66630+++ b/kernel/sysctl_binary.c
66631@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66632 int i;
66633
66634 set_fs(KERNEL_DS);
66635- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66636+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66637 set_fs(old_fs);
66638 if (result < 0)
66639 goto out_kfree;
66640@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66641 }
66642
66643 set_fs(KERNEL_DS);
66644- result = vfs_write(file, buffer, str - buffer, &pos);
66645+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66646 set_fs(old_fs);
66647 if (result < 0)
66648 goto out_kfree;
66649@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66650 int i;
66651
66652 set_fs(KERNEL_DS);
66653- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66654+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66655 set_fs(old_fs);
66656 if (result < 0)
66657 goto out_kfree;
66658@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66659 }
66660
66661 set_fs(KERNEL_DS);
66662- result = vfs_write(file, buffer, str - buffer, &pos);
66663+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66664 set_fs(old_fs);
66665 if (result < 0)
66666 goto out_kfree;
66667@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66668 int i;
66669
66670 set_fs(KERNEL_DS);
66671- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66672+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66673 set_fs(old_fs);
66674 if (result < 0)
66675 goto out;
66676@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66677 __le16 dnaddr;
66678
66679 set_fs(KERNEL_DS);
66680- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66681+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66682 set_fs(old_fs);
66683 if (result < 0)
66684 goto out;
66685@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66686 le16_to_cpu(dnaddr) & 0x3ff);
66687
66688 set_fs(KERNEL_DS);
66689- result = vfs_write(file, buf, len, &pos);
66690+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66691 set_fs(old_fs);
66692 if (result < 0)
66693 goto out;
66694diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66695index 362da65..ab8ef8c 100644
66696--- a/kernel/sysctl_check.c
66697+++ b/kernel/sysctl_check.c
66698@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66699 set_fail(&fail, table, "Directory with extra2");
66700 } else {
66701 if ((table->proc_handler == proc_dostring) ||
66702+ (table->proc_handler == proc_dostring_modpriv) ||
66703 (table->proc_handler == proc_dointvec) ||
66704 (table->proc_handler == proc_dointvec_minmax) ||
66705 (table->proc_handler == proc_dointvec_jiffies) ||
66706diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66707index e660464..c8b9e67 100644
66708--- a/kernel/taskstats.c
66709+++ b/kernel/taskstats.c
66710@@ -27,9 +27,12 @@
66711 #include <linux/cgroup.h>
66712 #include <linux/fs.h>
66713 #include <linux/file.h>
66714+#include <linux/grsecurity.h>
66715 #include <net/genetlink.h>
66716 #include <linux/atomic.h>
66717
66718+extern int gr_is_taskstats_denied(int pid);
66719+
66720 /*
66721 * Maximum length of a cpumask that can be specified in
66722 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66723@@ -556,6 +559,9 @@ err:
66724
66725 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66726 {
66727+ if (gr_is_taskstats_denied(current->pid))
66728+ return -EACCES;
66729+
66730 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66731 return cmd_attr_register_cpumask(info);
66732 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66733diff --git a/kernel/time.c b/kernel/time.c
66734index 73e416d..cfc6f69 100644
66735--- a/kernel/time.c
66736+++ b/kernel/time.c
66737@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66738 return error;
66739
66740 if (tz) {
66741+ /* we log in do_settimeofday called below, so don't log twice
66742+ */
66743+ if (!tv)
66744+ gr_log_timechange();
66745+
66746 /* SMP safe, global irq locking makes it work. */
66747 sys_tz = *tz;
66748 update_vsyscall_tz();
66749diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66750index 8a46f5d..bbe6f9c 100644
66751--- a/kernel/time/alarmtimer.c
66752+++ b/kernel/time/alarmtimer.c
66753@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66754 struct platform_device *pdev;
66755 int error = 0;
66756 int i;
66757- struct k_clock alarm_clock = {
66758+ static struct k_clock alarm_clock = {
66759 .clock_getres = alarm_clock_getres,
66760 .clock_get = alarm_clock_get,
66761 .timer_create = alarm_timer_create,
66762diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66763index fd4a7b1..fae5c2a 100644
66764--- a/kernel/time/tick-broadcast.c
66765+++ b/kernel/time/tick-broadcast.c
66766@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66767 * then clear the broadcast bit.
66768 */
66769 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66770- int cpu = smp_processor_id();
66771+ cpu = smp_processor_id();
66772
66773 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66774 tick_broadcast_clear_oneshot(cpu);
66775diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66776index 2378413..be455fd 100644
66777--- a/kernel/time/timekeeping.c
66778+++ b/kernel/time/timekeeping.c
66779@@ -14,6 +14,7 @@
66780 #include <linux/init.h>
66781 #include <linux/mm.h>
66782 #include <linux/sched.h>
66783+#include <linux/grsecurity.h>
66784 #include <linux/syscore_ops.h>
66785 #include <linux/clocksource.h>
66786 #include <linux/jiffies.h>
66787@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66788 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66789 return -EINVAL;
66790
66791+ gr_log_timechange();
66792+
66793 write_seqlock_irqsave(&xtime_lock, flags);
66794
66795 timekeeping_forward_now();
66796diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66797index 3258455..f35227d 100644
66798--- a/kernel/time/timer_list.c
66799+++ b/kernel/time/timer_list.c
66800@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66801
66802 static void print_name_offset(struct seq_file *m, void *sym)
66803 {
66804+#ifdef CONFIG_GRKERNSEC_HIDESYM
66805+ SEQ_printf(m, "<%p>", NULL);
66806+#else
66807 char symname[KSYM_NAME_LEN];
66808
66809 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66810 SEQ_printf(m, "<%pK>", sym);
66811 else
66812 SEQ_printf(m, "%s", symname);
66813+#endif
66814 }
66815
66816 static void
66817@@ -112,7 +116,11 @@ next_one:
66818 static void
66819 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66820 {
66821+#ifdef CONFIG_GRKERNSEC_HIDESYM
66822+ SEQ_printf(m, " .base: %p\n", NULL);
66823+#else
66824 SEQ_printf(m, " .base: %pK\n", base);
66825+#endif
66826 SEQ_printf(m, " .index: %d\n",
66827 base->index);
66828 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66829@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66830 {
66831 struct proc_dir_entry *pe;
66832
66833+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66834+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66835+#else
66836 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66837+#endif
66838 if (!pe)
66839 return -ENOMEM;
66840 return 0;
66841diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66842index 0b537f2..9e71eca 100644
66843--- a/kernel/time/timer_stats.c
66844+++ b/kernel/time/timer_stats.c
66845@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66846 static unsigned long nr_entries;
66847 static struct entry entries[MAX_ENTRIES];
66848
66849-static atomic_t overflow_count;
66850+static atomic_unchecked_t overflow_count;
66851
66852 /*
66853 * The entries are in a hash-table, for fast lookup:
66854@@ -140,7 +140,7 @@ static void reset_entries(void)
66855 nr_entries = 0;
66856 memset(entries, 0, sizeof(entries));
66857 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66858- atomic_set(&overflow_count, 0);
66859+ atomic_set_unchecked(&overflow_count, 0);
66860 }
66861
66862 static struct entry *alloc_entry(void)
66863@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66864 if (likely(entry))
66865 entry->count++;
66866 else
66867- atomic_inc(&overflow_count);
66868+ atomic_inc_unchecked(&overflow_count);
66869
66870 out_unlock:
66871 raw_spin_unlock_irqrestore(lock, flags);
66872@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66873
66874 static void print_name_offset(struct seq_file *m, unsigned long addr)
66875 {
66876+#ifdef CONFIG_GRKERNSEC_HIDESYM
66877+ seq_printf(m, "<%p>", NULL);
66878+#else
66879 char symname[KSYM_NAME_LEN];
66880
66881 if (lookup_symbol_name(addr, symname) < 0)
66882 seq_printf(m, "<%p>", (void *)addr);
66883 else
66884 seq_printf(m, "%s", symname);
66885+#endif
66886 }
66887
66888 static int tstats_show(struct seq_file *m, void *v)
66889@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66890
66891 seq_puts(m, "Timer Stats Version: v0.2\n");
66892 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66893- if (atomic_read(&overflow_count))
66894+ if (atomic_read_unchecked(&overflow_count))
66895 seq_printf(m, "Overflow: %d entries\n",
66896- atomic_read(&overflow_count));
66897+ atomic_read_unchecked(&overflow_count));
66898
66899 for (i = 0; i < nr_entries; i++) {
66900 entry = entries + i;
66901@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66902 {
66903 struct proc_dir_entry *pe;
66904
66905+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66906+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66907+#else
66908 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66909+#endif
66910 if (!pe)
66911 return -ENOMEM;
66912 return 0;
66913diff --git a/kernel/timer.c b/kernel/timer.c
66914index 9c3c62b..441690e 100644
66915--- a/kernel/timer.c
66916+++ b/kernel/timer.c
66917@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66918 /*
66919 * This function runs timers and the timer-tq in bottom half context.
66920 */
66921-static void run_timer_softirq(struct softirq_action *h)
66922+static void run_timer_softirq(void)
66923 {
66924 struct tvec_base *base = __this_cpu_read(tvec_bases);
66925
66926diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66927index 16fc34a..efd8bb8 100644
66928--- a/kernel/trace/blktrace.c
66929+++ b/kernel/trace/blktrace.c
66930@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66931 struct blk_trace *bt = filp->private_data;
66932 char buf[16];
66933
66934- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66935+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66936
66937 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66938 }
66939@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66940 return 1;
66941
66942 bt = buf->chan->private_data;
66943- atomic_inc(&bt->dropped);
66944+ atomic_inc_unchecked(&bt->dropped);
66945 return 0;
66946 }
66947
66948@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66949
66950 bt->dir = dir;
66951 bt->dev = dev;
66952- atomic_set(&bt->dropped, 0);
66953+ atomic_set_unchecked(&bt->dropped, 0);
66954
66955 ret = -EIO;
66956 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66957diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66958index 25b4f4d..6f4772d 100644
66959--- a/kernel/trace/ftrace.c
66960+++ b/kernel/trace/ftrace.c
66961@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66962 if (unlikely(ftrace_disabled))
66963 return 0;
66964
66965+ ret = ftrace_arch_code_modify_prepare();
66966+ FTRACE_WARN_ON(ret);
66967+ if (ret)
66968+ return 0;
66969+
66970 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66971+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66972 if (ret) {
66973 ftrace_bug(ret, ip);
66974- return 0;
66975 }
66976- return 1;
66977+ return ret ? 0 : 1;
66978 }
66979
66980 /*
66981@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66982
66983 int
66984 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66985- void *data)
66986+ void *data)
66987 {
66988 struct ftrace_func_probe *entry;
66989 struct ftrace_page *pg;
66990diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
66991index f2bd275..adaf3a2 100644
66992--- a/kernel/trace/trace.c
66993+++ b/kernel/trace/trace.c
66994@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
66995 };
66996 #endif
66997
66998-static struct dentry *d_tracer;
66999-
67000 struct dentry *tracing_init_dentry(void)
67001 {
67002+ static struct dentry *d_tracer;
67003 static int once;
67004
67005 if (d_tracer)
67006@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67007 return d_tracer;
67008 }
67009
67010-static struct dentry *d_percpu;
67011-
67012 struct dentry *tracing_dentry_percpu(void)
67013 {
67014+ static struct dentry *d_percpu;
67015 static int once;
67016 struct dentry *d_tracer;
67017
67018diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67019index c212a7f..7b02394 100644
67020--- a/kernel/trace/trace_events.c
67021+++ b/kernel/trace/trace_events.c
67022@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67023 struct ftrace_module_file_ops {
67024 struct list_head list;
67025 struct module *mod;
67026- struct file_operations id;
67027- struct file_operations enable;
67028- struct file_operations format;
67029- struct file_operations filter;
67030 };
67031
67032 static struct ftrace_module_file_ops *
67033@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67034
67035 file_ops->mod = mod;
67036
67037- file_ops->id = ftrace_event_id_fops;
67038- file_ops->id.owner = mod;
67039-
67040- file_ops->enable = ftrace_enable_fops;
67041- file_ops->enable.owner = mod;
67042-
67043- file_ops->filter = ftrace_event_filter_fops;
67044- file_ops->filter.owner = mod;
67045-
67046- file_ops->format = ftrace_event_format_fops;
67047- file_ops->format.owner = mod;
67048+ pax_open_kernel();
67049+ *(void **)&mod->trace_id.owner = mod;
67050+ *(void **)&mod->trace_enable.owner = mod;
67051+ *(void **)&mod->trace_filter.owner = mod;
67052+ *(void **)&mod->trace_format.owner = mod;
67053+ pax_close_kernel();
67054
67055 list_add(&file_ops->list, &ftrace_module_file_list);
67056
67057@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67058
67059 for_each_event(call, start, end) {
67060 __trace_add_event_call(*call, mod,
67061- &file_ops->id, &file_ops->enable,
67062- &file_ops->filter, &file_ops->format);
67063+ &mod->trace_id, &mod->trace_enable,
67064+ &mod->trace_filter, &mod->trace_format);
67065 }
67066 }
67067
67068diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67069index 00d527c..7c5b1a3 100644
67070--- a/kernel/trace/trace_kprobe.c
67071+++ b/kernel/trace/trace_kprobe.c
67072@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67073 long ret;
67074 int maxlen = get_rloc_len(*(u32 *)dest);
67075 u8 *dst = get_rloc_data(dest);
67076- u8 *src = addr;
67077+ const u8 __user *src = (const u8 __force_user *)addr;
67078 mm_segment_t old_fs = get_fs();
67079 if (!maxlen)
67080 return;
67081@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67082 pagefault_disable();
67083 do
67084 ret = __copy_from_user_inatomic(dst++, src++, 1);
67085- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67086+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67087 dst[-1] = '\0';
67088 pagefault_enable();
67089 set_fs(old_fs);
67090@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67091 ((u8 *)get_rloc_data(dest))[0] = '\0';
67092 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67093 } else
67094- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67095+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67096 get_rloc_offs(*(u32 *)dest));
67097 }
67098 /* Return the length of string -- including null terminal byte */
67099@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67100 set_fs(KERNEL_DS);
67101 pagefault_disable();
67102 do {
67103- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67104+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67105 len++;
67106 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67107 pagefault_enable();
67108diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67109index fd3c8aa..5f324a6 100644
67110--- a/kernel/trace/trace_mmiotrace.c
67111+++ b/kernel/trace/trace_mmiotrace.c
67112@@ -24,7 +24,7 @@ struct header_iter {
67113 static struct trace_array *mmio_trace_array;
67114 static bool overrun_detected;
67115 static unsigned long prev_overruns;
67116-static atomic_t dropped_count;
67117+static atomic_unchecked_t dropped_count;
67118
67119 static void mmio_reset_data(struct trace_array *tr)
67120 {
67121@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67122
67123 static unsigned long count_overruns(struct trace_iterator *iter)
67124 {
67125- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67126+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67127 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67128
67129 if (over > prev_overruns)
67130@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67131 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67132 sizeof(*entry), 0, pc);
67133 if (!event) {
67134- atomic_inc(&dropped_count);
67135+ atomic_inc_unchecked(&dropped_count);
67136 return;
67137 }
67138 entry = ring_buffer_event_data(event);
67139@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67140 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67141 sizeof(*entry), 0, pc);
67142 if (!event) {
67143- atomic_inc(&dropped_count);
67144+ atomic_inc_unchecked(&dropped_count);
67145 return;
67146 }
67147 entry = ring_buffer_event_data(event);
67148diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67149index 5199930..26c73a0 100644
67150--- a/kernel/trace/trace_output.c
67151+++ b/kernel/trace/trace_output.c
67152@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67153
67154 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67155 if (!IS_ERR(p)) {
67156- p = mangle_path(s->buffer + s->len, p, "\n");
67157+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67158 if (p) {
67159 s->len = p - s->buffer;
67160 return 1;
67161diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67162index 77575b3..6e623d1 100644
67163--- a/kernel/trace/trace_stack.c
67164+++ b/kernel/trace/trace_stack.c
67165@@ -50,7 +50,7 @@ static inline void check_stack(void)
67166 return;
67167
67168 /* we do not handle interrupt stacks yet */
67169- if (!object_is_on_stack(&this_size))
67170+ if (!object_starts_on_stack(&this_size))
67171 return;
67172
67173 local_irq_save(flags);
67174diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67175index 209b379..7f76423 100644
67176--- a/kernel/trace/trace_workqueue.c
67177+++ b/kernel/trace/trace_workqueue.c
67178@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67179 int cpu;
67180 pid_t pid;
67181 /* Can be inserted from interrupt or user context, need to be atomic */
67182- atomic_t inserted;
67183+ atomic_unchecked_t inserted;
67184 /*
67185 * Don't need to be atomic, works are serialized in a single workqueue thread
67186 * on a single CPU.
67187@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67188 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67189 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67190 if (node->pid == wq_thread->pid) {
67191- atomic_inc(&node->inserted);
67192+ atomic_inc_unchecked(&node->inserted);
67193 goto found;
67194 }
67195 }
67196@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67197 tsk = get_pid_task(pid, PIDTYPE_PID);
67198 if (tsk) {
67199 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67200- atomic_read(&cws->inserted), cws->executed,
67201+ atomic_read_unchecked(&cws->inserted), cws->executed,
67202 tsk->comm);
67203 put_task_struct(tsk);
67204 }
67205diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67206index 82928f5..92da771 100644
67207--- a/lib/Kconfig.debug
67208+++ b/lib/Kconfig.debug
67209@@ -1103,6 +1103,7 @@ config LATENCYTOP
67210 depends on DEBUG_KERNEL
67211 depends on STACKTRACE_SUPPORT
67212 depends on PROC_FS
67213+ depends on !GRKERNSEC_HIDESYM
67214 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67215 select KALLSYMS
67216 select KALLSYMS_ALL
67217diff --git a/lib/bitmap.c b/lib/bitmap.c
67218index 0d4a127..33a06c7 100644
67219--- a/lib/bitmap.c
67220+++ b/lib/bitmap.c
67221@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67222 {
67223 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67224 u32 chunk;
67225- const char __user __force *ubuf = (const char __user __force *)buf;
67226+ const char __user *ubuf = (const char __force_user *)buf;
67227
67228 bitmap_zero(maskp, nmaskbits);
67229
67230@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67231 {
67232 if (!access_ok(VERIFY_READ, ubuf, ulen))
67233 return -EFAULT;
67234- return __bitmap_parse((const char __force *)ubuf,
67235+ return __bitmap_parse((const char __force_kernel *)ubuf,
67236 ulen, 1, maskp, nmaskbits);
67237
67238 }
67239@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67240 {
67241 unsigned a, b;
67242 int c, old_c, totaldigits;
67243- const char __user __force *ubuf = (const char __user __force *)buf;
67244+ const char __user *ubuf = (const char __force_user *)buf;
67245 int exp_digit, in_range;
67246
67247 totaldigits = c = 0;
67248@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67249 {
67250 if (!access_ok(VERIFY_READ, ubuf, ulen))
67251 return -EFAULT;
67252- return __bitmap_parselist((const char __force *)ubuf,
67253+ return __bitmap_parselist((const char __force_kernel *)ubuf,
67254 ulen, 1, maskp, nmaskbits);
67255 }
67256 EXPORT_SYMBOL(bitmap_parselist_user);
67257diff --git a/lib/bug.c b/lib/bug.c
67258index 1955209..cbbb2ad 100644
67259--- a/lib/bug.c
67260+++ b/lib/bug.c
67261@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67262 return BUG_TRAP_TYPE_NONE;
67263
67264 bug = find_bug(bugaddr);
67265+ if (!bug)
67266+ return BUG_TRAP_TYPE_NONE;
67267
67268 file = NULL;
67269 line = 0;
67270diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67271index a78b7c6..2c73084 100644
67272--- a/lib/debugobjects.c
67273+++ b/lib/debugobjects.c
67274@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67275 if (limit > 4)
67276 return;
67277
67278- is_on_stack = object_is_on_stack(addr);
67279+ is_on_stack = object_starts_on_stack(addr);
67280 if (is_on_stack == onstack)
67281 return;
67282
67283diff --git a/lib/devres.c b/lib/devres.c
67284index 7c0e953..f642b5c 100644
67285--- a/lib/devres.c
67286+++ b/lib/devres.c
67287@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67288 void devm_iounmap(struct device *dev, void __iomem *addr)
67289 {
67290 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67291- (void *)addr));
67292+ (void __force *)addr));
67293 iounmap(addr);
67294 }
67295 EXPORT_SYMBOL(devm_iounmap);
67296@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67297 {
67298 ioport_unmap(addr);
67299 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67300- devm_ioport_map_match, (void *)addr));
67301+ devm_ioport_map_match, (void __force *)addr));
67302 }
67303 EXPORT_SYMBOL(devm_ioport_unmap);
67304
67305diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67306index fea790a..ebb0e82 100644
67307--- a/lib/dma-debug.c
67308+++ b/lib/dma-debug.c
67309@@ -925,7 +925,7 @@ out:
67310
67311 static void check_for_stack(struct device *dev, void *addr)
67312 {
67313- if (object_is_on_stack(addr))
67314+ if (object_starts_on_stack(addr))
67315 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67316 "stack [addr=%p]\n", addr);
67317 }
67318diff --git a/lib/extable.c b/lib/extable.c
67319index 4cac81e..63e9b8f 100644
67320--- a/lib/extable.c
67321+++ b/lib/extable.c
67322@@ -13,6 +13,7 @@
67323 #include <linux/init.h>
67324 #include <linux/sort.h>
67325 #include <asm/uaccess.h>
67326+#include <asm/pgtable.h>
67327
67328 #ifndef ARCH_HAS_SORT_EXTABLE
67329 /*
67330@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67331 void sort_extable(struct exception_table_entry *start,
67332 struct exception_table_entry *finish)
67333 {
67334+ pax_open_kernel();
67335 sort(start, finish - start, sizeof(struct exception_table_entry),
67336 cmp_ex, NULL);
67337+ pax_close_kernel();
67338 }
67339
67340 #ifdef CONFIG_MODULES
67341diff --git a/lib/inflate.c b/lib/inflate.c
67342index 013a761..c28f3fc 100644
67343--- a/lib/inflate.c
67344+++ b/lib/inflate.c
67345@@ -269,7 +269,7 @@ static void free(void *where)
67346 malloc_ptr = free_mem_ptr;
67347 }
67348 #else
67349-#define malloc(a) kmalloc(a, GFP_KERNEL)
67350+#define malloc(a) kmalloc((a), GFP_KERNEL)
67351 #define free(a) kfree(a)
67352 #endif
67353
67354diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67355index bd2bea9..6b3c95e 100644
67356--- a/lib/is_single_threaded.c
67357+++ b/lib/is_single_threaded.c
67358@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67359 struct task_struct *p, *t;
67360 bool ret;
67361
67362+ if (!mm)
67363+ return true;
67364+
67365 if (atomic_read(&task->signal->live) != 1)
67366 return false;
67367
67368diff --git a/lib/kref.c b/lib/kref.c
67369index 3efb882..8492f4c 100644
67370--- a/lib/kref.c
67371+++ b/lib/kref.c
67372@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67373 */
67374 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67375 {
67376- WARN_ON(release == NULL);
67377+ BUG_ON(release == NULL);
67378 WARN_ON(release == (void (*)(struct kref *))kfree);
67379
67380 if (atomic_dec_and_test(&kref->refcount)) {
67381diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67382index d9df745..e73c2fe 100644
67383--- a/lib/radix-tree.c
67384+++ b/lib/radix-tree.c
67385@@ -80,7 +80,7 @@ struct radix_tree_preload {
67386 int nr;
67387 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67388 };
67389-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67390+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67391
67392 static inline void *ptr_to_indirect(void *ptr)
67393 {
67394diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67395index 993599e..84dc70e 100644
67396--- a/lib/vsprintf.c
67397+++ b/lib/vsprintf.c
67398@@ -16,6 +16,9 @@
67399 * - scnprintf and vscnprintf
67400 */
67401
67402+#ifdef CONFIG_GRKERNSEC_HIDESYM
67403+#define __INCLUDED_BY_HIDESYM 1
67404+#endif
67405 #include <stdarg.h>
67406 #include <linux/module.h>
67407 #include <linux/types.h>
67408@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67409 char sym[KSYM_SYMBOL_LEN];
67410 if (ext == 'B')
67411 sprint_backtrace(sym, value);
67412- else if (ext != 'f' && ext != 's')
67413+ else if (ext != 'f' && ext != 's' && ext != 'a')
67414 sprint_symbol(sym, value);
67415 else
67416 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67417@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67418 return string(buf, end, uuid, spec);
67419 }
67420
67421+#ifdef CONFIG_GRKERNSEC_HIDESYM
67422+int kptr_restrict __read_mostly = 2;
67423+#else
67424 int kptr_restrict __read_mostly;
67425+#endif
67426
67427 /*
67428 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67429@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67430 * - 'S' For symbolic direct pointers with offset
67431 * - 's' For symbolic direct pointers without offset
67432 * - 'B' For backtraced symbolic direct pointers with offset
67433+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67434+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67435 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67436 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67437 * - 'M' For a 6-byte MAC address, it prints the address in the
67438@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67439 {
67440 if (!ptr && *fmt != 'K') {
67441 /*
67442- * Print (null) with the same width as a pointer so it makes
67443+ * Print (nil) with the same width as a pointer so it makes
67444 * tabular output look nice.
67445 */
67446 if (spec.field_width == -1)
67447 spec.field_width = 2 * sizeof(void *);
67448- return string(buf, end, "(null)", spec);
67449+ return string(buf, end, "(nil)", spec);
67450 }
67451
67452 switch (*fmt) {
67453@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67454 /* Fallthrough */
67455 case 'S':
67456 case 's':
67457+#ifdef CONFIG_GRKERNSEC_HIDESYM
67458+ break;
67459+#else
67460+ return symbol_string(buf, end, ptr, spec, *fmt);
67461+#endif
67462+ case 'A':
67463+ case 'a':
67464 case 'B':
67465 return symbol_string(buf, end, ptr, spec, *fmt);
67466 case 'R':
67467@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67468 typeof(type) value; \
67469 if (sizeof(type) == 8) { \
67470 args = PTR_ALIGN(args, sizeof(u32)); \
67471- *(u32 *)&value = *(u32 *)args; \
67472- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67473+ *(u32 *)&value = *(const u32 *)args; \
67474+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67475 } else { \
67476 args = PTR_ALIGN(args, sizeof(type)); \
67477- value = *(typeof(type) *)args; \
67478+ value = *(const typeof(type) *)args; \
67479 } \
67480 args += sizeof(type); \
67481 value; \
67482@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67483 case FORMAT_TYPE_STR: {
67484 const char *str_arg = args;
67485 args += strlen(str_arg) + 1;
67486- str = string(str, end, (char *)str_arg, spec);
67487+ str = string(str, end, str_arg, spec);
67488 break;
67489 }
67490
67491diff --git a/localversion-grsec b/localversion-grsec
67492new file mode 100644
67493index 0000000..7cd6065
67494--- /dev/null
67495+++ b/localversion-grsec
67496@@ -0,0 +1 @@
67497+-grsec
67498diff --git a/mm/Kconfig b/mm/Kconfig
67499index 011b110..b492af2 100644
67500--- a/mm/Kconfig
67501+++ b/mm/Kconfig
67502@@ -241,10 +241,10 @@ config KSM
67503 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67504
67505 config DEFAULT_MMAP_MIN_ADDR
67506- int "Low address space to protect from user allocation"
67507+ int "Low address space to protect from user allocation"
67508 depends on MMU
67509- default 4096
67510- help
67511+ default 65536
67512+ help
67513 This is the portion of low virtual memory which should be protected
67514 from userspace allocation. Keeping a user from writing to low pages
67515 can help reduce the impact of kernel NULL pointer bugs.
67516diff --git a/mm/filemap.c b/mm/filemap.c
67517index 90286a4..f441caa 100644
67518--- a/mm/filemap.c
67519+++ b/mm/filemap.c
67520@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67521 struct address_space *mapping = file->f_mapping;
67522
67523 if (!mapping->a_ops->readpage)
67524- return -ENOEXEC;
67525+ return -ENODEV;
67526 file_accessed(file);
67527 vma->vm_ops = &generic_file_vm_ops;
67528 vma->vm_flags |= VM_CAN_NONLINEAR;
67529@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67530 *pos = i_size_read(inode);
67531
67532 if (limit != RLIM_INFINITY) {
67533+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67534 if (*pos >= limit) {
67535 send_sig(SIGXFSZ, current, 0);
67536 return -EFBIG;
67537diff --git a/mm/fremap.c b/mm/fremap.c
67538index 9ed4fd4..c42648d 100644
67539--- a/mm/fremap.c
67540+++ b/mm/fremap.c
67541@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67542 retry:
67543 vma = find_vma(mm, start);
67544
67545+#ifdef CONFIG_PAX_SEGMEXEC
67546+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67547+ goto out;
67548+#endif
67549+
67550 /*
67551 * Make sure the vma is shared, that it supports prefaulting,
67552 * and that the remapped range is valid and fully within
67553diff --git a/mm/highmem.c b/mm/highmem.c
67554index 57d82c6..e9e0552 100644
67555--- a/mm/highmem.c
67556+++ b/mm/highmem.c
67557@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67558 * So no dangers, even with speculative execution.
67559 */
67560 page = pte_page(pkmap_page_table[i]);
67561+ pax_open_kernel();
67562 pte_clear(&init_mm, (unsigned long)page_address(page),
67563 &pkmap_page_table[i]);
67564-
67565+ pax_close_kernel();
67566 set_page_address(page, NULL);
67567 need_flush = 1;
67568 }
67569@@ -186,9 +187,11 @@ start:
67570 }
67571 }
67572 vaddr = PKMAP_ADDR(last_pkmap_nr);
67573+
67574+ pax_open_kernel();
67575 set_pte_at(&init_mm, vaddr,
67576 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67577-
67578+ pax_close_kernel();
67579 pkmap_count[last_pkmap_nr] = 1;
67580 set_page_address(page, (void *)vaddr);
67581
67582diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67583index 36b3d98..584cb54 100644
67584--- a/mm/huge_memory.c
67585+++ b/mm/huge_memory.c
67586@@ -703,7 +703,7 @@ out:
67587 * run pte_offset_map on the pmd, if an huge pmd could
67588 * materialize from under us from a different thread.
67589 */
67590- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67591+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67592 return VM_FAULT_OOM;
67593 /* if an huge pmd materialized from under us just retry later */
67594 if (unlikely(pmd_trans_huge(*pmd)))
67595diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67596index 2316840..b418671 100644
67597--- a/mm/hugetlb.c
67598+++ b/mm/hugetlb.c
67599@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67600 return 1;
67601 }
67602
67603+#ifdef CONFIG_PAX_SEGMEXEC
67604+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67605+{
67606+ struct mm_struct *mm = vma->vm_mm;
67607+ struct vm_area_struct *vma_m;
67608+ unsigned long address_m;
67609+ pte_t *ptep_m;
67610+
67611+ vma_m = pax_find_mirror_vma(vma);
67612+ if (!vma_m)
67613+ return;
67614+
67615+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67616+ address_m = address + SEGMEXEC_TASK_SIZE;
67617+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67618+ get_page(page_m);
67619+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67620+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67621+}
67622+#endif
67623+
67624 /*
67625 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67626 */
67627@@ -2450,6 +2471,11 @@ retry_avoidcopy:
67628 make_huge_pte(vma, new_page, 1));
67629 page_remove_rmap(old_page);
67630 hugepage_add_new_anon_rmap(new_page, vma, address);
67631+
67632+#ifdef CONFIG_PAX_SEGMEXEC
67633+ pax_mirror_huge_pte(vma, address, new_page);
67634+#endif
67635+
67636 /* Make the old page be freed below */
67637 new_page = old_page;
67638 mmu_notifier_invalidate_range_end(mm,
67639@@ -2601,6 +2627,10 @@ retry:
67640 && (vma->vm_flags & VM_SHARED)));
67641 set_huge_pte_at(mm, address, ptep, new_pte);
67642
67643+#ifdef CONFIG_PAX_SEGMEXEC
67644+ pax_mirror_huge_pte(vma, address, page);
67645+#endif
67646+
67647 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67648 /* Optimization, do the COW without a second fault */
67649 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67650@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67651 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67652 struct hstate *h = hstate_vma(vma);
67653
67654+#ifdef CONFIG_PAX_SEGMEXEC
67655+ struct vm_area_struct *vma_m;
67656+#endif
67657+
67658 ptep = huge_pte_offset(mm, address);
67659 if (ptep) {
67660 entry = huge_ptep_get(ptep);
67661@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67662 VM_FAULT_SET_HINDEX(h - hstates);
67663 }
67664
67665+#ifdef CONFIG_PAX_SEGMEXEC
67666+ vma_m = pax_find_mirror_vma(vma);
67667+ if (vma_m) {
67668+ unsigned long address_m;
67669+
67670+ if (vma->vm_start > vma_m->vm_start) {
67671+ address_m = address;
67672+ address -= SEGMEXEC_TASK_SIZE;
67673+ vma = vma_m;
67674+ h = hstate_vma(vma);
67675+ } else
67676+ address_m = address + SEGMEXEC_TASK_SIZE;
67677+
67678+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67679+ return VM_FAULT_OOM;
67680+ address_m &= HPAGE_MASK;
67681+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67682+ }
67683+#endif
67684+
67685 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67686 if (!ptep)
67687 return VM_FAULT_OOM;
67688diff --git a/mm/internal.h b/mm/internal.h
67689index 2189af4..f2ca332 100644
67690--- a/mm/internal.h
67691+++ b/mm/internal.h
67692@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67693 * in mm/page_alloc.c
67694 */
67695 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67696+extern void free_compound_page(struct page *page);
67697 extern void prep_compound_page(struct page *page, unsigned long order);
67698 #ifdef CONFIG_MEMORY_FAILURE
67699 extern bool is_free_buddy_page(struct page *page);
67700diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67701index f3b2a00..61da94d 100644
67702--- a/mm/kmemleak.c
67703+++ b/mm/kmemleak.c
67704@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67705
67706 for (i = 0; i < object->trace_len; i++) {
67707 void *ptr = (void *)object->trace[i];
67708- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67709+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67710 }
67711 }
67712
67713diff --git a/mm/maccess.c b/mm/maccess.c
67714index d53adf9..03a24bf 100644
67715--- a/mm/maccess.c
67716+++ b/mm/maccess.c
67717@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67718 set_fs(KERNEL_DS);
67719 pagefault_disable();
67720 ret = __copy_from_user_inatomic(dst,
67721- (__force const void __user *)src, size);
67722+ (const void __force_user *)src, size);
67723 pagefault_enable();
67724 set_fs(old_fs);
67725
67726@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67727
67728 set_fs(KERNEL_DS);
67729 pagefault_disable();
67730- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67731+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67732 pagefault_enable();
67733 set_fs(old_fs);
67734
67735diff --git a/mm/madvise.c b/mm/madvise.c
67736index 74bf193..feb6fd3 100644
67737--- a/mm/madvise.c
67738+++ b/mm/madvise.c
67739@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67740 pgoff_t pgoff;
67741 unsigned long new_flags = vma->vm_flags;
67742
67743+#ifdef CONFIG_PAX_SEGMEXEC
67744+ struct vm_area_struct *vma_m;
67745+#endif
67746+
67747 switch (behavior) {
67748 case MADV_NORMAL:
67749 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67750@@ -110,6 +114,13 @@ success:
67751 /*
67752 * vm_flags is protected by the mmap_sem held in write mode.
67753 */
67754+
67755+#ifdef CONFIG_PAX_SEGMEXEC
67756+ vma_m = pax_find_mirror_vma(vma);
67757+ if (vma_m)
67758+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67759+#endif
67760+
67761 vma->vm_flags = new_flags;
67762
67763 out:
67764@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67765 struct vm_area_struct ** prev,
67766 unsigned long start, unsigned long end)
67767 {
67768+
67769+#ifdef CONFIG_PAX_SEGMEXEC
67770+ struct vm_area_struct *vma_m;
67771+#endif
67772+
67773 *prev = vma;
67774 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67775 return -EINVAL;
67776@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67777 zap_page_range(vma, start, end - start, &details);
67778 } else
67779 zap_page_range(vma, start, end - start, NULL);
67780+
67781+#ifdef CONFIG_PAX_SEGMEXEC
67782+ vma_m = pax_find_mirror_vma(vma);
67783+ if (vma_m) {
67784+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67785+ struct zap_details details = {
67786+ .nonlinear_vma = vma_m,
67787+ .last_index = ULONG_MAX,
67788+ };
67789+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67790+ } else
67791+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67792+ }
67793+#endif
67794+
67795 return 0;
67796 }
67797
67798@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67799 if (end < start)
67800 goto out;
67801
67802+#ifdef CONFIG_PAX_SEGMEXEC
67803+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67804+ if (end > SEGMEXEC_TASK_SIZE)
67805+ goto out;
67806+ } else
67807+#endif
67808+
67809+ if (end > TASK_SIZE)
67810+ goto out;
67811+
67812 error = 0;
67813 if (end == start)
67814 goto out;
67815diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67816index 06d3479..0778eef 100644
67817--- a/mm/memory-failure.c
67818+++ b/mm/memory-failure.c
67819@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67820
67821 int sysctl_memory_failure_recovery __read_mostly = 1;
67822
67823-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67824+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67825
67826 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67827
67828@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67829 si.si_signo = SIGBUS;
67830 si.si_errno = 0;
67831 si.si_code = BUS_MCEERR_AO;
67832- si.si_addr = (void *)addr;
67833+ si.si_addr = (void __user *)addr;
67834 #ifdef __ARCH_SI_TRAPNO
67835 si.si_trapno = trapno;
67836 #endif
67837@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67838 }
67839
67840 nr_pages = 1 << compound_trans_order(hpage);
67841- atomic_long_add(nr_pages, &mce_bad_pages);
67842+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67843
67844 /*
67845 * We need/can do nothing about count=0 pages.
67846@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67847 if (!PageHWPoison(hpage)
67848 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67849 || (p != hpage && TestSetPageHWPoison(hpage))) {
67850- atomic_long_sub(nr_pages, &mce_bad_pages);
67851+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67852 return 0;
67853 }
67854 set_page_hwpoison_huge_page(hpage);
67855@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67856 }
67857 if (hwpoison_filter(p)) {
67858 if (TestClearPageHWPoison(p))
67859- atomic_long_sub(nr_pages, &mce_bad_pages);
67860+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67861 unlock_page(hpage);
67862 put_page(hpage);
67863 return 0;
67864@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67865 return 0;
67866 }
67867 if (TestClearPageHWPoison(p))
67868- atomic_long_sub(nr_pages, &mce_bad_pages);
67869+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67870 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67871 return 0;
67872 }
67873@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67874 */
67875 if (TestClearPageHWPoison(page)) {
67876 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67877- atomic_long_sub(nr_pages, &mce_bad_pages);
67878+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67879 freeit = 1;
67880 if (PageHuge(page))
67881 clear_page_hwpoison_huge_page(page);
67882@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67883 }
67884 done:
67885 if (!PageHWPoison(hpage))
67886- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67887+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67888 set_page_hwpoison_huge_page(hpage);
67889 dequeue_hwpoisoned_huge_page(hpage);
67890 /* keep elevated page count for bad page */
67891@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67892 return ret;
67893
67894 done:
67895- atomic_long_add(1, &mce_bad_pages);
67896+ atomic_long_add_unchecked(1, &mce_bad_pages);
67897 SetPageHWPoison(page);
67898 /* keep elevated page count for bad page */
67899 return ret;
67900diff --git a/mm/memory.c b/mm/memory.c
67901index 829d437..3d3926a 100644
67902--- a/mm/memory.c
67903+++ b/mm/memory.c
67904@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67905 return;
67906
67907 pmd = pmd_offset(pud, start);
67908+
67909+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67910 pud_clear(pud);
67911 pmd_free_tlb(tlb, pmd, start);
67912+#endif
67913+
67914 }
67915
67916 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67917@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67918 if (end - 1 > ceiling - 1)
67919 return;
67920
67921+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67922 pud = pud_offset(pgd, start);
67923 pgd_clear(pgd);
67924 pud_free_tlb(tlb, pud, start);
67925+#endif
67926+
67927 }
67928
67929 /*
67930@@ -1566,12 +1573,6 @@ no_page_table:
67931 return page;
67932 }
67933
67934-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67935-{
67936- return stack_guard_page_start(vma, addr) ||
67937- stack_guard_page_end(vma, addr+PAGE_SIZE);
67938-}
67939-
67940 /**
67941 * __get_user_pages() - pin user pages in memory
67942 * @tsk: task_struct of target task
67943@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67944 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67945 i = 0;
67946
67947- do {
67948+ while (nr_pages) {
67949 struct vm_area_struct *vma;
67950
67951- vma = find_extend_vma(mm, start);
67952+ vma = find_vma(mm, start);
67953 if (!vma && in_gate_area(mm, start)) {
67954 unsigned long pg = start & PAGE_MASK;
67955 pgd_t *pgd;
67956@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67957 goto next_page;
67958 }
67959
67960- if (!vma ||
67961+ if (!vma || start < vma->vm_start ||
67962 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67963 !(vm_flags & vma->vm_flags))
67964 return i ? : -EFAULT;
67965@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67966 int ret;
67967 unsigned int fault_flags = 0;
67968
67969- /* For mlock, just skip the stack guard page. */
67970- if (foll_flags & FOLL_MLOCK) {
67971- if (stack_guard_page(vma, start))
67972- goto next_page;
67973- }
67974 if (foll_flags & FOLL_WRITE)
67975 fault_flags |= FAULT_FLAG_WRITE;
67976 if (nonblocking)
67977@@ -1800,7 +1796,7 @@ next_page:
67978 start += PAGE_SIZE;
67979 nr_pages--;
67980 } while (nr_pages && start < vma->vm_end);
67981- } while (nr_pages);
67982+ }
67983 return i;
67984 }
67985 EXPORT_SYMBOL(__get_user_pages);
67986@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
67987 page_add_file_rmap(page);
67988 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67989
67990+#ifdef CONFIG_PAX_SEGMEXEC
67991+ pax_mirror_file_pte(vma, addr, page, ptl);
67992+#endif
67993+
67994 retval = 0;
67995 pte_unmap_unlock(pte, ptl);
67996 return retval;
67997@@ -2041,10 +2041,22 @@ out:
67998 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67999 struct page *page)
68000 {
68001+
68002+#ifdef CONFIG_PAX_SEGMEXEC
68003+ struct vm_area_struct *vma_m;
68004+#endif
68005+
68006 if (addr < vma->vm_start || addr >= vma->vm_end)
68007 return -EFAULT;
68008 if (!page_count(page))
68009 return -EINVAL;
68010+
68011+#ifdef CONFIG_PAX_SEGMEXEC
68012+ vma_m = pax_find_mirror_vma(vma);
68013+ if (vma_m)
68014+ vma_m->vm_flags |= VM_INSERTPAGE;
68015+#endif
68016+
68017 vma->vm_flags |= VM_INSERTPAGE;
68018 return insert_page(vma, addr, page, vma->vm_page_prot);
68019 }
68020@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68021 unsigned long pfn)
68022 {
68023 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68024+ BUG_ON(vma->vm_mirror);
68025
68026 if (addr < vma->vm_start || addr >= vma->vm_end)
68027 return -EFAULT;
68028@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68029 copy_user_highpage(dst, src, va, vma);
68030 }
68031
68032+#ifdef CONFIG_PAX_SEGMEXEC
68033+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68034+{
68035+ struct mm_struct *mm = vma->vm_mm;
68036+ spinlock_t *ptl;
68037+ pte_t *pte, entry;
68038+
68039+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68040+ entry = *pte;
68041+ if (!pte_present(entry)) {
68042+ if (!pte_none(entry)) {
68043+ BUG_ON(pte_file(entry));
68044+ free_swap_and_cache(pte_to_swp_entry(entry));
68045+ pte_clear_not_present_full(mm, address, pte, 0);
68046+ }
68047+ } else {
68048+ struct page *page;
68049+
68050+ flush_cache_page(vma, address, pte_pfn(entry));
68051+ entry = ptep_clear_flush(vma, address, pte);
68052+ BUG_ON(pte_dirty(entry));
68053+ page = vm_normal_page(vma, address, entry);
68054+ if (page) {
68055+ update_hiwater_rss(mm);
68056+ if (PageAnon(page))
68057+ dec_mm_counter_fast(mm, MM_ANONPAGES);
68058+ else
68059+ dec_mm_counter_fast(mm, MM_FILEPAGES);
68060+ page_remove_rmap(page);
68061+ page_cache_release(page);
68062+ }
68063+ }
68064+ pte_unmap_unlock(pte, ptl);
68065+}
68066+
68067+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68068+ *
68069+ * the ptl of the lower mapped page is held on entry and is not released on exit
68070+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68071+ */
68072+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68073+{
68074+ struct mm_struct *mm = vma->vm_mm;
68075+ unsigned long address_m;
68076+ spinlock_t *ptl_m;
68077+ struct vm_area_struct *vma_m;
68078+ pmd_t *pmd_m;
68079+ pte_t *pte_m, entry_m;
68080+
68081+ BUG_ON(!page_m || !PageAnon(page_m));
68082+
68083+ vma_m = pax_find_mirror_vma(vma);
68084+ if (!vma_m)
68085+ return;
68086+
68087+ BUG_ON(!PageLocked(page_m));
68088+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68089+ address_m = address + SEGMEXEC_TASK_SIZE;
68090+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68091+ pte_m = pte_offset_map(pmd_m, address_m);
68092+ ptl_m = pte_lockptr(mm, pmd_m);
68093+ if (ptl != ptl_m) {
68094+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68095+ if (!pte_none(*pte_m))
68096+ goto out;
68097+ }
68098+
68099+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68100+ page_cache_get(page_m);
68101+ page_add_anon_rmap(page_m, vma_m, address_m);
68102+ inc_mm_counter_fast(mm, MM_ANONPAGES);
68103+ set_pte_at(mm, address_m, pte_m, entry_m);
68104+ update_mmu_cache(vma_m, address_m, entry_m);
68105+out:
68106+ if (ptl != ptl_m)
68107+ spin_unlock(ptl_m);
68108+ pte_unmap(pte_m);
68109+ unlock_page(page_m);
68110+}
68111+
68112+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68113+{
68114+ struct mm_struct *mm = vma->vm_mm;
68115+ unsigned long address_m;
68116+ spinlock_t *ptl_m;
68117+ struct vm_area_struct *vma_m;
68118+ pmd_t *pmd_m;
68119+ pte_t *pte_m, entry_m;
68120+
68121+ BUG_ON(!page_m || PageAnon(page_m));
68122+
68123+ vma_m = pax_find_mirror_vma(vma);
68124+ if (!vma_m)
68125+ return;
68126+
68127+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68128+ address_m = address + SEGMEXEC_TASK_SIZE;
68129+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68130+ pte_m = pte_offset_map(pmd_m, address_m);
68131+ ptl_m = pte_lockptr(mm, pmd_m);
68132+ if (ptl != ptl_m) {
68133+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68134+ if (!pte_none(*pte_m))
68135+ goto out;
68136+ }
68137+
68138+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68139+ page_cache_get(page_m);
68140+ page_add_file_rmap(page_m);
68141+ inc_mm_counter_fast(mm, MM_FILEPAGES);
68142+ set_pte_at(mm, address_m, pte_m, entry_m);
68143+ update_mmu_cache(vma_m, address_m, entry_m);
68144+out:
68145+ if (ptl != ptl_m)
68146+ spin_unlock(ptl_m);
68147+ pte_unmap(pte_m);
68148+}
68149+
68150+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68151+{
68152+ struct mm_struct *mm = vma->vm_mm;
68153+ unsigned long address_m;
68154+ spinlock_t *ptl_m;
68155+ struct vm_area_struct *vma_m;
68156+ pmd_t *pmd_m;
68157+ pte_t *pte_m, entry_m;
68158+
68159+ vma_m = pax_find_mirror_vma(vma);
68160+ if (!vma_m)
68161+ return;
68162+
68163+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68164+ address_m = address + SEGMEXEC_TASK_SIZE;
68165+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68166+ pte_m = pte_offset_map(pmd_m, address_m);
68167+ ptl_m = pte_lockptr(mm, pmd_m);
68168+ if (ptl != ptl_m) {
68169+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68170+ if (!pte_none(*pte_m))
68171+ goto out;
68172+ }
68173+
68174+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68175+ set_pte_at(mm, address_m, pte_m, entry_m);
68176+out:
68177+ if (ptl != ptl_m)
68178+ spin_unlock(ptl_m);
68179+ pte_unmap(pte_m);
68180+}
68181+
68182+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68183+{
68184+ struct page *page_m;
68185+ pte_t entry;
68186+
68187+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68188+ goto out;
68189+
68190+ entry = *pte;
68191+ page_m = vm_normal_page(vma, address, entry);
68192+ if (!page_m)
68193+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68194+ else if (PageAnon(page_m)) {
68195+ if (pax_find_mirror_vma(vma)) {
68196+ pte_unmap_unlock(pte, ptl);
68197+ lock_page(page_m);
68198+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68199+ if (pte_same(entry, *pte))
68200+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68201+ else
68202+ unlock_page(page_m);
68203+ }
68204+ } else
68205+ pax_mirror_file_pte(vma, address, page_m, ptl);
68206+
68207+out:
68208+ pte_unmap_unlock(pte, ptl);
68209+}
68210+#endif
68211+
68212 /*
68213 * This routine handles present pages, when users try to write
68214 * to a shared page. It is done by copying the page to a new address
68215@@ -2656,6 +2849,12 @@ gotten:
68216 */
68217 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68218 if (likely(pte_same(*page_table, orig_pte))) {
68219+
68220+#ifdef CONFIG_PAX_SEGMEXEC
68221+ if (pax_find_mirror_vma(vma))
68222+ BUG_ON(!trylock_page(new_page));
68223+#endif
68224+
68225 if (old_page) {
68226 if (!PageAnon(old_page)) {
68227 dec_mm_counter_fast(mm, MM_FILEPAGES);
68228@@ -2707,6 +2906,10 @@ gotten:
68229 page_remove_rmap(old_page);
68230 }
68231
68232+#ifdef CONFIG_PAX_SEGMEXEC
68233+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68234+#endif
68235+
68236 /* Free the old page.. */
68237 new_page = old_page;
68238 ret |= VM_FAULT_WRITE;
68239@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68240 swap_free(entry);
68241 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68242 try_to_free_swap(page);
68243+
68244+#ifdef CONFIG_PAX_SEGMEXEC
68245+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68246+#endif
68247+
68248 unlock_page(page);
68249 if (swapcache) {
68250 /*
68251@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68252
68253 /* No need to invalidate - it was non-present before */
68254 update_mmu_cache(vma, address, page_table);
68255+
68256+#ifdef CONFIG_PAX_SEGMEXEC
68257+ pax_mirror_anon_pte(vma, address, page, ptl);
68258+#endif
68259+
68260 unlock:
68261 pte_unmap_unlock(page_table, ptl);
68262 out:
68263@@ -3028,40 +3241,6 @@ out_release:
68264 }
68265
68266 /*
68267- * This is like a special single-page "expand_{down|up}wards()",
68268- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68269- * doesn't hit another vma.
68270- */
68271-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68272-{
68273- address &= PAGE_MASK;
68274- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68275- struct vm_area_struct *prev = vma->vm_prev;
68276-
68277- /*
68278- * Is there a mapping abutting this one below?
68279- *
68280- * That's only ok if it's the same stack mapping
68281- * that has gotten split..
68282- */
68283- if (prev && prev->vm_end == address)
68284- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68285-
68286- expand_downwards(vma, address - PAGE_SIZE);
68287- }
68288- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68289- struct vm_area_struct *next = vma->vm_next;
68290-
68291- /* As VM_GROWSDOWN but s/below/above/ */
68292- if (next && next->vm_start == address + PAGE_SIZE)
68293- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68294-
68295- expand_upwards(vma, address + PAGE_SIZE);
68296- }
68297- return 0;
68298-}
68299-
68300-/*
68301 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68302 * but allow concurrent faults), and pte mapped but not yet locked.
68303 * We return with mmap_sem still held, but pte unmapped and unlocked.
68304@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68305 unsigned long address, pte_t *page_table, pmd_t *pmd,
68306 unsigned int flags)
68307 {
68308- struct page *page;
68309+ struct page *page = NULL;
68310 spinlock_t *ptl;
68311 pte_t entry;
68312
68313- pte_unmap(page_table);
68314-
68315- /* Check if we need to add a guard page to the stack */
68316- if (check_stack_guard_page(vma, address) < 0)
68317- return VM_FAULT_SIGBUS;
68318-
68319- /* Use the zero-page for reads */
68320 if (!(flags & FAULT_FLAG_WRITE)) {
68321 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68322 vma->vm_page_prot));
68323- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68324+ ptl = pte_lockptr(mm, pmd);
68325+ spin_lock(ptl);
68326 if (!pte_none(*page_table))
68327 goto unlock;
68328 goto setpte;
68329 }
68330
68331 /* Allocate our own private page. */
68332+ pte_unmap(page_table);
68333+
68334 if (unlikely(anon_vma_prepare(vma)))
68335 goto oom;
68336 page = alloc_zeroed_user_highpage_movable(vma, address);
68337@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68338 if (!pte_none(*page_table))
68339 goto release;
68340
68341+#ifdef CONFIG_PAX_SEGMEXEC
68342+ if (pax_find_mirror_vma(vma))
68343+ BUG_ON(!trylock_page(page));
68344+#endif
68345+
68346 inc_mm_counter_fast(mm, MM_ANONPAGES);
68347 page_add_new_anon_rmap(page, vma, address);
68348 setpte:
68349@@ -3116,6 +3296,12 @@ setpte:
68350
68351 /* No need to invalidate - it was non-present before */
68352 update_mmu_cache(vma, address, page_table);
68353+
68354+#ifdef CONFIG_PAX_SEGMEXEC
68355+ if (page)
68356+ pax_mirror_anon_pte(vma, address, page, ptl);
68357+#endif
68358+
68359 unlock:
68360 pte_unmap_unlock(page_table, ptl);
68361 return 0;
68362@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68363 */
68364 /* Only go through if we didn't race with anybody else... */
68365 if (likely(pte_same(*page_table, orig_pte))) {
68366+
68367+#ifdef CONFIG_PAX_SEGMEXEC
68368+ if (anon && pax_find_mirror_vma(vma))
68369+ BUG_ON(!trylock_page(page));
68370+#endif
68371+
68372 flush_icache_page(vma, page);
68373 entry = mk_pte(page, vma->vm_page_prot);
68374 if (flags & FAULT_FLAG_WRITE)
68375@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68376
68377 /* no need to invalidate: a not-present page won't be cached */
68378 update_mmu_cache(vma, address, page_table);
68379+
68380+#ifdef CONFIG_PAX_SEGMEXEC
68381+ if (anon)
68382+ pax_mirror_anon_pte(vma, address, page, ptl);
68383+ else
68384+ pax_mirror_file_pte(vma, address, page, ptl);
68385+#endif
68386+
68387 } else {
68388 if (cow_page)
68389 mem_cgroup_uncharge_page(cow_page);
68390@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68391 if (flags & FAULT_FLAG_WRITE)
68392 flush_tlb_fix_spurious_fault(vma, address);
68393 }
68394+
68395+#ifdef CONFIG_PAX_SEGMEXEC
68396+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68397+ return 0;
68398+#endif
68399+
68400 unlock:
68401 pte_unmap_unlock(pte, ptl);
68402 return 0;
68403@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68404 pmd_t *pmd;
68405 pte_t *pte;
68406
68407+#ifdef CONFIG_PAX_SEGMEXEC
68408+ struct vm_area_struct *vma_m;
68409+#endif
68410+
68411 __set_current_state(TASK_RUNNING);
68412
68413 count_vm_event(PGFAULT);
68414@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68415 if (unlikely(is_vm_hugetlb_page(vma)))
68416 return hugetlb_fault(mm, vma, address, flags);
68417
68418+#ifdef CONFIG_PAX_SEGMEXEC
68419+ vma_m = pax_find_mirror_vma(vma);
68420+ if (vma_m) {
68421+ unsigned long address_m;
68422+ pgd_t *pgd_m;
68423+ pud_t *pud_m;
68424+ pmd_t *pmd_m;
68425+
68426+ if (vma->vm_start > vma_m->vm_start) {
68427+ address_m = address;
68428+ address -= SEGMEXEC_TASK_SIZE;
68429+ vma = vma_m;
68430+ } else
68431+ address_m = address + SEGMEXEC_TASK_SIZE;
68432+
68433+ pgd_m = pgd_offset(mm, address_m);
68434+ pud_m = pud_alloc(mm, pgd_m, address_m);
68435+ if (!pud_m)
68436+ return VM_FAULT_OOM;
68437+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68438+ if (!pmd_m)
68439+ return VM_FAULT_OOM;
68440+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68441+ return VM_FAULT_OOM;
68442+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68443+ }
68444+#endif
68445+
68446 pgd = pgd_offset(mm, address);
68447 pud = pud_alloc(mm, pgd, address);
68448 if (!pud)
68449@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68450 * run pte_offset_map on the pmd, if an huge pmd could
68451 * materialize from under us from a different thread.
68452 */
68453- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68454+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68455 return VM_FAULT_OOM;
68456 /* if an huge pmd materialized from under us just retry later */
68457 if (unlikely(pmd_trans_huge(*pmd)))
68458@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68459 gate_vma.vm_start = FIXADDR_USER_START;
68460 gate_vma.vm_end = FIXADDR_USER_END;
68461 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68462- gate_vma.vm_page_prot = __P101;
68463+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68464 /*
68465 * Make sure the vDSO gets into every core dump.
68466 * Dumping its contents makes post-mortem fully interpretable later
68467diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68468index c3fdbcb..2e8ef90 100644
68469--- a/mm/mempolicy.c
68470+++ b/mm/mempolicy.c
68471@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68472 unsigned long vmstart;
68473 unsigned long vmend;
68474
68475+#ifdef CONFIG_PAX_SEGMEXEC
68476+ struct vm_area_struct *vma_m;
68477+#endif
68478+
68479 vma = find_vma_prev(mm, start, &prev);
68480 if (!vma || vma->vm_start > start)
68481 return -EFAULT;
68482@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68483 err = policy_vma(vma, new_pol);
68484 if (err)
68485 goto out;
68486+
68487+#ifdef CONFIG_PAX_SEGMEXEC
68488+ vma_m = pax_find_mirror_vma(vma);
68489+ if (vma_m) {
68490+ err = policy_vma(vma_m, new_pol);
68491+ if (err)
68492+ goto out;
68493+ }
68494+#endif
68495+
68496 }
68497
68498 out:
68499@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68500
68501 if (end < start)
68502 return -EINVAL;
68503+
68504+#ifdef CONFIG_PAX_SEGMEXEC
68505+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68506+ if (end > SEGMEXEC_TASK_SIZE)
68507+ return -EINVAL;
68508+ } else
68509+#endif
68510+
68511+ if (end > TASK_SIZE)
68512+ return -EINVAL;
68513+
68514 if (end == start)
68515 return 0;
68516
68517@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68518 if (!mm)
68519 goto out;
68520
68521+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68522+ if (mm != current->mm &&
68523+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68524+ err = -EPERM;
68525+ goto out;
68526+ }
68527+#endif
68528+
68529 /*
68530 * Check if this process has the right to modify the specified
68531 * process. The right exists if the process has administrative
68532@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68533 rcu_read_lock();
68534 tcred = __task_cred(task);
68535 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68536- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68537- !capable(CAP_SYS_NICE)) {
68538+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68539 rcu_read_unlock();
68540 err = -EPERM;
68541 goto out;
68542diff --git a/mm/migrate.c b/mm/migrate.c
68543index 177aca4..ab3a744 100644
68544--- a/mm/migrate.c
68545+++ b/mm/migrate.c
68546@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68547 if (!mm)
68548 return -EINVAL;
68549
68550+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68551+ if (mm != current->mm &&
68552+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68553+ err = -EPERM;
68554+ goto out;
68555+ }
68556+#endif
68557+
68558 /*
68559 * Check if this process has the right to modify the specified
68560 * process. The right exists if the process has administrative
68561@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68562 rcu_read_lock();
68563 tcred = __task_cred(task);
68564 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68565- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68566- !capable(CAP_SYS_NICE)) {
68567+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68568 rcu_read_unlock();
68569 err = -EPERM;
68570 goto out;
68571diff --git a/mm/mlock.c b/mm/mlock.c
68572index 4f4f53b..9511904 100644
68573--- a/mm/mlock.c
68574+++ b/mm/mlock.c
68575@@ -13,6 +13,7 @@
68576 #include <linux/pagemap.h>
68577 #include <linux/mempolicy.h>
68578 #include <linux/syscalls.h>
68579+#include <linux/security.h>
68580 #include <linux/sched.h>
68581 #include <linux/export.h>
68582 #include <linux/rmap.h>
68583@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68584 return -EINVAL;
68585 if (end == start)
68586 return 0;
68587+ if (end > TASK_SIZE)
68588+ return -EINVAL;
68589+
68590 vma = find_vma_prev(current->mm, start, &prev);
68591 if (!vma || vma->vm_start > start)
68592 return -ENOMEM;
68593@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68594 for (nstart = start ; ; ) {
68595 vm_flags_t newflags;
68596
68597+#ifdef CONFIG_PAX_SEGMEXEC
68598+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68599+ break;
68600+#endif
68601+
68602 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68603
68604 newflags = vma->vm_flags | VM_LOCKED;
68605@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68606 lock_limit >>= PAGE_SHIFT;
68607
68608 /* check against resource limits */
68609+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68610 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68611 error = do_mlock(start, len, 1);
68612 up_write(&current->mm->mmap_sem);
68613@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68614 static int do_mlockall(int flags)
68615 {
68616 struct vm_area_struct * vma, * prev = NULL;
68617- unsigned int def_flags = 0;
68618
68619 if (flags & MCL_FUTURE)
68620- def_flags = VM_LOCKED;
68621- current->mm->def_flags = def_flags;
68622+ current->mm->def_flags |= VM_LOCKED;
68623+ else
68624+ current->mm->def_flags &= ~VM_LOCKED;
68625 if (flags == MCL_FUTURE)
68626 goto out;
68627
68628 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68629 vm_flags_t newflags;
68630
68631+#ifdef CONFIG_PAX_SEGMEXEC
68632+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68633+ break;
68634+#endif
68635+
68636+ BUG_ON(vma->vm_end > TASK_SIZE);
68637 newflags = vma->vm_flags | VM_LOCKED;
68638 if (!(flags & MCL_CURRENT))
68639 newflags &= ~VM_LOCKED;
68640@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68641 lock_limit >>= PAGE_SHIFT;
68642
68643 ret = -ENOMEM;
68644+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68645 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68646 capable(CAP_IPC_LOCK))
68647 ret = do_mlockall(flags);
68648diff --git a/mm/mmap.c b/mm/mmap.c
68649index eae90af..51ca80b 100644
68650--- a/mm/mmap.c
68651+++ b/mm/mmap.c
68652@@ -46,6 +46,16 @@
68653 #define arch_rebalance_pgtables(addr, len) (addr)
68654 #endif
68655
68656+static inline void verify_mm_writelocked(struct mm_struct *mm)
68657+{
68658+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68659+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68660+ up_read(&mm->mmap_sem);
68661+ BUG();
68662+ }
68663+#endif
68664+}
68665+
68666 static void unmap_region(struct mm_struct *mm,
68667 struct vm_area_struct *vma, struct vm_area_struct *prev,
68668 unsigned long start, unsigned long end);
68669@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68670 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68671 *
68672 */
68673-pgprot_t protection_map[16] = {
68674+pgprot_t protection_map[16] __read_only = {
68675 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68676 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68677 };
68678
68679-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68680+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68681 {
68682- return __pgprot(pgprot_val(protection_map[vm_flags &
68683+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68684 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68685 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68686+
68687+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68688+ if (!(__supported_pte_mask & _PAGE_NX) &&
68689+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68690+ (vm_flags & (VM_READ | VM_WRITE)))
68691+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68692+#endif
68693+
68694+ return prot;
68695 }
68696 EXPORT_SYMBOL(vm_get_page_prot);
68697
68698 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68699 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68700 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68701+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68702 /*
68703 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68704 * other variables. It can be updated by several CPUs frequently.
68705@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68706 struct vm_area_struct *next = vma->vm_next;
68707
68708 might_sleep();
68709+ BUG_ON(vma->vm_mirror);
68710 if (vma->vm_ops && vma->vm_ops->close)
68711 vma->vm_ops->close(vma);
68712 if (vma->vm_file) {
68713@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68714 * not page aligned -Ram Gupta
68715 */
68716 rlim = rlimit(RLIMIT_DATA);
68717+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68718 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68719 (mm->end_data - mm->start_data) > rlim)
68720 goto out;
68721@@ -689,6 +711,12 @@ static int
68722 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68723 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68724 {
68725+
68726+#ifdef CONFIG_PAX_SEGMEXEC
68727+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68728+ return 0;
68729+#endif
68730+
68731 if (is_mergeable_vma(vma, file, vm_flags) &&
68732 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68733 if (vma->vm_pgoff == vm_pgoff)
68734@@ -708,6 +736,12 @@ static int
68735 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68736 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68737 {
68738+
68739+#ifdef CONFIG_PAX_SEGMEXEC
68740+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68741+ return 0;
68742+#endif
68743+
68744 if (is_mergeable_vma(vma, file, vm_flags) &&
68745 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68746 pgoff_t vm_pglen;
68747@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68748 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68749 struct vm_area_struct *prev, unsigned long addr,
68750 unsigned long end, unsigned long vm_flags,
68751- struct anon_vma *anon_vma, struct file *file,
68752+ struct anon_vma *anon_vma, struct file *file,
68753 pgoff_t pgoff, struct mempolicy *policy)
68754 {
68755 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68756 struct vm_area_struct *area, *next;
68757 int err;
68758
68759+#ifdef CONFIG_PAX_SEGMEXEC
68760+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68761+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68762+
68763+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68764+#endif
68765+
68766 /*
68767 * We later require that vma->vm_flags == vm_flags,
68768 * so this tests vma->vm_flags & VM_SPECIAL, too.
68769@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68770 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68771 next = next->vm_next;
68772
68773+#ifdef CONFIG_PAX_SEGMEXEC
68774+ if (prev)
68775+ prev_m = pax_find_mirror_vma(prev);
68776+ if (area)
68777+ area_m = pax_find_mirror_vma(area);
68778+ if (next)
68779+ next_m = pax_find_mirror_vma(next);
68780+#endif
68781+
68782 /*
68783 * Can it merge with the predecessor?
68784 */
68785@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68786 /* cases 1, 6 */
68787 err = vma_adjust(prev, prev->vm_start,
68788 next->vm_end, prev->vm_pgoff, NULL);
68789- } else /* cases 2, 5, 7 */
68790+
68791+#ifdef CONFIG_PAX_SEGMEXEC
68792+ if (!err && prev_m)
68793+ err = vma_adjust(prev_m, prev_m->vm_start,
68794+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68795+#endif
68796+
68797+ } else { /* cases 2, 5, 7 */
68798 err = vma_adjust(prev, prev->vm_start,
68799 end, prev->vm_pgoff, NULL);
68800+
68801+#ifdef CONFIG_PAX_SEGMEXEC
68802+ if (!err && prev_m)
68803+ err = vma_adjust(prev_m, prev_m->vm_start,
68804+ end_m, prev_m->vm_pgoff, NULL);
68805+#endif
68806+
68807+ }
68808 if (err)
68809 return NULL;
68810 khugepaged_enter_vma_merge(prev);
68811@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68812 mpol_equal(policy, vma_policy(next)) &&
68813 can_vma_merge_before(next, vm_flags,
68814 anon_vma, file, pgoff+pglen)) {
68815- if (prev && addr < prev->vm_end) /* case 4 */
68816+ if (prev && addr < prev->vm_end) { /* case 4 */
68817 err = vma_adjust(prev, prev->vm_start,
68818 addr, prev->vm_pgoff, NULL);
68819- else /* cases 3, 8 */
68820+
68821+#ifdef CONFIG_PAX_SEGMEXEC
68822+ if (!err && prev_m)
68823+ err = vma_adjust(prev_m, prev_m->vm_start,
68824+ addr_m, prev_m->vm_pgoff, NULL);
68825+#endif
68826+
68827+ } else { /* cases 3, 8 */
68828 err = vma_adjust(area, addr, next->vm_end,
68829 next->vm_pgoff - pglen, NULL);
68830+
68831+#ifdef CONFIG_PAX_SEGMEXEC
68832+ if (!err && area_m)
68833+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68834+ next_m->vm_pgoff - pglen, NULL);
68835+#endif
68836+
68837+ }
68838 if (err)
68839 return NULL;
68840 khugepaged_enter_vma_merge(area);
68841@@ -921,14 +1001,11 @@ none:
68842 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68843 struct file *file, long pages)
68844 {
68845- const unsigned long stack_flags
68846- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68847-
68848 if (file) {
68849 mm->shared_vm += pages;
68850 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68851 mm->exec_vm += pages;
68852- } else if (flags & stack_flags)
68853+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68854 mm->stack_vm += pages;
68855 if (flags & (VM_RESERVED|VM_IO))
68856 mm->reserved_vm += pages;
68857@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68858 * (the exception is when the underlying filesystem is noexec
68859 * mounted, in which case we dont add PROT_EXEC.)
68860 */
68861- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68862+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68863 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68864 prot |= PROT_EXEC;
68865
68866@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68867 /* Obtain the address to map to. we verify (or select) it and ensure
68868 * that it represents a valid section of the address space.
68869 */
68870- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68871+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68872 if (addr & ~PAGE_MASK)
68873 return addr;
68874
68875@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68876 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68877 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68878
68879+#ifdef CONFIG_PAX_MPROTECT
68880+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68881+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68882+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68883+ gr_log_rwxmmap(file);
68884+
68885+#ifdef CONFIG_PAX_EMUPLT
68886+ vm_flags &= ~VM_EXEC;
68887+#else
68888+ return -EPERM;
68889+#endif
68890+
68891+ }
68892+
68893+ if (!(vm_flags & VM_EXEC))
68894+ vm_flags &= ~VM_MAYEXEC;
68895+#else
68896+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68897+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68898+#endif
68899+ else
68900+ vm_flags &= ~VM_MAYWRITE;
68901+ }
68902+#endif
68903+
68904+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68905+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68906+ vm_flags &= ~VM_PAGEEXEC;
68907+#endif
68908+
68909 if (flags & MAP_LOCKED)
68910 if (!can_do_mlock())
68911 return -EPERM;
68912@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68913 locked += mm->locked_vm;
68914 lock_limit = rlimit(RLIMIT_MEMLOCK);
68915 lock_limit >>= PAGE_SHIFT;
68916+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68917 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68918 return -EAGAIN;
68919 }
68920@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68921 if (error)
68922 return error;
68923
68924+ if (!gr_acl_handle_mmap(file, prot))
68925+ return -EACCES;
68926+
68927 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68928 }
68929 EXPORT_SYMBOL(do_mmap_pgoff);
68930@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68931 vm_flags_t vm_flags = vma->vm_flags;
68932
68933 /* If it was private or non-writable, the write bit is already clear */
68934- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68935+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68936 return 0;
68937
68938 /* The backer wishes to know when pages are first written to? */
68939@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68940 unsigned long charged = 0;
68941 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68942
68943+#ifdef CONFIG_PAX_SEGMEXEC
68944+ struct vm_area_struct *vma_m = NULL;
68945+#endif
68946+
68947+ /*
68948+ * mm->mmap_sem is required to protect against another thread
68949+ * changing the mappings in case we sleep.
68950+ */
68951+ verify_mm_writelocked(mm);
68952+
68953 /* Clear old maps */
68954 error = -ENOMEM;
68955-munmap_back:
68956 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68957 if (vma && vma->vm_start < addr + len) {
68958 if (do_munmap(mm, addr, len))
68959 return -ENOMEM;
68960- goto munmap_back;
68961+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68962+ BUG_ON(vma && vma->vm_start < addr + len);
68963 }
68964
68965 /* Check against address space limit. */
68966@@ -1258,6 +1379,16 @@ munmap_back:
68967 goto unacct_error;
68968 }
68969
68970+#ifdef CONFIG_PAX_SEGMEXEC
68971+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68972+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68973+ if (!vma_m) {
68974+ error = -ENOMEM;
68975+ goto free_vma;
68976+ }
68977+ }
68978+#endif
68979+
68980 vma->vm_mm = mm;
68981 vma->vm_start = addr;
68982 vma->vm_end = addr + len;
68983@@ -1281,6 +1412,19 @@ munmap_back:
68984 error = file->f_op->mmap(file, vma);
68985 if (error)
68986 goto unmap_and_free_vma;
68987+
68988+#ifdef CONFIG_PAX_SEGMEXEC
68989+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68990+ added_exe_file_vma(mm);
68991+#endif
68992+
68993+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68994+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68995+ vma->vm_flags |= VM_PAGEEXEC;
68996+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68997+ }
68998+#endif
68999+
69000 if (vm_flags & VM_EXECUTABLE)
69001 added_exe_file_vma(mm);
69002
69003@@ -1316,6 +1460,11 @@ munmap_back:
69004 vma_link(mm, vma, prev, rb_link, rb_parent);
69005 file = vma->vm_file;
69006
69007+#ifdef CONFIG_PAX_SEGMEXEC
69008+ if (vma_m)
69009+ BUG_ON(pax_mirror_vma(vma_m, vma));
69010+#endif
69011+
69012 /* Once vma denies write, undo our temporary denial count */
69013 if (correct_wcount)
69014 atomic_inc(&inode->i_writecount);
69015@@ -1324,6 +1473,7 @@ out:
69016
69017 mm->total_vm += len >> PAGE_SHIFT;
69018 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69019+ track_exec_limit(mm, addr, addr + len, vm_flags);
69020 if (vm_flags & VM_LOCKED) {
69021 if (!mlock_vma_pages_range(vma, addr, addr + len))
69022 mm->locked_vm += (len >> PAGE_SHIFT);
69023@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69024 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69025 charged = 0;
69026 free_vma:
69027+
69028+#ifdef CONFIG_PAX_SEGMEXEC
69029+ if (vma_m)
69030+ kmem_cache_free(vm_area_cachep, vma_m);
69031+#endif
69032+
69033 kmem_cache_free(vm_area_cachep, vma);
69034 unacct_error:
69035 if (charged)
69036@@ -1348,6 +1504,44 @@ unacct_error:
69037 return error;
69038 }
69039
69040+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69041+{
69042+ if (!vma) {
69043+#ifdef CONFIG_STACK_GROWSUP
69044+ if (addr > sysctl_heap_stack_gap)
69045+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69046+ else
69047+ vma = find_vma(current->mm, 0);
69048+ if (vma && (vma->vm_flags & VM_GROWSUP))
69049+ return false;
69050+#endif
69051+ return true;
69052+ }
69053+
69054+ if (addr + len > vma->vm_start)
69055+ return false;
69056+
69057+ if (vma->vm_flags & VM_GROWSDOWN)
69058+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69059+#ifdef CONFIG_STACK_GROWSUP
69060+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69061+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69062+#endif
69063+
69064+ return true;
69065+}
69066+
69067+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69068+{
69069+ if (vma->vm_start < len)
69070+ return -ENOMEM;
69071+ if (!(vma->vm_flags & VM_GROWSDOWN))
69072+ return vma->vm_start - len;
69073+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69074+ return vma->vm_start - len - sysctl_heap_stack_gap;
69075+ return -ENOMEM;
69076+}
69077+
69078 /* Get an address range which is currently unmapped.
69079 * For shmat() with addr=0.
69080 *
69081@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69082 if (flags & MAP_FIXED)
69083 return addr;
69084
69085+#ifdef CONFIG_PAX_RANDMMAP
69086+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69087+#endif
69088+
69089 if (addr) {
69090 addr = PAGE_ALIGN(addr);
69091- vma = find_vma(mm, addr);
69092- if (TASK_SIZE - len >= addr &&
69093- (!vma || addr + len <= vma->vm_start))
69094- return addr;
69095+ if (TASK_SIZE - len >= addr) {
69096+ vma = find_vma(mm, addr);
69097+ if (check_heap_stack_gap(vma, addr, len))
69098+ return addr;
69099+ }
69100 }
69101 if (len > mm->cached_hole_size) {
69102- start_addr = addr = mm->free_area_cache;
69103+ start_addr = addr = mm->free_area_cache;
69104 } else {
69105- start_addr = addr = TASK_UNMAPPED_BASE;
69106- mm->cached_hole_size = 0;
69107+ start_addr = addr = mm->mmap_base;
69108+ mm->cached_hole_size = 0;
69109 }
69110
69111 full_search:
69112@@ -1396,34 +1595,40 @@ full_search:
69113 * Start a new search - just in case we missed
69114 * some holes.
69115 */
69116- if (start_addr != TASK_UNMAPPED_BASE) {
69117- addr = TASK_UNMAPPED_BASE;
69118- start_addr = addr;
69119+ if (start_addr != mm->mmap_base) {
69120+ start_addr = addr = mm->mmap_base;
69121 mm->cached_hole_size = 0;
69122 goto full_search;
69123 }
69124 return -ENOMEM;
69125 }
69126- if (!vma || addr + len <= vma->vm_start) {
69127- /*
69128- * Remember the place where we stopped the search:
69129- */
69130- mm->free_area_cache = addr + len;
69131- return addr;
69132- }
69133+ if (check_heap_stack_gap(vma, addr, len))
69134+ break;
69135 if (addr + mm->cached_hole_size < vma->vm_start)
69136 mm->cached_hole_size = vma->vm_start - addr;
69137 addr = vma->vm_end;
69138 }
69139+
69140+ /*
69141+ * Remember the place where we stopped the search:
69142+ */
69143+ mm->free_area_cache = addr + len;
69144+ return addr;
69145 }
69146 #endif
69147
69148 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69149 {
69150+
69151+#ifdef CONFIG_PAX_SEGMEXEC
69152+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69153+ return;
69154+#endif
69155+
69156 /*
69157 * Is this a new hole at the lowest possible address?
69158 */
69159- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69160+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69161 mm->free_area_cache = addr;
69162 mm->cached_hole_size = ~0UL;
69163 }
69164@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69165 {
69166 struct vm_area_struct *vma;
69167 struct mm_struct *mm = current->mm;
69168- unsigned long addr = addr0;
69169+ unsigned long base = mm->mmap_base, addr = addr0;
69170
69171 /* requested length too big for entire address space */
69172 if (len > TASK_SIZE)
69173@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69174 if (flags & MAP_FIXED)
69175 return addr;
69176
69177+#ifdef CONFIG_PAX_RANDMMAP
69178+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69179+#endif
69180+
69181 /* requesting a specific address */
69182 if (addr) {
69183 addr = PAGE_ALIGN(addr);
69184- vma = find_vma(mm, addr);
69185- if (TASK_SIZE - len >= addr &&
69186- (!vma || addr + len <= vma->vm_start))
69187- return addr;
69188+ if (TASK_SIZE - len >= addr) {
69189+ vma = find_vma(mm, addr);
69190+ if (check_heap_stack_gap(vma, addr, len))
69191+ return addr;
69192+ }
69193 }
69194
69195 /* check if free_area_cache is useful for us */
69196@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69197 /* make sure it can fit in the remaining address space */
69198 if (addr > len) {
69199 vma = find_vma(mm, addr-len);
69200- if (!vma || addr <= vma->vm_start)
69201+ if (check_heap_stack_gap(vma, addr - len, len))
69202 /* remember the address as a hint for next time */
69203 return (mm->free_area_cache = addr-len);
69204 }
69205@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69206 * return with success:
69207 */
69208 vma = find_vma(mm, addr);
69209- if (!vma || addr+len <= vma->vm_start)
69210+ if (check_heap_stack_gap(vma, addr, len))
69211 /* remember the address as a hint for next time */
69212 return (mm->free_area_cache = addr);
69213
69214@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69215 mm->cached_hole_size = vma->vm_start - addr;
69216
69217 /* try just below the current vma->vm_start */
69218- addr = vma->vm_start-len;
69219- } while (len < vma->vm_start);
69220+ addr = skip_heap_stack_gap(vma, len);
69221+ } while (!IS_ERR_VALUE(addr));
69222
69223 bottomup:
69224 /*
69225@@ -1507,13 +1717,21 @@ bottomup:
69226 * can happen with large stack limits and large mmap()
69227 * allocations.
69228 */
69229+ mm->mmap_base = TASK_UNMAPPED_BASE;
69230+
69231+#ifdef CONFIG_PAX_RANDMMAP
69232+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69233+ mm->mmap_base += mm->delta_mmap;
69234+#endif
69235+
69236+ mm->free_area_cache = mm->mmap_base;
69237 mm->cached_hole_size = ~0UL;
69238- mm->free_area_cache = TASK_UNMAPPED_BASE;
69239 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69240 /*
69241 * Restore the topdown base:
69242 */
69243- mm->free_area_cache = mm->mmap_base;
69244+ mm->mmap_base = base;
69245+ mm->free_area_cache = base;
69246 mm->cached_hole_size = ~0UL;
69247
69248 return addr;
69249@@ -1522,6 +1740,12 @@ bottomup:
69250
69251 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69252 {
69253+
69254+#ifdef CONFIG_PAX_SEGMEXEC
69255+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69256+ return;
69257+#endif
69258+
69259 /*
69260 * Is this a new hole at the highest possible address?
69261 */
69262@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69263 mm->free_area_cache = addr;
69264
69265 /* dont allow allocations above current base */
69266- if (mm->free_area_cache > mm->mmap_base)
69267+ if (mm->free_area_cache > mm->mmap_base) {
69268 mm->free_area_cache = mm->mmap_base;
69269+ mm->cached_hole_size = ~0UL;
69270+ }
69271 }
69272
69273 unsigned long
69274@@ -1638,6 +1864,28 @@ out:
69275 return prev ? prev->vm_next : vma;
69276 }
69277
69278+#ifdef CONFIG_PAX_SEGMEXEC
69279+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69280+{
69281+ struct vm_area_struct *vma_m;
69282+
69283+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69284+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69285+ BUG_ON(vma->vm_mirror);
69286+ return NULL;
69287+ }
69288+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69289+ vma_m = vma->vm_mirror;
69290+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69291+ BUG_ON(vma->vm_file != vma_m->vm_file);
69292+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69293+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69294+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69295+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69296+ return vma_m;
69297+}
69298+#endif
69299+
69300 /*
69301 * Verify that the stack growth is acceptable and
69302 * update accounting. This is shared with both the
69303@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69304 return -ENOMEM;
69305
69306 /* Stack limit test */
69307+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69308 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69309 return -ENOMEM;
69310
69311@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69312 locked = mm->locked_vm + grow;
69313 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69314 limit >>= PAGE_SHIFT;
69315+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69316 if (locked > limit && !capable(CAP_IPC_LOCK))
69317 return -ENOMEM;
69318 }
69319@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69320 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69321 * vma is the last one with address > vma->vm_end. Have to extend vma.
69322 */
69323+#ifndef CONFIG_IA64
69324+static
69325+#endif
69326 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69327 {
69328 int error;
69329+ bool locknext;
69330
69331 if (!(vma->vm_flags & VM_GROWSUP))
69332 return -EFAULT;
69333
69334+ /* Also guard against wrapping around to address 0. */
69335+ if (address < PAGE_ALIGN(address+1))
69336+ address = PAGE_ALIGN(address+1);
69337+ else
69338+ return -ENOMEM;
69339+
69340 /*
69341 * We must make sure the anon_vma is allocated
69342 * so that the anon_vma locking is not a noop.
69343 */
69344 if (unlikely(anon_vma_prepare(vma)))
69345 return -ENOMEM;
69346+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69347+ if (locknext && anon_vma_prepare(vma->vm_next))
69348+ return -ENOMEM;
69349 vma_lock_anon_vma(vma);
69350+ if (locknext)
69351+ vma_lock_anon_vma(vma->vm_next);
69352
69353 /*
69354 * vma->vm_start/vm_end cannot change under us because the caller
69355 * is required to hold the mmap_sem in read mode. We need the
69356- * anon_vma lock to serialize against concurrent expand_stacks.
69357- * Also guard against wrapping around to address 0.
69358+ * anon_vma locks to serialize against concurrent expand_stacks
69359+ * and expand_upwards.
69360 */
69361- if (address < PAGE_ALIGN(address+4))
69362- address = PAGE_ALIGN(address+4);
69363- else {
69364- vma_unlock_anon_vma(vma);
69365- return -ENOMEM;
69366- }
69367 error = 0;
69368
69369 /* Somebody else might have raced and expanded it already */
69370- if (address > vma->vm_end) {
69371+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69372+ error = -ENOMEM;
69373+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69374 unsigned long size, grow;
69375
69376 size = address - vma->vm_start;
69377@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69378 }
69379 }
69380 }
69381+ if (locknext)
69382+ vma_unlock_anon_vma(vma->vm_next);
69383 vma_unlock_anon_vma(vma);
69384 khugepaged_enter_vma_merge(vma);
69385 return error;
69386@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
69387 unsigned long address)
69388 {
69389 int error;
69390+ bool lockprev = false;
69391+ struct vm_area_struct *prev;
69392
69393 /*
69394 * We must make sure the anon_vma is allocated
69395@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
69396 if (error)
69397 return error;
69398
69399+ prev = vma->vm_prev;
69400+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69401+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69402+#endif
69403+ if (lockprev && anon_vma_prepare(prev))
69404+ return -ENOMEM;
69405+ if (lockprev)
69406+ vma_lock_anon_vma(prev);
69407+
69408 vma_lock_anon_vma(vma);
69409
69410 /*
69411@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
69412 */
69413
69414 /* Somebody else might have raced and expanded it already */
69415- if (address < vma->vm_start) {
69416+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69417+ error = -ENOMEM;
69418+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69419 unsigned long size, grow;
69420
69421+#ifdef CONFIG_PAX_SEGMEXEC
69422+ struct vm_area_struct *vma_m;
69423+
69424+ vma_m = pax_find_mirror_vma(vma);
69425+#endif
69426+
69427 size = vma->vm_end - address;
69428 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69429
69430@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
69431 if (!error) {
69432 vma->vm_start = address;
69433 vma->vm_pgoff -= grow;
69434+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69435+
69436+#ifdef CONFIG_PAX_SEGMEXEC
69437+ if (vma_m) {
69438+ vma_m->vm_start -= grow << PAGE_SHIFT;
69439+ vma_m->vm_pgoff -= grow;
69440+ }
69441+#endif
69442+
69443 perf_event_mmap(vma);
69444 }
69445 }
69446 }
69447 vma_unlock_anon_vma(vma);
69448+ if (lockprev)
69449+ vma_unlock_anon_vma(prev);
69450 khugepaged_enter_vma_merge(vma);
69451 return error;
69452 }
69453@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69454 do {
69455 long nrpages = vma_pages(vma);
69456
69457+#ifdef CONFIG_PAX_SEGMEXEC
69458+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69459+ vma = remove_vma(vma);
69460+ continue;
69461+ }
69462+#endif
69463+
69464 mm->total_vm -= nrpages;
69465 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69466 vma = remove_vma(vma);
69467@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69468 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69469 vma->vm_prev = NULL;
69470 do {
69471+
69472+#ifdef CONFIG_PAX_SEGMEXEC
69473+ if (vma->vm_mirror) {
69474+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69475+ vma->vm_mirror->vm_mirror = NULL;
69476+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69477+ vma->vm_mirror = NULL;
69478+ }
69479+#endif
69480+
69481 rb_erase(&vma->vm_rb, &mm->mm_rb);
69482 mm->map_count--;
69483 tail_vma = vma;
69484@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69485 struct vm_area_struct *new;
69486 int err = -ENOMEM;
69487
69488+#ifdef CONFIG_PAX_SEGMEXEC
69489+ struct vm_area_struct *vma_m, *new_m = NULL;
69490+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69491+#endif
69492+
69493 if (is_vm_hugetlb_page(vma) && (addr &
69494 ~(huge_page_mask(hstate_vma(vma)))))
69495 return -EINVAL;
69496
69497+#ifdef CONFIG_PAX_SEGMEXEC
69498+ vma_m = pax_find_mirror_vma(vma);
69499+#endif
69500+
69501 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69502 if (!new)
69503 goto out_err;
69504
69505+#ifdef CONFIG_PAX_SEGMEXEC
69506+ if (vma_m) {
69507+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69508+ if (!new_m) {
69509+ kmem_cache_free(vm_area_cachep, new);
69510+ goto out_err;
69511+ }
69512+ }
69513+#endif
69514+
69515 /* most fields are the same, copy all, and then fixup */
69516 *new = *vma;
69517
69518@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69519 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69520 }
69521
69522+#ifdef CONFIG_PAX_SEGMEXEC
69523+ if (vma_m) {
69524+ *new_m = *vma_m;
69525+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69526+ new_m->vm_mirror = new;
69527+ new->vm_mirror = new_m;
69528+
69529+ if (new_below)
69530+ new_m->vm_end = addr_m;
69531+ else {
69532+ new_m->vm_start = addr_m;
69533+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69534+ }
69535+ }
69536+#endif
69537+
69538 pol = mpol_dup(vma_policy(vma));
69539 if (IS_ERR(pol)) {
69540 err = PTR_ERR(pol);
69541@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69542 else
69543 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69544
69545+#ifdef CONFIG_PAX_SEGMEXEC
69546+ if (!err && vma_m) {
69547+ if (anon_vma_clone(new_m, vma_m))
69548+ goto out_free_mpol;
69549+
69550+ mpol_get(pol);
69551+ vma_set_policy(new_m, pol);
69552+
69553+ if (new_m->vm_file) {
69554+ get_file(new_m->vm_file);
69555+ if (vma_m->vm_flags & VM_EXECUTABLE)
69556+ added_exe_file_vma(mm);
69557+ }
69558+
69559+ if (new_m->vm_ops && new_m->vm_ops->open)
69560+ new_m->vm_ops->open(new_m);
69561+
69562+ if (new_below)
69563+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69564+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69565+ else
69566+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69567+
69568+ if (err) {
69569+ if (new_m->vm_ops && new_m->vm_ops->close)
69570+ new_m->vm_ops->close(new_m);
69571+ if (new_m->vm_file) {
69572+ if (vma_m->vm_flags & VM_EXECUTABLE)
69573+ removed_exe_file_vma(mm);
69574+ fput(new_m->vm_file);
69575+ }
69576+ mpol_put(pol);
69577+ }
69578+ }
69579+#endif
69580+
69581 /* Success. */
69582 if (!err)
69583 return 0;
69584@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69585 removed_exe_file_vma(mm);
69586 fput(new->vm_file);
69587 }
69588- unlink_anon_vmas(new);
69589 out_free_mpol:
69590 mpol_put(pol);
69591 out_free_vma:
69592+
69593+#ifdef CONFIG_PAX_SEGMEXEC
69594+ if (new_m) {
69595+ unlink_anon_vmas(new_m);
69596+ kmem_cache_free(vm_area_cachep, new_m);
69597+ }
69598+#endif
69599+
69600+ unlink_anon_vmas(new);
69601 kmem_cache_free(vm_area_cachep, new);
69602 out_err:
69603 return err;
69604@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69605 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69606 unsigned long addr, int new_below)
69607 {
69608+
69609+#ifdef CONFIG_PAX_SEGMEXEC
69610+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69611+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69612+ if (mm->map_count >= sysctl_max_map_count-1)
69613+ return -ENOMEM;
69614+ } else
69615+#endif
69616+
69617 if (mm->map_count >= sysctl_max_map_count)
69618 return -ENOMEM;
69619
69620@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69621 * work. This now handles partial unmappings.
69622 * Jeremy Fitzhardinge <jeremy@goop.org>
69623 */
69624+#ifdef CONFIG_PAX_SEGMEXEC
69625 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69626 {
69627+ int ret = __do_munmap(mm, start, len);
69628+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69629+ return ret;
69630+
69631+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69632+}
69633+
69634+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69635+#else
69636+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69637+#endif
69638+{
69639 unsigned long end;
69640 struct vm_area_struct *vma, *prev, *last;
69641
69642+ /*
69643+ * mm->mmap_sem is required to protect against another thread
69644+ * changing the mappings in case we sleep.
69645+ */
69646+ verify_mm_writelocked(mm);
69647+
69648 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69649 return -EINVAL;
69650
69651@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69652 /* Fix up all other VM information */
69653 remove_vma_list(mm, vma);
69654
69655+ track_exec_limit(mm, start, end, 0UL);
69656+
69657 return 0;
69658 }
69659
69660@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69661
69662 profile_munmap(addr);
69663
69664+#ifdef CONFIG_PAX_SEGMEXEC
69665+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69666+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69667+ return -EINVAL;
69668+#endif
69669+
69670 down_write(&mm->mmap_sem);
69671 ret = do_munmap(mm, addr, len);
69672 up_write(&mm->mmap_sem);
69673 return ret;
69674 }
69675
69676-static inline void verify_mm_writelocked(struct mm_struct *mm)
69677-{
69678-#ifdef CONFIG_DEBUG_VM
69679- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69680- WARN_ON(1);
69681- up_read(&mm->mmap_sem);
69682- }
69683-#endif
69684-}
69685-
69686 /*
69687 * this is really a simplified "do_mmap". it only handles
69688 * anonymous maps. eventually we may be able to do some
69689@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69690 struct rb_node ** rb_link, * rb_parent;
69691 pgoff_t pgoff = addr >> PAGE_SHIFT;
69692 int error;
69693+ unsigned long charged;
69694
69695 len = PAGE_ALIGN(len);
69696 if (!len)
69697@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69698
69699 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69700
69701+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69702+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69703+ flags &= ~VM_EXEC;
69704+
69705+#ifdef CONFIG_PAX_MPROTECT
69706+ if (mm->pax_flags & MF_PAX_MPROTECT)
69707+ flags &= ~VM_MAYEXEC;
69708+#endif
69709+
69710+ }
69711+#endif
69712+
69713 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69714 if (error & ~PAGE_MASK)
69715 return error;
69716
69717+ charged = len >> PAGE_SHIFT;
69718+
69719 /*
69720 * mlock MCL_FUTURE?
69721 */
69722 if (mm->def_flags & VM_LOCKED) {
69723 unsigned long locked, lock_limit;
69724- locked = len >> PAGE_SHIFT;
69725+ locked = charged;
69726 locked += mm->locked_vm;
69727 lock_limit = rlimit(RLIMIT_MEMLOCK);
69728 lock_limit >>= PAGE_SHIFT;
69729@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69730 /*
69731 * Clear old maps. this also does some error checking for us
69732 */
69733- munmap_back:
69734 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69735 if (vma && vma->vm_start < addr + len) {
69736 if (do_munmap(mm, addr, len))
69737 return -ENOMEM;
69738- goto munmap_back;
69739+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69740+ BUG_ON(vma && vma->vm_start < addr + len);
69741 }
69742
69743 /* Check against address space limits *after* clearing old maps... */
69744- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69745+ if (!may_expand_vm(mm, charged))
69746 return -ENOMEM;
69747
69748 if (mm->map_count > sysctl_max_map_count)
69749 return -ENOMEM;
69750
69751- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69752+ if (security_vm_enough_memory(charged))
69753 return -ENOMEM;
69754
69755 /* Can we just expand an old private anonymous mapping? */
69756@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69757 */
69758 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69759 if (!vma) {
69760- vm_unacct_memory(len >> PAGE_SHIFT);
69761+ vm_unacct_memory(charged);
69762 return -ENOMEM;
69763 }
69764
69765@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69766 vma_link(mm, vma, prev, rb_link, rb_parent);
69767 out:
69768 perf_event_mmap(vma);
69769- mm->total_vm += len >> PAGE_SHIFT;
69770+ mm->total_vm += charged;
69771 if (flags & VM_LOCKED) {
69772 if (!mlock_vma_pages_range(vma, addr, addr + len))
69773- mm->locked_vm += (len >> PAGE_SHIFT);
69774+ mm->locked_vm += charged;
69775 }
69776+ track_exec_limit(mm, addr, addr + len, flags);
69777 return addr;
69778 }
69779
69780@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69781 * Walk the list again, actually closing and freeing it,
69782 * with preemption enabled, without holding any MM locks.
69783 */
69784- while (vma)
69785+ while (vma) {
69786+ vma->vm_mirror = NULL;
69787 vma = remove_vma(vma);
69788+ }
69789
69790 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69791 }
69792@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69793 struct vm_area_struct * __vma, * prev;
69794 struct rb_node ** rb_link, * rb_parent;
69795
69796+#ifdef CONFIG_PAX_SEGMEXEC
69797+ struct vm_area_struct *vma_m = NULL;
69798+#endif
69799+
69800+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69801+ return -EPERM;
69802+
69803 /*
69804 * The vm_pgoff of a purely anonymous vma should be irrelevant
69805 * until its first write fault, when page's anon_vma and index
69806@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69807 if ((vma->vm_flags & VM_ACCOUNT) &&
69808 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69809 return -ENOMEM;
69810+
69811+#ifdef CONFIG_PAX_SEGMEXEC
69812+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69813+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69814+ if (!vma_m)
69815+ return -ENOMEM;
69816+ }
69817+#endif
69818+
69819 vma_link(mm, vma, prev, rb_link, rb_parent);
69820+
69821+#ifdef CONFIG_PAX_SEGMEXEC
69822+ if (vma_m)
69823+ BUG_ON(pax_mirror_vma(vma_m, vma));
69824+#endif
69825+
69826 return 0;
69827 }
69828
69829@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69830 struct rb_node **rb_link, *rb_parent;
69831 struct mempolicy *pol;
69832
69833+ BUG_ON(vma->vm_mirror);
69834+
69835 /*
69836 * If anonymous vma has not yet been faulted, update new pgoff
69837 * to match new location, to increase its chance of merging.
69838@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69839 return NULL;
69840 }
69841
69842+#ifdef CONFIG_PAX_SEGMEXEC
69843+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69844+{
69845+ struct vm_area_struct *prev_m;
69846+ struct rb_node **rb_link_m, *rb_parent_m;
69847+ struct mempolicy *pol_m;
69848+
69849+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69850+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69851+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69852+ *vma_m = *vma;
69853+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69854+ if (anon_vma_clone(vma_m, vma))
69855+ return -ENOMEM;
69856+ pol_m = vma_policy(vma_m);
69857+ mpol_get(pol_m);
69858+ vma_set_policy(vma_m, pol_m);
69859+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69860+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69861+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69862+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69863+ if (vma_m->vm_file)
69864+ get_file(vma_m->vm_file);
69865+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69866+ vma_m->vm_ops->open(vma_m);
69867+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69868+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69869+ vma_m->vm_mirror = vma;
69870+ vma->vm_mirror = vma_m;
69871+ return 0;
69872+}
69873+#endif
69874+
69875 /*
69876 * Return true if the calling process may expand its vm space by the passed
69877 * number of pages
69878@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69879 unsigned long lim;
69880
69881 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69882-
69883+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69884 if (cur + npages > lim)
69885 return 0;
69886 return 1;
69887@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
69888 vma->vm_start = addr;
69889 vma->vm_end = addr + len;
69890
69891+#ifdef CONFIG_PAX_MPROTECT
69892+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69893+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69894+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69895+ return -EPERM;
69896+ if (!(vm_flags & VM_EXEC))
69897+ vm_flags &= ~VM_MAYEXEC;
69898+#else
69899+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69900+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69901+#endif
69902+ else
69903+ vm_flags &= ~VM_MAYWRITE;
69904+ }
69905+#endif
69906+
69907 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69908 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69909
69910diff --git a/mm/mprotect.c b/mm/mprotect.c
69911index 5a688a2..27e031c 100644
69912--- a/mm/mprotect.c
69913+++ b/mm/mprotect.c
69914@@ -23,10 +23,16 @@
69915 #include <linux/mmu_notifier.h>
69916 #include <linux/migrate.h>
69917 #include <linux/perf_event.h>
69918+
69919+#ifdef CONFIG_PAX_MPROTECT
69920+#include <linux/elf.h>
69921+#endif
69922+
69923 #include <asm/uaccess.h>
69924 #include <asm/pgtable.h>
69925 #include <asm/cacheflush.h>
69926 #include <asm/tlbflush.h>
69927+#include <asm/mmu_context.h>
69928
69929 #ifndef pgprot_modify
69930 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69931@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
69932 flush_tlb_range(vma, start, end);
69933 }
69934
69935+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69936+/* called while holding the mmap semaphor for writing except stack expansion */
69937+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69938+{
69939+ unsigned long oldlimit, newlimit = 0UL;
69940+
69941+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69942+ return;
69943+
69944+ spin_lock(&mm->page_table_lock);
69945+ oldlimit = mm->context.user_cs_limit;
69946+ if ((prot & VM_EXEC) && oldlimit < end)
69947+ /* USER_CS limit moved up */
69948+ newlimit = end;
69949+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69950+ /* USER_CS limit moved down */
69951+ newlimit = start;
69952+
69953+ if (newlimit) {
69954+ mm->context.user_cs_limit = newlimit;
69955+
69956+#ifdef CONFIG_SMP
69957+ wmb();
69958+ cpus_clear(mm->context.cpu_user_cs_mask);
69959+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69960+#endif
69961+
69962+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69963+ }
69964+ spin_unlock(&mm->page_table_lock);
69965+ if (newlimit == end) {
69966+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69967+
69968+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69969+ if (is_vm_hugetlb_page(vma))
69970+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69971+ else
69972+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69973+ }
69974+}
69975+#endif
69976+
69977 int
69978 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69979 unsigned long start, unsigned long end, unsigned long newflags)
69980@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69981 int error;
69982 int dirty_accountable = 0;
69983
69984+#ifdef CONFIG_PAX_SEGMEXEC
69985+ struct vm_area_struct *vma_m = NULL;
69986+ unsigned long start_m, end_m;
69987+
69988+ start_m = start + SEGMEXEC_TASK_SIZE;
69989+ end_m = end + SEGMEXEC_TASK_SIZE;
69990+#endif
69991+
69992 if (newflags == oldflags) {
69993 *pprev = vma;
69994 return 0;
69995 }
69996
69997+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69998+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69999+
70000+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70001+ return -ENOMEM;
70002+
70003+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70004+ return -ENOMEM;
70005+ }
70006+
70007 /*
70008 * If we make a private mapping writable we increase our commit;
70009 * but (without finer accounting) cannot reduce our commit if we
70010@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70011 }
70012 }
70013
70014+#ifdef CONFIG_PAX_SEGMEXEC
70015+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70016+ if (start != vma->vm_start) {
70017+ error = split_vma(mm, vma, start, 1);
70018+ if (error)
70019+ goto fail;
70020+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70021+ *pprev = (*pprev)->vm_next;
70022+ }
70023+
70024+ if (end != vma->vm_end) {
70025+ error = split_vma(mm, vma, end, 0);
70026+ if (error)
70027+ goto fail;
70028+ }
70029+
70030+ if (pax_find_mirror_vma(vma)) {
70031+ error = __do_munmap(mm, start_m, end_m - start_m);
70032+ if (error)
70033+ goto fail;
70034+ } else {
70035+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70036+ if (!vma_m) {
70037+ error = -ENOMEM;
70038+ goto fail;
70039+ }
70040+ vma->vm_flags = newflags;
70041+ error = pax_mirror_vma(vma_m, vma);
70042+ if (error) {
70043+ vma->vm_flags = oldflags;
70044+ goto fail;
70045+ }
70046+ }
70047+ }
70048+#endif
70049+
70050 /*
70051 * First try to merge with previous and/or next vma.
70052 */
70053@@ -204,9 +306,21 @@ success:
70054 * vm_flags and vm_page_prot are protected by the mmap_sem
70055 * held in write mode.
70056 */
70057+
70058+#ifdef CONFIG_PAX_SEGMEXEC
70059+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70060+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70061+#endif
70062+
70063 vma->vm_flags = newflags;
70064+
70065+#ifdef CONFIG_PAX_MPROTECT
70066+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70067+ mm->binfmt->handle_mprotect(vma, newflags);
70068+#endif
70069+
70070 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70071- vm_get_page_prot(newflags));
70072+ vm_get_page_prot(vma->vm_flags));
70073
70074 if (vma_wants_writenotify(vma)) {
70075 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70076@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70077 end = start + len;
70078 if (end <= start)
70079 return -ENOMEM;
70080+
70081+#ifdef CONFIG_PAX_SEGMEXEC
70082+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70083+ if (end > SEGMEXEC_TASK_SIZE)
70084+ return -EINVAL;
70085+ } else
70086+#endif
70087+
70088+ if (end > TASK_SIZE)
70089+ return -EINVAL;
70090+
70091 if (!arch_validate_prot(prot))
70092 return -EINVAL;
70093
70094@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70095 /*
70096 * Does the application expect PROT_READ to imply PROT_EXEC:
70097 */
70098- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70099+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70100 prot |= PROT_EXEC;
70101
70102 vm_flags = calc_vm_prot_bits(prot);
70103@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70104 if (start > vma->vm_start)
70105 prev = vma;
70106
70107+#ifdef CONFIG_PAX_MPROTECT
70108+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70109+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70110+#endif
70111+
70112 for (nstart = start ; ; ) {
70113 unsigned long newflags;
70114
70115@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70116
70117 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70118 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70119+ if (prot & (PROT_WRITE | PROT_EXEC))
70120+ gr_log_rwxmprotect(vma->vm_file);
70121+
70122+ error = -EACCES;
70123+ goto out;
70124+ }
70125+
70126+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70127 error = -EACCES;
70128 goto out;
70129 }
70130@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70131 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70132 if (error)
70133 goto out;
70134+
70135+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70136+
70137 nstart = tmp;
70138
70139 if (nstart < prev->vm_end)
70140diff --git a/mm/mremap.c b/mm/mremap.c
70141index d6959cb..18a402a 100644
70142--- a/mm/mremap.c
70143+++ b/mm/mremap.c
70144@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70145 continue;
70146 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70147 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70148+
70149+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70150+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70151+ pte = pte_exprotect(pte);
70152+#endif
70153+
70154 set_pte_at(mm, new_addr, new_pte, pte);
70155 }
70156
70157@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70158 if (is_vm_hugetlb_page(vma))
70159 goto Einval;
70160
70161+#ifdef CONFIG_PAX_SEGMEXEC
70162+ if (pax_find_mirror_vma(vma))
70163+ goto Einval;
70164+#endif
70165+
70166 /* We can't remap across vm area boundaries */
70167 if (old_len > vma->vm_end - addr)
70168 goto Efault;
70169@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70170 unsigned long ret = -EINVAL;
70171 unsigned long charged = 0;
70172 unsigned long map_flags;
70173+ unsigned long pax_task_size = TASK_SIZE;
70174
70175 if (new_addr & ~PAGE_MASK)
70176 goto out;
70177
70178- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70179+#ifdef CONFIG_PAX_SEGMEXEC
70180+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70181+ pax_task_size = SEGMEXEC_TASK_SIZE;
70182+#endif
70183+
70184+ pax_task_size -= PAGE_SIZE;
70185+
70186+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70187 goto out;
70188
70189 /* Check if the location we're moving into overlaps the
70190 * old location at all, and fail if it does.
70191 */
70192- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70193- goto out;
70194-
70195- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70196+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70197 goto out;
70198
70199 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70200@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70201 struct vm_area_struct *vma;
70202 unsigned long ret = -EINVAL;
70203 unsigned long charged = 0;
70204+ unsigned long pax_task_size = TASK_SIZE;
70205
70206 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70207 goto out;
70208@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70209 if (!new_len)
70210 goto out;
70211
70212+#ifdef CONFIG_PAX_SEGMEXEC
70213+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70214+ pax_task_size = SEGMEXEC_TASK_SIZE;
70215+#endif
70216+
70217+ pax_task_size -= PAGE_SIZE;
70218+
70219+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70220+ old_len > pax_task_size || addr > pax_task_size-old_len)
70221+ goto out;
70222+
70223 if (flags & MREMAP_FIXED) {
70224 if (flags & MREMAP_MAYMOVE)
70225 ret = mremap_to(addr, old_len, new_addr, new_len);
70226@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70227 addr + new_len);
70228 }
70229 ret = addr;
70230+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70231 goto out;
70232 }
70233 }
70234@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70235 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70236 if (ret)
70237 goto out;
70238+
70239+ map_flags = vma->vm_flags;
70240 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70241+ if (!(ret & ~PAGE_MASK)) {
70242+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70243+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70244+ }
70245 }
70246 out:
70247 if (ret & ~PAGE_MASK)
70248diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70249index 7fa41b4..6087460 100644
70250--- a/mm/nobootmem.c
70251+++ b/mm/nobootmem.c
70252@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70253 unsigned long __init free_all_memory_core_early(int nodeid)
70254 {
70255 int i;
70256- u64 start, end;
70257+ u64 start, end, startrange, endrange;
70258 unsigned long count = 0;
70259- struct range *range = NULL;
70260+ struct range *range = NULL, rangerange = { 0, 0 };
70261 int nr_range;
70262
70263 nr_range = get_free_all_memory_range(&range, nodeid);
70264+ startrange = __pa(range) >> PAGE_SHIFT;
70265+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70266
70267 for (i = 0; i < nr_range; i++) {
70268 start = range[i].start;
70269 end = range[i].end;
70270+ if (start <= endrange && startrange < end) {
70271+ BUG_ON(rangerange.start | rangerange.end);
70272+ rangerange = range[i];
70273+ continue;
70274+ }
70275 count += end - start;
70276 __free_pages_memory(start, end);
70277 }
70278+ start = rangerange.start;
70279+ end = rangerange.end;
70280+ count += end - start;
70281+ __free_pages_memory(start, end);
70282
70283 return count;
70284 }
70285diff --git a/mm/nommu.c b/mm/nommu.c
70286index b982290..7d73f53 100644
70287--- a/mm/nommu.c
70288+++ b/mm/nommu.c
70289@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70290 int sysctl_overcommit_ratio = 50; /* default is 50% */
70291 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70292 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70293-int heap_stack_gap = 0;
70294
70295 atomic_long_t mmap_pages_allocated;
70296
70297@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70298 EXPORT_SYMBOL(find_vma);
70299
70300 /*
70301- * find a VMA
70302- * - we don't extend stack VMAs under NOMMU conditions
70303- */
70304-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70305-{
70306- return find_vma(mm, addr);
70307-}
70308-
70309-/*
70310 * expand a stack to a given address
70311 * - not supported under NOMMU conditions
70312 */
70313@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70314
70315 /* most fields are the same, copy all, and then fixup */
70316 *new = *vma;
70317+ INIT_LIST_HEAD(&new->anon_vma_chain);
70318 *region = *vma->vm_region;
70319 new->vm_region = region;
70320
70321diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70322index 485be89..c059ad3 100644
70323--- a/mm/page_alloc.c
70324+++ b/mm/page_alloc.c
70325@@ -341,7 +341,7 @@ out:
70326 * This usage means that zero-order pages may not be compound.
70327 */
70328
70329-static void free_compound_page(struct page *page)
70330+void free_compound_page(struct page *page)
70331 {
70332 __free_pages_ok(page, compound_order(page));
70333 }
70334@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70335 int i;
70336 int bad = 0;
70337
70338+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70339+ unsigned long index = 1UL << order;
70340+#endif
70341+
70342 trace_mm_page_free_direct(page, order);
70343 kmemcheck_free_shadow(page, order);
70344
70345@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70346 debug_check_no_obj_freed(page_address(page),
70347 PAGE_SIZE << order);
70348 }
70349+
70350+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70351+ for (; index; --index)
70352+ sanitize_highpage(page + index - 1);
70353+#endif
70354+
70355 arch_free_page(page, order);
70356 kernel_map_pages(page, 1 << order, 0);
70357
70358@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70359 arch_alloc_page(page, order);
70360 kernel_map_pages(page, 1 << order, 1);
70361
70362+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70363 if (gfp_flags & __GFP_ZERO)
70364 prep_zero_page(page, order, gfp_flags);
70365+#endif
70366
70367 if (order && (gfp_flags & __GFP_COMP))
70368 prep_compound_page(page, order);
70369@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70370 unsigned long pfn;
70371
70372 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70373+#ifdef CONFIG_X86_32
70374+ /* boot failures in VMware 8 on 32bit vanilla since
70375+ this change */
70376+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70377+#else
70378 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70379+#endif
70380 return 1;
70381 }
70382 return 0;
70383diff --git a/mm/percpu.c b/mm/percpu.c
70384index 716eb4a..8d10419 100644
70385--- a/mm/percpu.c
70386+++ b/mm/percpu.c
70387@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70388 static unsigned int pcpu_high_unit_cpu __read_mostly;
70389
70390 /* the address of the first chunk which starts with the kernel static area */
70391-void *pcpu_base_addr __read_mostly;
70392+void *pcpu_base_addr __read_only;
70393 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70394
70395 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70396diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70397index e920aa3..78fe584 100644
70398--- a/mm/process_vm_access.c
70399+++ b/mm/process_vm_access.c
70400@@ -13,6 +13,7 @@
70401 #include <linux/uio.h>
70402 #include <linux/sched.h>
70403 #include <linux/highmem.h>
70404+#include <linux/security.h>
70405 #include <linux/ptrace.h>
70406 #include <linux/slab.h>
70407 #include <linux/syscalls.h>
70408@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70409 size_t iov_l_curr_offset = 0;
70410 ssize_t iov_len;
70411
70412+ return -ENOSYS; // PaX: until properly audited
70413+
70414 /*
70415 * Work out how many pages of struct pages we're going to need
70416 * when eventually calling get_user_pages
70417 */
70418 for (i = 0; i < riovcnt; i++) {
70419 iov_len = rvec[i].iov_len;
70420- if (iov_len > 0) {
70421- nr_pages_iov = ((unsigned long)rvec[i].iov_base
70422- + iov_len)
70423- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70424- / PAGE_SIZE + 1;
70425- nr_pages = max(nr_pages, nr_pages_iov);
70426- }
70427+ if (iov_len <= 0)
70428+ continue;
70429+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70430+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70431+ nr_pages = max(nr_pages, nr_pages_iov);
70432 }
70433
70434 if (nr_pages == 0)
70435@@ -298,8 +299,13 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70436 goto free_proc_pages;
70437 }
70438
70439+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70440+ rc = -EPERM;
70441+ goto put_task_struct;
70442+ }
70443+
70444 task_lock(task);
70445- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70446+ if (ptrace_may_access_nolock(task, PTRACE_MODE_ATTACH)) {
70447 task_unlock(task);
70448 rc = -EPERM;
70449 goto put_task_struct;
70450diff --git a/mm/rmap.c b/mm/rmap.c
70451index a4fd368..e0ffec7 100644
70452--- a/mm/rmap.c
70453+++ b/mm/rmap.c
70454@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70455 struct anon_vma *anon_vma = vma->anon_vma;
70456 struct anon_vma_chain *avc;
70457
70458+#ifdef CONFIG_PAX_SEGMEXEC
70459+ struct anon_vma_chain *avc_m = NULL;
70460+#endif
70461+
70462 might_sleep();
70463 if (unlikely(!anon_vma)) {
70464 struct mm_struct *mm = vma->vm_mm;
70465@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70466 if (!avc)
70467 goto out_enomem;
70468
70469+#ifdef CONFIG_PAX_SEGMEXEC
70470+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70471+ if (!avc_m)
70472+ goto out_enomem_free_avc;
70473+#endif
70474+
70475 anon_vma = find_mergeable_anon_vma(vma);
70476 allocated = NULL;
70477 if (!anon_vma) {
70478@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70479 /* page_table_lock to protect against threads */
70480 spin_lock(&mm->page_table_lock);
70481 if (likely(!vma->anon_vma)) {
70482+
70483+#ifdef CONFIG_PAX_SEGMEXEC
70484+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70485+
70486+ if (vma_m) {
70487+ BUG_ON(vma_m->anon_vma);
70488+ vma_m->anon_vma = anon_vma;
70489+ avc_m->anon_vma = anon_vma;
70490+ avc_m->vma = vma;
70491+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70492+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70493+ avc_m = NULL;
70494+ }
70495+#endif
70496+
70497 vma->anon_vma = anon_vma;
70498 avc->anon_vma = anon_vma;
70499 avc->vma = vma;
70500@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70501
70502 if (unlikely(allocated))
70503 put_anon_vma(allocated);
70504+
70505+#ifdef CONFIG_PAX_SEGMEXEC
70506+ if (unlikely(avc_m))
70507+ anon_vma_chain_free(avc_m);
70508+#endif
70509+
70510 if (unlikely(avc))
70511 anon_vma_chain_free(avc);
70512 }
70513 return 0;
70514
70515 out_enomem_free_avc:
70516+
70517+#ifdef CONFIG_PAX_SEGMEXEC
70518+ if (avc_m)
70519+ anon_vma_chain_free(avc_m);
70520+#endif
70521+
70522 anon_vma_chain_free(avc);
70523 out_enomem:
70524 return -ENOMEM;
70525@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70526 * Attach the anon_vmas from src to dst.
70527 * Returns 0 on success, -ENOMEM on failure.
70528 */
70529-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70530+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70531 {
70532 struct anon_vma_chain *avc, *pavc;
70533 struct anon_vma *root = NULL;
70534@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70535 * the corresponding VMA in the parent process is attached to.
70536 * Returns 0 on success, non-zero on failure.
70537 */
70538-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70539+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70540 {
70541 struct anon_vma_chain *avc;
70542 struct anon_vma *anon_vma;
70543diff --git a/mm/shmem.c b/mm/shmem.c
70544index 6c253f7..367e20a 100644
70545--- a/mm/shmem.c
70546+++ b/mm/shmem.c
70547@@ -31,7 +31,7 @@
70548 #include <linux/export.h>
70549 #include <linux/swap.h>
70550
70551-static struct vfsmount *shm_mnt;
70552+struct vfsmount *shm_mnt;
70553
70554 #ifdef CONFIG_SHMEM
70555 /*
70556@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70557 #define BOGO_DIRENT_SIZE 20
70558
70559 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70560-#define SHORT_SYMLINK_LEN 128
70561+#define SHORT_SYMLINK_LEN 64
70562
70563 struct shmem_xattr {
70564 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70565@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70566 int err = -ENOMEM;
70567
70568 /* Round up to L1_CACHE_BYTES to resist false sharing */
70569- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70570- L1_CACHE_BYTES), GFP_KERNEL);
70571+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70572 if (!sbinfo)
70573 return -ENOMEM;
70574
70575diff --git a/mm/slab.c b/mm/slab.c
70576index 83311c9a..fcf8f86 100644
70577--- a/mm/slab.c
70578+++ b/mm/slab.c
70579@@ -151,7 +151,7 @@
70580
70581 /* Legal flag mask for kmem_cache_create(). */
70582 #if DEBUG
70583-# define CREATE_MASK (SLAB_RED_ZONE | \
70584+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70585 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70586 SLAB_CACHE_DMA | \
70587 SLAB_STORE_USER | \
70588@@ -159,7 +159,7 @@
70589 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70590 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70591 #else
70592-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70593+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70594 SLAB_CACHE_DMA | \
70595 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70596 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70597@@ -288,7 +288,7 @@ struct kmem_list3 {
70598 * Need this for bootstrapping a per node allocator.
70599 */
70600 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70601-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70602+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70603 #define CACHE_CACHE 0
70604 #define SIZE_AC MAX_NUMNODES
70605 #define SIZE_L3 (2 * MAX_NUMNODES)
70606@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70607 if ((x)->max_freeable < i) \
70608 (x)->max_freeable = i; \
70609 } while (0)
70610-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70611-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70612-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70613-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70614+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70615+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70616+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70617+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70618 #else
70619 #define STATS_INC_ACTIVE(x) do { } while (0)
70620 #define STATS_DEC_ACTIVE(x) do { } while (0)
70621@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70622 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70623 */
70624 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70625- const struct slab *slab, void *obj)
70626+ const struct slab *slab, const void *obj)
70627 {
70628 u32 offset = (obj - slab->s_mem);
70629 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70630@@ -564,7 +564,7 @@ struct cache_names {
70631 static struct cache_names __initdata cache_names[] = {
70632 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70633 #include <linux/kmalloc_sizes.h>
70634- {NULL,}
70635+ {NULL}
70636 #undef CACHE
70637 };
70638
70639@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70640 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70641 sizes[INDEX_AC].cs_size,
70642 ARCH_KMALLOC_MINALIGN,
70643- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70644+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70645 NULL);
70646
70647 if (INDEX_AC != INDEX_L3) {
70648@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70649 kmem_cache_create(names[INDEX_L3].name,
70650 sizes[INDEX_L3].cs_size,
70651 ARCH_KMALLOC_MINALIGN,
70652- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70653+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70654 NULL);
70655 }
70656
70657@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70658 sizes->cs_cachep = kmem_cache_create(names->name,
70659 sizes->cs_size,
70660 ARCH_KMALLOC_MINALIGN,
70661- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70662+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70663 NULL);
70664 }
70665 #ifdef CONFIG_ZONE_DMA
70666@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70667 }
70668 /* cpu stats */
70669 {
70670- unsigned long allochit = atomic_read(&cachep->allochit);
70671- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70672- unsigned long freehit = atomic_read(&cachep->freehit);
70673- unsigned long freemiss = atomic_read(&cachep->freemiss);
70674+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70675+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70676+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70677+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70678
70679 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70680 allochit, allocmiss, freehit, freemiss);
70681@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70682 {
70683 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70684 #ifdef CONFIG_DEBUG_SLAB_LEAK
70685- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70686+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70687 #endif
70688 return 0;
70689 }
70690 module_init(slab_proc_init);
70691 #endif
70692
70693+void check_object_size(const void *ptr, unsigned long n, bool to)
70694+{
70695+
70696+#ifdef CONFIG_PAX_USERCOPY
70697+ struct page *page;
70698+ struct kmem_cache *cachep = NULL;
70699+ struct slab *slabp;
70700+ unsigned int objnr;
70701+ unsigned long offset;
70702+ const char *type;
70703+
70704+ if (!n)
70705+ return;
70706+
70707+ type = "<null>";
70708+ if (ZERO_OR_NULL_PTR(ptr))
70709+ goto report;
70710+
70711+ if (!virt_addr_valid(ptr))
70712+ return;
70713+
70714+ page = virt_to_head_page(ptr);
70715+
70716+ type = "<process stack>";
70717+ if (!PageSlab(page)) {
70718+ if (object_is_on_stack(ptr, n) == -1)
70719+ goto report;
70720+ return;
70721+ }
70722+
70723+ cachep = page_get_cache(page);
70724+ type = cachep->name;
70725+ if (!(cachep->flags & SLAB_USERCOPY))
70726+ goto report;
70727+
70728+ slabp = page_get_slab(page);
70729+ objnr = obj_to_index(cachep, slabp, ptr);
70730+ BUG_ON(objnr >= cachep->num);
70731+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70732+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70733+ return;
70734+
70735+report:
70736+ pax_report_usercopy(ptr, n, to, type);
70737+#endif
70738+
70739+}
70740+EXPORT_SYMBOL(check_object_size);
70741+
70742 /**
70743 * ksize - get the actual amount of memory allocated for a given object
70744 * @objp: Pointer to the object
70745diff --git a/mm/slob.c b/mm/slob.c
70746index 8105be4..579da9d 100644
70747--- a/mm/slob.c
70748+++ b/mm/slob.c
70749@@ -29,7 +29,7 @@
70750 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70751 * alloc_pages() directly, allocating compound pages so the page order
70752 * does not have to be separately tracked, and also stores the exact
70753- * allocation size in page->private so that it can be used to accurately
70754+ * allocation size in slob_page->size so that it can be used to accurately
70755 * provide ksize(). These objects are detected in kfree() because slob_page()
70756 * is false for them.
70757 *
70758@@ -58,6 +58,7 @@
70759 */
70760
70761 #include <linux/kernel.h>
70762+#include <linux/sched.h>
70763 #include <linux/slab.h>
70764 #include <linux/mm.h>
70765 #include <linux/swap.h> /* struct reclaim_state */
70766@@ -102,7 +103,8 @@ struct slob_page {
70767 unsigned long flags; /* mandatory */
70768 atomic_t _count; /* mandatory */
70769 slobidx_t units; /* free units left in page */
70770- unsigned long pad[2];
70771+ unsigned long pad[1];
70772+ unsigned long size; /* size when >=PAGE_SIZE */
70773 slob_t *free; /* first free slob_t in page */
70774 struct list_head list; /* linked list of free pages */
70775 };
70776@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70777 */
70778 static inline int is_slob_page(struct slob_page *sp)
70779 {
70780- return PageSlab((struct page *)sp);
70781+ return PageSlab((struct page *)sp) && !sp->size;
70782 }
70783
70784 static inline void set_slob_page(struct slob_page *sp)
70785@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70786
70787 static inline struct slob_page *slob_page(const void *addr)
70788 {
70789- return (struct slob_page *)virt_to_page(addr);
70790+ return (struct slob_page *)virt_to_head_page(addr);
70791 }
70792
70793 /*
70794@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70795 /*
70796 * Return the size of a slob block.
70797 */
70798-static slobidx_t slob_units(slob_t *s)
70799+static slobidx_t slob_units(const slob_t *s)
70800 {
70801 if (s->units > 0)
70802 return s->units;
70803@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70804 /*
70805 * Return the next free slob block pointer after this one.
70806 */
70807-static slob_t *slob_next(slob_t *s)
70808+static slob_t *slob_next(const slob_t *s)
70809 {
70810 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70811 slobidx_t next;
70812@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70813 /*
70814 * Returns true if s is the last free block in its page.
70815 */
70816-static int slob_last(slob_t *s)
70817+static int slob_last(const slob_t *s)
70818 {
70819 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70820 }
70821@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70822 if (!page)
70823 return NULL;
70824
70825+ set_slob_page(page);
70826 return page_address(page);
70827 }
70828
70829@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70830 if (!b)
70831 return NULL;
70832 sp = slob_page(b);
70833- set_slob_page(sp);
70834
70835 spin_lock_irqsave(&slob_lock, flags);
70836 sp->units = SLOB_UNITS(PAGE_SIZE);
70837 sp->free = b;
70838+ sp->size = 0;
70839 INIT_LIST_HEAD(&sp->list);
70840 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70841 set_slob_page_free(sp, slob_list);
70842@@ -476,10 +479,9 @@ out:
70843 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70844 */
70845
70846-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70847+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70848 {
70849- unsigned int *m;
70850- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70851+ slob_t *m;
70852 void *ret;
70853
70854 gfp &= gfp_allowed_mask;
70855@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70856
70857 if (!m)
70858 return NULL;
70859- *m = size;
70860+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70861+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70862+ m[0].units = size;
70863+ m[1].units = align;
70864 ret = (void *)m + align;
70865
70866 trace_kmalloc_node(_RET_IP_, ret,
70867@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70868 gfp |= __GFP_COMP;
70869 ret = slob_new_pages(gfp, order, node);
70870 if (ret) {
70871- struct page *page;
70872- page = virt_to_page(ret);
70873- page->private = size;
70874+ struct slob_page *sp;
70875+ sp = slob_page(ret);
70876+ sp->size = size;
70877 }
70878
70879 trace_kmalloc_node(_RET_IP_, ret,
70880 size, PAGE_SIZE << order, gfp, node);
70881 }
70882
70883- kmemleak_alloc(ret, size, 1, gfp);
70884+ return ret;
70885+}
70886+
70887+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70888+{
70889+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70890+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70891+
70892+ if (!ZERO_OR_NULL_PTR(ret))
70893+ kmemleak_alloc(ret, size, 1, gfp);
70894 return ret;
70895 }
70896 EXPORT_SYMBOL(__kmalloc_node);
70897@@ -533,13 +547,92 @@ void kfree(const void *block)
70898 sp = slob_page(block);
70899 if (is_slob_page(sp)) {
70900 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70901- unsigned int *m = (unsigned int *)(block - align);
70902- slob_free(m, *m + align);
70903- } else
70904+ slob_t *m = (slob_t *)(block - align);
70905+ slob_free(m, m[0].units + align);
70906+ } else {
70907+ clear_slob_page(sp);
70908+ free_slob_page(sp);
70909+ sp->size = 0;
70910 put_page(&sp->page);
70911+ }
70912 }
70913 EXPORT_SYMBOL(kfree);
70914
70915+void check_object_size(const void *ptr, unsigned long n, bool to)
70916+{
70917+
70918+#ifdef CONFIG_PAX_USERCOPY
70919+ struct slob_page *sp;
70920+ const slob_t *free;
70921+ const void *base;
70922+ unsigned long flags;
70923+ const char *type;
70924+
70925+ if (!n)
70926+ return;
70927+
70928+ type = "<null>";
70929+ if (ZERO_OR_NULL_PTR(ptr))
70930+ goto report;
70931+
70932+ if (!virt_addr_valid(ptr))
70933+ return;
70934+
70935+ type = "<process stack>";
70936+ sp = slob_page(ptr);
70937+ if (!PageSlab((struct page*)sp)) {
70938+ if (object_is_on_stack(ptr, n) == -1)
70939+ goto report;
70940+ return;
70941+ }
70942+
70943+ type = "<slob>";
70944+ if (sp->size) {
70945+ base = page_address(&sp->page);
70946+ if (base <= ptr && n <= sp->size - (ptr - base))
70947+ return;
70948+ goto report;
70949+ }
70950+
70951+ /* some tricky double walking to find the chunk */
70952+ spin_lock_irqsave(&slob_lock, flags);
70953+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70954+ free = sp->free;
70955+
70956+ while (!slob_last(free) && (void *)free <= ptr) {
70957+ base = free + slob_units(free);
70958+ free = slob_next(free);
70959+ }
70960+
70961+ while (base < (void *)free) {
70962+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70963+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70964+ int offset;
70965+
70966+ if (ptr < base + align)
70967+ break;
70968+
70969+ offset = ptr - base - align;
70970+ if (offset >= m) {
70971+ base += size;
70972+ continue;
70973+ }
70974+
70975+ if (n > m - offset)
70976+ break;
70977+
70978+ spin_unlock_irqrestore(&slob_lock, flags);
70979+ return;
70980+ }
70981+
70982+ spin_unlock_irqrestore(&slob_lock, flags);
70983+report:
70984+ pax_report_usercopy(ptr, n, to, type);
70985+#endif
70986+
70987+}
70988+EXPORT_SYMBOL(check_object_size);
70989+
70990 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70991 size_t ksize(const void *block)
70992 {
70993@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70994 sp = slob_page(block);
70995 if (is_slob_page(sp)) {
70996 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70997- unsigned int *m = (unsigned int *)(block - align);
70998- return SLOB_UNITS(*m) * SLOB_UNIT;
70999+ slob_t *m = (slob_t *)(block - align);
71000+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71001 } else
71002- return sp->page.private;
71003+ return sp->size;
71004 }
71005 EXPORT_SYMBOL(ksize);
71006
71007@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71008 {
71009 struct kmem_cache *c;
71010
71011+#ifdef CONFIG_PAX_USERCOPY
71012+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
71013+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71014+#else
71015 c = slob_alloc(sizeof(struct kmem_cache),
71016 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71017+#endif
71018
71019 if (c) {
71020 c->name = name;
71021@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71022
71023 lockdep_trace_alloc(flags);
71024
71025+#ifdef CONFIG_PAX_USERCOPY
71026+ b = __kmalloc_node_align(c->size, flags, node, c->align);
71027+#else
71028 if (c->size < PAGE_SIZE) {
71029 b = slob_alloc(c->size, flags, c->align, node);
71030 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71031 SLOB_UNITS(c->size) * SLOB_UNIT,
71032 flags, node);
71033 } else {
71034+ struct slob_page *sp;
71035+
71036 b = slob_new_pages(flags, get_order(c->size), node);
71037+ sp = slob_page(b);
71038+ sp->size = c->size;
71039 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71040 PAGE_SIZE << get_order(c->size),
71041 flags, node);
71042 }
71043+#endif
71044
71045 if (c->ctor)
71046 c->ctor(b);
71047@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71048
71049 static void __kmem_cache_free(void *b, int size)
71050 {
71051- if (size < PAGE_SIZE)
71052+ struct slob_page *sp = slob_page(b);
71053+
71054+ if (is_slob_page(sp))
71055 slob_free(b, size);
71056- else
71057+ else {
71058+ clear_slob_page(sp);
71059+ free_slob_page(sp);
71060+ sp->size = 0;
71061 slob_free_pages(b, get_order(size));
71062+ }
71063 }
71064
71065 static void kmem_rcu_free(struct rcu_head *head)
71066@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71067
71068 void kmem_cache_free(struct kmem_cache *c, void *b)
71069 {
71070+ int size = c->size;
71071+
71072+#ifdef CONFIG_PAX_USERCOPY
71073+ if (size + c->align < PAGE_SIZE) {
71074+ size += c->align;
71075+ b -= c->align;
71076+ }
71077+#endif
71078+
71079 kmemleak_free_recursive(b, c->flags);
71080 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71081 struct slob_rcu *slob_rcu;
71082- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71083- slob_rcu->size = c->size;
71084+ slob_rcu = b + (size - sizeof(struct slob_rcu));
71085+ slob_rcu->size = size;
71086 call_rcu(&slob_rcu->head, kmem_rcu_free);
71087 } else {
71088- __kmem_cache_free(b, c->size);
71089+ __kmem_cache_free(b, size);
71090 }
71091
71092+#ifdef CONFIG_PAX_USERCOPY
71093+ trace_kfree(_RET_IP_, b);
71094+#else
71095 trace_kmem_cache_free(_RET_IP_, b);
71096+#endif
71097+
71098 }
71099 EXPORT_SYMBOL(kmem_cache_free);
71100
71101diff --git a/mm/slub.c b/mm/slub.c
71102index 1a919f0..1739c9b 100644
71103--- a/mm/slub.c
71104+++ b/mm/slub.c
71105@@ -208,7 +208,7 @@ struct track {
71106
71107 enum track_item { TRACK_ALLOC, TRACK_FREE };
71108
71109-#ifdef CONFIG_SYSFS
71110+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71111 static int sysfs_slab_add(struct kmem_cache *);
71112 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71113 static void sysfs_slab_remove(struct kmem_cache *);
71114@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71115 if (!t->addr)
71116 return;
71117
71118- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71119+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71120 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71121 #ifdef CONFIG_STACKTRACE
71122 {
71123@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71124
71125 page = virt_to_head_page(x);
71126
71127+ BUG_ON(!PageSlab(page));
71128+
71129 slab_free(s, page, x, _RET_IP_);
71130
71131 trace_kmem_cache_free(_RET_IP_, x);
71132@@ -2592,7 +2594,7 @@ static int slub_min_objects;
71133 * Merge control. If this is set then no merging of slab caches will occur.
71134 * (Could be removed. This was introduced to pacify the merge skeptics.)
71135 */
71136-static int slub_nomerge;
71137+static int slub_nomerge = 1;
71138
71139 /*
71140 * Calculate the order of allocation given an slab object size.
71141@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71142 else
71143 s->cpu_partial = 30;
71144
71145- s->refcount = 1;
71146+ atomic_set(&s->refcount, 1);
71147 #ifdef CONFIG_NUMA
71148 s->remote_node_defrag_ratio = 1000;
71149 #endif
71150@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71151 void kmem_cache_destroy(struct kmem_cache *s)
71152 {
71153 down_write(&slub_lock);
71154- s->refcount--;
71155- if (!s->refcount) {
71156+ if (atomic_dec_and_test(&s->refcount)) {
71157 list_del(&s->list);
71158 up_write(&slub_lock);
71159 if (kmem_cache_close(s)) {
71160@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71161 EXPORT_SYMBOL(__kmalloc_node);
71162 #endif
71163
71164+void check_object_size(const void *ptr, unsigned long n, bool to)
71165+{
71166+
71167+#ifdef CONFIG_PAX_USERCOPY
71168+ struct page *page;
71169+ struct kmem_cache *s = NULL;
71170+ unsigned long offset;
71171+ const char *type;
71172+
71173+ if (!n)
71174+ return;
71175+
71176+ type = "<null>";
71177+ if (ZERO_OR_NULL_PTR(ptr))
71178+ goto report;
71179+
71180+ if (!virt_addr_valid(ptr))
71181+ return;
71182+
71183+ page = virt_to_head_page(ptr);
71184+
71185+ type = "<process stack>";
71186+ if (!PageSlab(page)) {
71187+ if (object_is_on_stack(ptr, n) == -1)
71188+ goto report;
71189+ return;
71190+ }
71191+
71192+ s = page->slab;
71193+ type = s->name;
71194+ if (!(s->flags & SLAB_USERCOPY))
71195+ goto report;
71196+
71197+ offset = (ptr - page_address(page)) % s->size;
71198+ if (offset <= s->objsize && n <= s->objsize - offset)
71199+ return;
71200+
71201+report:
71202+ pax_report_usercopy(ptr, n, to, type);
71203+#endif
71204+
71205+}
71206+EXPORT_SYMBOL(check_object_size);
71207+
71208 size_t ksize(const void *object)
71209 {
71210 struct page *page;
71211@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71212 int node;
71213
71214 list_add(&s->list, &slab_caches);
71215- s->refcount = -1;
71216+ atomic_set(&s->refcount, -1);
71217
71218 for_each_node_state(node, N_NORMAL_MEMORY) {
71219 struct kmem_cache_node *n = get_node(s, node);
71220@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71221
71222 /* Caches that are not of the two-to-the-power-of size */
71223 if (KMALLOC_MIN_SIZE <= 32) {
71224- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71225+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71226 caches++;
71227 }
71228
71229 if (KMALLOC_MIN_SIZE <= 64) {
71230- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71231+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71232 caches++;
71233 }
71234
71235 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71236- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71237+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71238 caches++;
71239 }
71240
71241@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71242 /*
71243 * We may have set a slab to be unmergeable during bootstrap.
71244 */
71245- if (s->refcount < 0)
71246+ if (atomic_read(&s->refcount) < 0)
71247 return 1;
71248
71249 return 0;
71250@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71251 down_write(&slub_lock);
71252 s = find_mergeable(size, align, flags, name, ctor);
71253 if (s) {
71254- s->refcount++;
71255+ atomic_inc(&s->refcount);
71256 /*
71257 * Adjust the object sizes so that we clear
71258 * the complete object on kzalloc.
71259@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71260 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71261
71262 if (sysfs_slab_alias(s, name)) {
71263- s->refcount--;
71264+ atomic_dec(&s->refcount);
71265 goto err;
71266 }
71267 up_write(&slub_lock);
71268@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71269 }
71270 #endif
71271
71272-#ifdef CONFIG_SYSFS
71273+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71274 static int count_inuse(struct page *page)
71275 {
71276 return page->inuse;
71277@@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71278 validate_slab_cache(kmalloc_caches[9]);
71279 }
71280 #else
71281-#ifdef CONFIG_SYSFS
71282+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71283 static void resiliency_test(void) {};
71284 #endif
71285 #endif
71286
71287-#ifdef CONFIG_SYSFS
71288+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71289 enum slab_stat_type {
71290 SL_ALL, /* All slabs */
71291 SL_PARTIAL, /* Only partially allocated slabs */
71292@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71293
71294 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71295 {
71296- return sprintf(buf, "%d\n", s->refcount - 1);
71297+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71298 }
71299 SLAB_ATTR_RO(aliases);
71300
71301@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71302 return name;
71303 }
71304
71305+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71306 static int sysfs_slab_add(struct kmem_cache *s)
71307 {
71308 int err;
71309@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71310 kobject_del(&s->kobj);
71311 kobject_put(&s->kobj);
71312 }
71313+#endif
71314
71315 /*
71316 * Need to buffer aliases during bootup until sysfs becomes
71317@@ -5298,6 +5345,7 @@ struct saved_alias {
71318
71319 static struct saved_alias *alias_list;
71320
71321+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71322 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71323 {
71324 struct saved_alias *al;
71325@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71326 alias_list = al;
71327 return 0;
71328 }
71329+#endif
71330
71331 static int __init slab_sysfs_init(void)
71332 {
71333diff --git a/mm/swap.c b/mm/swap.c
71334index a91caf7..b887e735 100644
71335--- a/mm/swap.c
71336+++ b/mm/swap.c
71337@@ -31,6 +31,7 @@
71338 #include <linux/backing-dev.h>
71339 #include <linux/memcontrol.h>
71340 #include <linux/gfp.h>
71341+#include <linux/hugetlb.h>
71342
71343 #include "internal.h"
71344
71345@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71346
71347 __page_cache_release(page);
71348 dtor = get_compound_page_dtor(page);
71349+ if (!PageHuge(page))
71350+ BUG_ON(dtor != free_compound_page);
71351 (*dtor)(page);
71352 }
71353
71354diff --git a/mm/swapfile.c b/mm/swapfile.c
71355index b1cd120..aaae885 100644
71356--- a/mm/swapfile.c
71357+++ b/mm/swapfile.c
71358@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71359
71360 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71361 /* Activity counter to indicate that a swapon or swapoff has occurred */
71362-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71363+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71364
71365 static inline unsigned char swap_count(unsigned char ent)
71366 {
71367@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71368 }
71369 filp_close(swap_file, NULL);
71370 err = 0;
71371- atomic_inc(&proc_poll_event);
71372+ atomic_inc_unchecked(&proc_poll_event);
71373 wake_up_interruptible(&proc_poll_wait);
71374
71375 out_dput:
71376@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71377
71378 poll_wait(file, &proc_poll_wait, wait);
71379
71380- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71381- seq->poll_event = atomic_read(&proc_poll_event);
71382+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71383+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71384 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71385 }
71386
71387@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71388 return ret;
71389
71390 seq = file->private_data;
71391- seq->poll_event = atomic_read(&proc_poll_event);
71392+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71393 return 0;
71394 }
71395
71396@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71397 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71398
71399 mutex_unlock(&swapon_mutex);
71400- atomic_inc(&proc_poll_event);
71401+ atomic_inc_unchecked(&proc_poll_event);
71402 wake_up_interruptible(&proc_poll_wait);
71403
71404 if (S_ISREG(inode->i_mode))
71405diff --git a/mm/util.c b/mm/util.c
71406index 136ac4f..5117eef 100644
71407--- a/mm/util.c
71408+++ b/mm/util.c
71409@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71410 * allocated buffer. Use this if you don't want to free the buffer immediately
71411 * like, for example, with RCU.
71412 */
71413+#undef __krealloc
71414 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71415 {
71416 void *ret;
71417@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71418 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71419 * %NULL pointer, the object pointed to is freed.
71420 */
71421+#undef krealloc
71422 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71423 {
71424 void *ret;
71425@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71426 void arch_pick_mmap_layout(struct mm_struct *mm)
71427 {
71428 mm->mmap_base = TASK_UNMAPPED_BASE;
71429+
71430+#ifdef CONFIG_PAX_RANDMMAP
71431+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71432+ mm->mmap_base += mm->delta_mmap;
71433+#endif
71434+
71435 mm->get_unmapped_area = arch_get_unmapped_area;
71436 mm->unmap_area = arch_unmap_area;
71437 }
71438diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71439index 27be2f0..0aef2c2 100644
71440--- a/mm/vmalloc.c
71441+++ b/mm/vmalloc.c
71442@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71443
71444 pte = pte_offset_kernel(pmd, addr);
71445 do {
71446- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71447- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71448+
71449+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71450+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71451+ BUG_ON(!pte_exec(*pte));
71452+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71453+ continue;
71454+ }
71455+#endif
71456+
71457+ {
71458+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71459+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71460+ }
71461 } while (pte++, addr += PAGE_SIZE, addr != end);
71462 }
71463
71464@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71465 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71466 {
71467 pte_t *pte;
71468+ int ret = -ENOMEM;
71469
71470 /*
71471 * nr is a running index into the array which helps higher level
71472@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71473 pte = pte_alloc_kernel(pmd, addr);
71474 if (!pte)
71475 return -ENOMEM;
71476+
71477+ pax_open_kernel();
71478 do {
71479 struct page *page = pages[*nr];
71480
71481- if (WARN_ON(!pte_none(*pte)))
71482- return -EBUSY;
71483- if (WARN_ON(!page))
71484- return -ENOMEM;
71485+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71486+ if (pgprot_val(prot) & _PAGE_NX)
71487+#endif
71488+
71489+ if (WARN_ON(!pte_none(*pte))) {
71490+ ret = -EBUSY;
71491+ goto out;
71492+ }
71493+ if (WARN_ON(!page)) {
71494+ ret = -ENOMEM;
71495+ goto out;
71496+ }
71497 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71498 (*nr)++;
71499 } while (pte++, addr += PAGE_SIZE, addr != end);
71500- return 0;
71501+ ret = 0;
71502+out:
71503+ pax_close_kernel();
71504+ return ret;
71505 }
71506
71507 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71508@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71509 * and fall back on vmalloc() if that fails. Others
71510 * just put it in the vmalloc space.
71511 */
71512-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71513+#ifdef CONFIG_MODULES
71514+#ifdef MODULES_VADDR
71515 unsigned long addr = (unsigned long)x;
71516 if (addr >= MODULES_VADDR && addr < MODULES_END)
71517 return 1;
71518 #endif
71519+
71520+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71521+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71522+ return 1;
71523+#endif
71524+
71525+#endif
71526+
71527 return is_vmalloc_addr(x);
71528 }
71529
71530@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71531
71532 if (!pgd_none(*pgd)) {
71533 pud_t *pud = pud_offset(pgd, addr);
71534+#ifdef CONFIG_X86
71535+ if (!pud_large(*pud))
71536+#endif
71537 if (!pud_none(*pud)) {
71538 pmd_t *pmd = pmd_offset(pud, addr);
71539+#ifdef CONFIG_X86
71540+ if (!pmd_large(*pmd))
71541+#endif
71542 if (!pmd_none(*pmd)) {
71543 pte_t *ptep, pte;
71544
71545@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71546 struct vm_struct *area;
71547
71548 BUG_ON(in_interrupt());
71549+
71550+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71551+ if (flags & VM_KERNEXEC) {
71552+ if (start != VMALLOC_START || end != VMALLOC_END)
71553+ return NULL;
71554+ start = (unsigned long)MODULES_EXEC_VADDR;
71555+ end = (unsigned long)MODULES_EXEC_END;
71556+ }
71557+#endif
71558+
71559 if (flags & VM_IOREMAP) {
71560 int bit = fls(size);
71561
71562@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71563 if (count > totalram_pages)
71564 return NULL;
71565
71566+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71567+ if (!(pgprot_val(prot) & _PAGE_NX))
71568+ flags |= VM_KERNEXEC;
71569+#endif
71570+
71571 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71572 __builtin_return_address(0));
71573 if (!area)
71574@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71575 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71576 goto fail;
71577
71578+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71579+ if (!(pgprot_val(prot) & _PAGE_NX))
71580+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71581+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71582+ else
71583+#endif
71584+
71585 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71586 start, end, node, gfp_mask, caller);
71587 if (!area)
71588@@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71589 gfp_mask, prot, node, caller);
71590 }
71591
71592+#undef __vmalloc
71593 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71594 {
71595 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71596@@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71597 * For tight control over page level allocator and protection flags
71598 * use __vmalloc() instead.
71599 */
71600+#undef vmalloc
71601 void *vmalloc(unsigned long size)
71602 {
71603 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71604@@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71605 * For tight control over page level allocator and protection flags
71606 * use __vmalloc() instead.
71607 */
71608+#undef vzalloc
71609 void *vzalloc(unsigned long size)
71610 {
71611 return __vmalloc_node_flags(size, -1,
71612@@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71613 * The resulting memory area is zeroed so it can be mapped to userspace
71614 * without leaking data.
71615 */
71616+#undef vmalloc_user
71617 void *vmalloc_user(unsigned long size)
71618 {
71619 struct vm_struct *area;
71620@@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71621 * For tight control over page level allocator and protection flags
71622 * use __vmalloc() instead.
71623 */
71624+#undef vmalloc_node
71625 void *vmalloc_node(unsigned long size, int node)
71626 {
71627 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71628@@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71629 * For tight control over page level allocator and protection flags
71630 * use __vmalloc_node() instead.
71631 */
71632+#undef vzalloc_node
71633 void *vzalloc_node(unsigned long size, int node)
71634 {
71635 return __vmalloc_node_flags(size, node,
71636@@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71637 * For tight control over page level allocator and protection flags
71638 * use __vmalloc() instead.
71639 */
71640-
71641+#undef vmalloc_exec
71642 void *vmalloc_exec(unsigned long size)
71643 {
71644- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71645+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71646 -1, __builtin_return_address(0));
71647 }
71648
71649@@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71650 * Allocate enough 32bit PA addressable pages to cover @size from the
71651 * page level allocator and map them into contiguous kernel virtual space.
71652 */
71653+#undef vmalloc_32
71654 void *vmalloc_32(unsigned long size)
71655 {
71656 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71657@@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71658 * The resulting memory area is 32bit addressable and zeroed so it can be
71659 * mapped to userspace without leaking data.
71660 */
71661+#undef vmalloc_32_user
71662 void *vmalloc_32_user(unsigned long size)
71663 {
71664 struct vm_struct *area;
71665@@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71666 unsigned long uaddr = vma->vm_start;
71667 unsigned long usize = vma->vm_end - vma->vm_start;
71668
71669+ BUG_ON(vma->vm_mirror);
71670+
71671 if ((PAGE_SIZE-1) & (unsigned long)addr)
71672 return -EINVAL;
71673
71674diff --git a/mm/vmstat.c b/mm/vmstat.c
71675index 8fd603b..cf0d930 100644
71676--- a/mm/vmstat.c
71677+++ b/mm/vmstat.c
71678@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71679 *
71680 * vm_stat contains the global counters
71681 */
71682-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71683+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71684 EXPORT_SYMBOL(vm_stat);
71685
71686 #ifdef CONFIG_SMP
71687@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71688 v = p->vm_stat_diff[i];
71689 p->vm_stat_diff[i] = 0;
71690 local_irq_restore(flags);
71691- atomic_long_add(v, &zone->vm_stat[i]);
71692+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71693 global_diff[i] += v;
71694 #ifdef CONFIG_NUMA
71695 /* 3 seconds idle till flush */
71696@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71697
71698 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71699 if (global_diff[i])
71700- atomic_long_add(global_diff[i], &vm_stat[i]);
71701+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71702 }
71703
71704 #endif
71705@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71706 start_cpu_timer(cpu);
71707 #endif
71708 #ifdef CONFIG_PROC_FS
71709- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71710- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71711- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71712- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71713+ {
71714+ mode_t gr_mode = S_IRUGO;
71715+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71716+ gr_mode = S_IRUSR;
71717+#endif
71718+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71719+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71720+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71721+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71722+#else
71723+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71724+#endif
71725+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71726+ }
71727 #endif
71728 return 0;
71729 }
71730diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71731index 5471628..cef8398 100644
71732--- a/net/8021q/vlan.c
71733+++ b/net/8021q/vlan.c
71734@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71735 err = -EPERM;
71736 if (!capable(CAP_NET_ADMIN))
71737 break;
71738- if ((args.u.name_type >= 0) &&
71739- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71740+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71741 struct vlan_net *vn;
71742
71743 vn = net_generic(net, vlan_net_id);
71744diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71745index fdfdb57..38d368c 100644
71746--- a/net/9p/trans_fd.c
71747+++ b/net/9p/trans_fd.c
71748@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71749 oldfs = get_fs();
71750 set_fs(get_ds());
71751 /* The cast to a user pointer is valid due to the set_fs() */
71752- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71753+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71754 set_fs(oldfs);
71755
71756 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71757diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71758index f41f026..fe76ea8 100644
71759--- a/net/atm/atm_misc.c
71760+++ b/net/atm/atm_misc.c
71761@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71762 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71763 return 1;
71764 atm_return(vcc, truesize);
71765- atomic_inc(&vcc->stats->rx_drop);
71766+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71767 return 0;
71768 }
71769 EXPORT_SYMBOL(atm_charge);
71770@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71771 }
71772 }
71773 atm_return(vcc, guess);
71774- atomic_inc(&vcc->stats->rx_drop);
71775+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71776 return NULL;
71777 }
71778 EXPORT_SYMBOL(atm_alloc_charge);
71779@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71780
71781 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71782 {
71783-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71784+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71785 __SONET_ITEMS
71786 #undef __HANDLE_ITEM
71787 }
71788@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71789
71790 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71791 {
71792-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71793+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71794 __SONET_ITEMS
71795 #undef __HANDLE_ITEM
71796 }
71797diff --git a/net/atm/lec.h b/net/atm/lec.h
71798index dfc0719..47c5322 100644
71799--- a/net/atm/lec.h
71800+++ b/net/atm/lec.h
71801@@ -48,7 +48,7 @@ struct lane2_ops {
71802 const u8 *tlvs, u32 sizeoftlvs);
71803 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71804 const u8 *tlvs, u32 sizeoftlvs);
71805-};
71806+} __no_const;
71807
71808 /*
71809 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71810diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71811index 0919a88..a23d54e 100644
71812--- a/net/atm/mpc.h
71813+++ b/net/atm/mpc.h
71814@@ -33,7 +33,7 @@ struct mpoa_client {
71815 struct mpc_parameters parameters; /* parameters for this client */
71816
71817 const struct net_device_ops *old_ops;
71818- struct net_device_ops new_ops;
71819+ net_device_ops_no_const new_ops;
71820 };
71821
71822
71823diff --git a/net/atm/proc.c b/net/atm/proc.c
71824index 0d020de..011c7bb 100644
71825--- a/net/atm/proc.c
71826+++ b/net/atm/proc.c
71827@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71828 const struct k_atm_aal_stats *stats)
71829 {
71830 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71831- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71832- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71833- atomic_read(&stats->rx_drop));
71834+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71835+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71836+ atomic_read_unchecked(&stats->rx_drop));
71837 }
71838
71839 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71840diff --git a/net/atm/resources.c b/net/atm/resources.c
71841index 23f45ce..c748f1a 100644
71842--- a/net/atm/resources.c
71843+++ b/net/atm/resources.c
71844@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71845 static void copy_aal_stats(struct k_atm_aal_stats *from,
71846 struct atm_aal_stats *to)
71847 {
71848-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71849+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71850 __AAL_STAT_ITEMS
71851 #undef __HANDLE_ITEM
71852 }
71853@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71854 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71855 struct atm_aal_stats *to)
71856 {
71857-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71858+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71859 __AAL_STAT_ITEMS
71860 #undef __HANDLE_ITEM
71861 }
71862diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71863index 3512e25..2b33401 100644
71864--- a/net/batman-adv/bat_iv_ogm.c
71865+++ b/net/batman-adv/bat_iv_ogm.c
71866@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71867
71868 /* change sequence number to network order */
71869 batman_ogm_packet->seqno =
71870- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71871+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71872
71873 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71874 batman_ogm_packet->tt_crc = htons((uint16_t)
71875@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71876 else
71877 batman_ogm_packet->gw_flags = NO_FLAGS;
71878
71879- atomic_inc(&hard_iface->seqno);
71880+ atomic_inc_unchecked(&hard_iface->seqno);
71881
71882 slide_own_bcast_window(hard_iface);
71883 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71884@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71885 return;
71886
71887 /* could be changed by schedule_own_packet() */
71888- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71889+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71890
71891 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71892
71893diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71894index 7704df4..beb4e16 100644
71895--- a/net/batman-adv/hard-interface.c
71896+++ b/net/batman-adv/hard-interface.c
71897@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71898 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71899 dev_add_pack(&hard_iface->batman_adv_ptype);
71900
71901- atomic_set(&hard_iface->seqno, 1);
71902- atomic_set(&hard_iface->frag_seqno, 1);
71903+ atomic_set_unchecked(&hard_iface->seqno, 1);
71904+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71905 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71906 hard_iface->net_dev->name);
71907
71908diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
71909index f9cc957..efd9dae 100644
71910--- a/net/batman-adv/soft-interface.c
71911+++ b/net/batman-adv/soft-interface.c
71912@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
71913
71914 /* set broadcast sequence number */
71915 bcast_packet->seqno =
71916- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71917+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71918
71919 add_bcast_packet_to_list(bat_priv, skb, 1);
71920
71921@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
71922 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71923
71924 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71925- atomic_set(&bat_priv->bcast_seqno, 1);
71926+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71927 atomic_set(&bat_priv->ttvn, 0);
71928 atomic_set(&bat_priv->tt_local_changes, 0);
71929 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71930diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
71931index ab8d0fe..ceba3fd 100644
71932--- a/net/batman-adv/types.h
71933+++ b/net/batman-adv/types.h
71934@@ -38,8 +38,8 @@ struct hard_iface {
71935 int16_t if_num;
71936 char if_status;
71937 struct net_device *net_dev;
71938- atomic_t seqno;
71939- atomic_t frag_seqno;
71940+ atomic_unchecked_t seqno;
71941+ atomic_unchecked_t frag_seqno;
71942 unsigned char *packet_buff;
71943 int packet_len;
71944 struct kobject *hardif_obj;
71945@@ -154,7 +154,7 @@ struct bat_priv {
71946 atomic_t orig_interval; /* uint */
71947 atomic_t hop_penalty; /* uint */
71948 atomic_t log_level; /* uint */
71949- atomic_t bcast_seqno;
71950+ atomic_unchecked_t bcast_seqno;
71951 atomic_t bcast_queue_left;
71952 atomic_t batman_queue_left;
71953 atomic_t ttvn; /* translation table version number */
71954diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
71955index 07d1c1d..7e9bea9 100644
71956--- a/net/batman-adv/unicast.c
71957+++ b/net/batman-adv/unicast.c
71958@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
71959 frag1->flags = UNI_FRAG_HEAD | large_tail;
71960 frag2->flags = large_tail;
71961
71962- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71963+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71964 frag1->seqno = htons(seqno - 1);
71965 frag2->seqno = htons(seqno);
71966
71967diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
71968index c1c597e..05ebb40 100644
71969--- a/net/bluetooth/hci_conn.c
71970+++ b/net/bluetooth/hci_conn.c
71971@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
71972 memset(&cp, 0, sizeof(cp));
71973
71974 cp.handle = cpu_to_le16(conn->handle);
71975- memcpy(cp.ltk, ltk, sizeof(ltk));
71976+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71977
71978 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71979 }
71980diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
71981index 17b5b1c..826d872 100644
71982--- a/net/bluetooth/l2cap_core.c
71983+++ b/net/bluetooth/l2cap_core.c
71984@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
71985 break;
71986
71987 case L2CAP_CONF_RFC:
71988- if (olen == sizeof(rfc))
71989- memcpy(&rfc, (void *)val, olen);
71990+ if (olen != sizeof(rfc))
71991+ break;
71992+
71993+ memcpy(&rfc, (void *)val, olen);
71994
71995 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
71996 rfc.mode != chan->mode)
71997@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
71998
71999 switch (type) {
72000 case L2CAP_CONF_RFC:
72001- if (olen == sizeof(rfc))
72002- memcpy(&rfc, (void *)val, olen);
72003+ if (olen != sizeof(rfc))
72004+ break;
72005+
72006+ memcpy(&rfc, (void *)val, olen);
72007 goto done;
72008 }
72009 }
72010diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72011index a5f4e57..910ee6d 100644
72012--- a/net/bridge/br_multicast.c
72013+++ b/net/bridge/br_multicast.c
72014@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72015 nexthdr = ip6h->nexthdr;
72016 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72017
72018- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72019+ if (nexthdr != IPPROTO_ICMPV6)
72020 return 0;
72021
72022 /* Okay, we found ICMPv6 header */
72023diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72024index 5864cc4..121f3a3 100644
72025--- a/net/bridge/netfilter/ebtables.c
72026+++ b/net/bridge/netfilter/ebtables.c
72027@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72028 tmp.valid_hooks = t->table->valid_hooks;
72029 }
72030 mutex_unlock(&ebt_mutex);
72031- if (copy_to_user(user, &tmp, *len) != 0){
72032+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72033 BUGPRINT("c2u Didn't work\n");
72034 ret = -EFAULT;
72035 break;
72036diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72037index a986280..13444a1 100644
72038--- a/net/caif/caif_socket.c
72039+++ b/net/caif/caif_socket.c
72040@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72041 #ifdef CONFIG_DEBUG_FS
72042 struct debug_fs_counter {
72043 atomic_t caif_nr_socks;
72044- atomic_t caif_sock_create;
72045- atomic_t num_connect_req;
72046- atomic_t num_connect_resp;
72047- atomic_t num_connect_fail_resp;
72048- atomic_t num_disconnect;
72049- atomic_t num_remote_shutdown_ind;
72050- atomic_t num_tx_flow_off_ind;
72051- atomic_t num_tx_flow_on_ind;
72052- atomic_t num_rx_flow_off;
72053- atomic_t num_rx_flow_on;
72054+ atomic_unchecked_t caif_sock_create;
72055+ atomic_unchecked_t num_connect_req;
72056+ atomic_unchecked_t num_connect_resp;
72057+ atomic_unchecked_t num_connect_fail_resp;
72058+ atomic_unchecked_t num_disconnect;
72059+ atomic_unchecked_t num_remote_shutdown_ind;
72060+ atomic_unchecked_t num_tx_flow_off_ind;
72061+ atomic_unchecked_t num_tx_flow_on_ind;
72062+ atomic_unchecked_t num_rx_flow_off;
72063+ atomic_unchecked_t num_rx_flow_on;
72064 };
72065 static struct debug_fs_counter cnt;
72066 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72067+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72068 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72069 #else
72070 #define dbfs_atomic_inc(v) 0
72071@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72072 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72073 sk_rcvbuf_lowwater(cf_sk));
72074 set_rx_flow_off(cf_sk);
72075- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72076+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72077 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72078 }
72079
72080@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72081 set_rx_flow_off(cf_sk);
72082 if (net_ratelimit())
72083 pr_debug("sending flow OFF due to rmem_schedule\n");
72084- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72085+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72086 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72087 }
72088 skb->dev = NULL;
72089@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72090 switch (flow) {
72091 case CAIF_CTRLCMD_FLOW_ON_IND:
72092 /* OK from modem to start sending again */
72093- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72094+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72095 set_tx_flow_on(cf_sk);
72096 cf_sk->sk.sk_state_change(&cf_sk->sk);
72097 break;
72098
72099 case CAIF_CTRLCMD_FLOW_OFF_IND:
72100 /* Modem asks us to shut up */
72101- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72102+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72103 set_tx_flow_off(cf_sk);
72104 cf_sk->sk.sk_state_change(&cf_sk->sk);
72105 break;
72106@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72107 /* We're now connected */
72108 caif_client_register_refcnt(&cf_sk->layer,
72109 cfsk_hold, cfsk_put);
72110- dbfs_atomic_inc(&cnt.num_connect_resp);
72111+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72112 cf_sk->sk.sk_state = CAIF_CONNECTED;
72113 set_tx_flow_on(cf_sk);
72114 cf_sk->sk.sk_state_change(&cf_sk->sk);
72115@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72116
72117 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72118 /* Connect request failed */
72119- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72120+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72121 cf_sk->sk.sk_err = ECONNREFUSED;
72122 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72123 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72124@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72125
72126 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72127 /* Modem has closed this connection, or device is down. */
72128- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72129+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72130 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72131 cf_sk->sk.sk_err = ECONNRESET;
72132 set_rx_flow_on(cf_sk);
72133@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72134 return;
72135
72136 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72137- dbfs_atomic_inc(&cnt.num_rx_flow_on);
72138+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72139 set_rx_flow_on(cf_sk);
72140 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72141 }
72142@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72143 /*ifindex = id of the interface.*/
72144 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72145
72146- dbfs_atomic_inc(&cnt.num_connect_req);
72147+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72148 cf_sk->layer.receive = caif_sktrecv_cb;
72149
72150 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72151@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72152 spin_unlock_bh(&sk->sk_receive_queue.lock);
72153 sock->sk = NULL;
72154
72155- dbfs_atomic_inc(&cnt.num_disconnect);
72156+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72157
72158 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72159 if (cf_sk->debugfs_socket_dir != NULL)
72160@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72161 cf_sk->conn_req.protocol = protocol;
72162 /* Increase the number of sockets created. */
72163 dbfs_atomic_inc(&cnt.caif_nr_socks);
72164- num = dbfs_atomic_inc(&cnt.caif_sock_create);
72165+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72166 #ifdef CONFIG_DEBUG_FS
72167 if (!IS_ERR(debugfsdir)) {
72168
72169diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72170index 5cf5222..6f704ad 100644
72171--- a/net/caif/cfctrl.c
72172+++ b/net/caif/cfctrl.c
72173@@ -9,6 +9,7 @@
72174 #include <linux/stddef.h>
72175 #include <linux/spinlock.h>
72176 #include <linux/slab.h>
72177+#include <linux/sched.h>
72178 #include <net/caif/caif_layer.h>
72179 #include <net/caif/cfpkt.h>
72180 #include <net/caif/cfctrl.h>
72181@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72182 memset(&dev_info, 0, sizeof(dev_info));
72183 dev_info.id = 0xff;
72184 cfsrvl_init(&this->serv, 0, &dev_info, false);
72185- atomic_set(&this->req_seq_no, 1);
72186- atomic_set(&this->rsp_seq_no, 1);
72187+ atomic_set_unchecked(&this->req_seq_no, 1);
72188+ atomic_set_unchecked(&this->rsp_seq_no, 1);
72189 this->serv.layer.receive = cfctrl_recv;
72190 sprintf(this->serv.layer.name, "ctrl");
72191 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72192@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72193 struct cfctrl_request_info *req)
72194 {
72195 spin_lock_bh(&ctrl->info_list_lock);
72196- atomic_inc(&ctrl->req_seq_no);
72197- req->sequence_no = atomic_read(&ctrl->req_seq_no);
72198+ atomic_inc_unchecked(&ctrl->req_seq_no);
72199+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72200 list_add_tail(&req->list, &ctrl->list);
72201 spin_unlock_bh(&ctrl->info_list_lock);
72202 }
72203@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72204 if (p != first)
72205 pr_warn("Requests are not received in order\n");
72206
72207- atomic_set(&ctrl->rsp_seq_no,
72208+ atomic_set_unchecked(&ctrl->rsp_seq_no,
72209 p->sequence_no);
72210 list_del(&p->list);
72211 goto out;
72212diff --git a/net/can/gw.c b/net/can/gw.c
72213index 3d79b12..8de85fa 100644
72214--- a/net/can/gw.c
72215+++ b/net/can/gw.c
72216@@ -96,7 +96,7 @@ struct cf_mod {
72217 struct {
72218 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72219 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72220- } csumfunc;
72221+ } __no_const csumfunc;
72222 };
72223
72224
72225diff --git a/net/compat.c b/net/compat.c
72226index 6def90e..c6992fa 100644
72227--- a/net/compat.c
72228+++ b/net/compat.c
72229@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72230 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72231 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72232 return -EFAULT;
72233- kmsg->msg_name = compat_ptr(tmp1);
72234- kmsg->msg_iov = compat_ptr(tmp2);
72235- kmsg->msg_control = compat_ptr(tmp3);
72236+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72237+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72238+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72239 return 0;
72240 }
72241
72242@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72243
72244 if (kern_msg->msg_namelen) {
72245 if (mode == VERIFY_READ) {
72246- int err = move_addr_to_kernel(kern_msg->msg_name,
72247+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72248 kern_msg->msg_namelen,
72249 kern_address);
72250 if (err < 0)
72251@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72252 kern_msg->msg_name = NULL;
72253
72254 tot_len = iov_from_user_compat_to_kern(kern_iov,
72255- (struct compat_iovec __user *)kern_msg->msg_iov,
72256+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
72257 kern_msg->msg_iovlen);
72258 if (tot_len >= 0)
72259 kern_msg->msg_iov = kern_iov;
72260@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72261
72262 #define CMSG_COMPAT_FIRSTHDR(msg) \
72263 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72264- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72265+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72266 (struct compat_cmsghdr __user *)NULL)
72267
72268 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72269 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72270 (ucmlen) <= (unsigned long) \
72271 ((mhdr)->msg_controllen - \
72272- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72273+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72274
72275 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72276 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72277 {
72278 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72279- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72280+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72281 msg->msg_controllen)
72282 return NULL;
72283 return (struct compat_cmsghdr __user *)ptr;
72284@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72285 {
72286 struct compat_timeval ctv;
72287 struct compat_timespec cts[3];
72288- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72289+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72290 struct compat_cmsghdr cmhdr;
72291 int cmlen;
72292
72293@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72294
72295 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72296 {
72297- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72298+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72299 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72300 int fdnum = scm->fp->count;
72301 struct file **fp = scm->fp->fp;
72302@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72303 return -EFAULT;
72304 old_fs = get_fs();
72305 set_fs(KERNEL_DS);
72306- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72307+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72308 set_fs(old_fs);
72309
72310 return err;
72311@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72312 len = sizeof(ktime);
72313 old_fs = get_fs();
72314 set_fs(KERNEL_DS);
72315- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72316+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72317 set_fs(old_fs);
72318
72319 if (!err) {
72320@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72321 case MCAST_JOIN_GROUP:
72322 case MCAST_LEAVE_GROUP:
72323 {
72324- struct compat_group_req __user *gr32 = (void *)optval;
72325+ struct compat_group_req __user *gr32 = (void __user *)optval;
72326 struct group_req __user *kgr =
72327 compat_alloc_user_space(sizeof(struct group_req));
72328 u32 interface;
72329@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72330 case MCAST_BLOCK_SOURCE:
72331 case MCAST_UNBLOCK_SOURCE:
72332 {
72333- struct compat_group_source_req __user *gsr32 = (void *)optval;
72334+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72335 struct group_source_req __user *kgsr = compat_alloc_user_space(
72336 sizeof(struct group_source_req));
72337 u32 interface;
72338@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72339 }
72340 case MCAST_MSFILTER:
72341 {
72342- struct compat_group_filter __user *gf32 = (void *)optval;
72343+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72344 struct group_filter __user *kgf;
72345 u32 interface, fmode, numsrc;
72346
72347@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72348 char __user *optval, int __user *optlen,
72349 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72350 {
72351- struct compat_group_filter __user *gf32 = (void *)optval;
72352+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72353 struct group_filter __user *kgf;
72354 int __user *koptlen;
72355 u32 interface, fmode, numsrc;
72356diff --git a/net/core/datagram.c b/net/core/datagram.c
72357index 68bbf9f..5ef0d12 100644
72358--- a/net/core/datagram.c
72359+++ b/net/core/datagram.c
72360@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72361 }
72362
72363 kfree_skb(skb);
72364- atomic_inc(&sk->sk_drops);
72365+ atomic_inc_unchecked(&sk->sk_drops);
72366 sk_mem_reclaim_partial(sk);
72367
72368 return err;
72369diff --git a/net/core/dev.c b/net/core/dev.c
72370index 5a13edf..a6f2bd2 100644
72371--- a/net/core/dev.c
72372+++ b/net/core/dev.c
72373@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72374 if (no_module && capable(CAP_NET_ADMIN))
72375 no_module = request_module("netdev-%s", name);
72376 if (no_module && capable(CAP_SYS_MODULE)) {
72377+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72378+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72379+#else
72380 if (!request_module("%s", name))
72381 pr_err("Loading kernel module for a network device "
72382 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72383 "instead\n", name);
72384+#endif
72385 }
72386 }
72387 EXPORT_SYMBOL(dev_load);
72388@@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72389 {
72390 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72391 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72392- atomic_long_inc(&dev->rx_dropped);
72393+ atomic_long_inc_unchecked(&dev->rx_dropped);
72394 kfree_skb(skb);
72395 return NET_RX_DROP;
72396 }
72397@@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72398 nf_reset(skb);
72399
72400 if (unlikely(!is_skb_forwardable(dev, skb))) {
72401- atomic_long_inc(&dev->rx_dropped);
72402+ atomic_long_inc_unchecked(&dev->rx_dropped);
72403 kfree_skb(skb);
72404 return NET_RX_DROP;
72405 }
72406@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72407
72408 struct dev_gso_cb {
72409 void (*destructor)(struct sk_buff *skb);
72410-};
72411+} __no_const;
72412
72413 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72414
72415@@ -2970,7 +2974,7 @@ enqueue:
72416
72417 local_irq_restore(flags);
72418
72419- atomic_long_inc(&skb->dev->rx_dropped);
72420+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72421 kfree_skb(skb);
72422 return NET_RX_DROP;
72423 }
72424@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72425 }
72426 EXPORT_SYMBOL(netif_rx_ni);
72427
72428-static void net_tx_action(struct softirq_action *h)
72429+static void net_tx_action(void)
72430 {
72431 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72432
72433@@ -3333,7 +3337,7 @@ ncls:
72434 if (pt_prev) {
72435 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72436 } else {
72437- atomic_long_inc(&skb->dev->rx_dropped);
72438+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72439 kfree_skb(skb);
72440 /* Jamal, now you will not able to escape explaining
72441 * me how you were going to use this. :-)
72442@@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72443 }
72444 EXPORT_SYMBOL(netif_napi_del);
72445
72446-static void net_rx_action(struct softirq_action *h)
72447+static void net_rx_action(void)
72448 {
72449 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72450 unsigned long time_limit = jiffies + 2;
72451@@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72452 } else {
72453 netdev_stats_to_stats64(storage, &dev->stats);
72454 }
72455- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72456+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72457 return storage;
72458 }
72459 EXPORT_SYMBOL(dev_get_stats);
72460diff --git a/net/core/flow.c b/net/core/flow.c
72461index e318c7e..168b1d0 100644
72462--- a/net/core/flow.c
72463+++ b/net/core/flow.c
72464@@ -61,7 +61,7 @@ struct flow_cache {
72465 struct timer_list rnd_timer;
72466 };
72467
72468-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72469+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72470 EXPORT_SYMBOL(flow_cache_genid);
72471 static struct flow_cache flow_cache_global;
72472 static struct kmem_cache *flow_cachep __read_mostly;
72473@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72474
72475 static int flow_entry_valid(struct flow_cache_entry *fle)
72476 {
72477- if (atomic_read(&flow_cache_genid) != fle->genid)
72478+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72479 return 0;
72480 if (fle->object && !fle->object->ops->check(fle->object))
72481 return 0;
72482@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72483 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72484 fcp->hash_count++;
72485 }
72486- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72487+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72488 flo = fle->object;
72489 if (!flo)
72490 goto ret_object;
72491@@ -280,7 +280,7 @@ nocache:
72492 }
72493 flo = resolver(net, key, family, dir, flo, ctx);
72494 if (fle) {
72495- fle->genid = atomic_read(&flow_cache_genid);
72496+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72497 if (!IS_ERR(flo))
72498 fle->object = flo;
72499 else
72500diff --git a/net/core/iovec.c b/net/core/iovec.c
72501index c40f27e..7f49254 100644
72502--- a/net/core/iovec.c
72503+++ b/net/core/iovec.c
72504@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72505 if (m->msg_namelen) {
72506 if (mode == VERIFY_READ) {
72507 void __user *namep;
72508- namep = (void __user __force *) m->msg_name;
72509+ namep = (void __force_user *) m->msg_name;
72510 err = move_addr_to_kernel(namep, m->msg_namelen,
72511 address);
72512 if (err < 0)
72513@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72514 }
72515
72516 size = m->msg_iovlen * sizeof(struct iovec);
72517- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72518+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72519 return -EFAULT;
72520
72521 m->msg_iov = iov;
72522diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72523index 9083e82..1673203 100644
72524--- a/net/core/rtnetlink.c
72525+++ b/net/core/rtnetlink.c
72526@@ -57,7 +57,7 @@ struct rtnl_link {
72527 rtnl_doit_func doit;
72528 rtnl_dumpit_func dumpit;
72529 rtnl_calcit_func calcit;
72530-};
72531+} __no_const;
72532
72533 static DEFINE_MUTEX(rtnl_mutex);
72534 static u16 min_ifinfo_dump_size;
72535diff --git a/net/core/scm.c b/net/core/scm.c
72536index ff52ad0..aff1c0f 100644
72537--- a/net/core/scm.c
72538+++ b/net/core/scm.c
72539@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72540 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72541 {
72542 struct cmsghdr __user *cm
72543- = (__force struct cmsghdr __user *)msg->msg_control;
72544+ = (struct cmsghdr __force_user *)msg->msg_control;
72545 struct cmsghdr cmhdr;
72546 int cmlen = CMSG_LEN(len);
72547 int err;
72548@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72549 err = -EFAULT;
72550 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72551 goto out;
72552- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72553+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72554 goto out;
72555 cmlen = CMSG_SPACE(len);
72556 if (msg->msg_controllen < cmlen)
72557@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72558 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72559 {
72560 struct cmsghdr __user *cm
72561- = (__force struct cmsghdr __user*)msg->msg_control;
72562+ = (struct cmsghdr __force_user *)msg->msg_control;
72563
72564 int fdmax = 0;
72565 int fdnum = scm->fp->count;
72566@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72567 if (fdnum < fdmax)
72568 fdmax = fdnum;
72569
72570- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72571+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72572 i++, cmfptr++)
72573 {
72574 int new_fd;
72575diff --git a/net/core/sock.c b/net/core/sock.c
72576index b23f174..b9a0d26 100644
72577--- a/net/core/sock.c
72578+++ b/net/core/sock.c
72579@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72580 struct sk_buff_head *list = &sk->sk_receive_queue;
72581
72582 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72583- atomic_inc(&sk->sk_drops);
72584+ atomic_inc_unchecked(&sk->sk_drops);
72585 trace_sock_rcvqueue_full(sk, skb);
72586 return -ENOMEM;
72587 }
72588@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72589 return err;
72590
72591 if (!sk_rmem_schedule(sk, skb->truesize)) {
72592- atomic_inc(&sk->sk_drops);
72593+ atomic_inc_unchecked(&sk->sk_drops);
72594 return -ENOBUFS;
72595 }
72596
72597@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72598 skb_dst_force(skb);
72599
72600 spin_lock_irqsave(&list->lock, flags);
72601- skb->dropcount = atomic_read(&sk->sk_drops);
72602+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72603 __skb_queue_tail(list, skb);
72604 spin_unlock_irqrestore(&list->lock, flags);
72605
72606@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72607 skb->dev = NULL;
72608
72609 if (sk_rcvqueues_full(sk, skb)) {
72610- atomic_inc(&sk->sk_drops);
72611+ atomic_inc_unchecked(&sk->sk_drops);
72612 goto discard_and_relse;
72613 }
72614 if (nested)
72615@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72616 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72617 } else if (sk_add_backlog(sk, skb)) {
72618 bh_unlock_sock(sk);
72619- atomic_inc(&sk->sk_drops);
72620+ atomic_inc_unchecked(&sk->sk_drops);
72621 goto discard_and_relse;
72622 }
72623
72624@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72625 if (len > sizeof(peercred))
72626 len = sizeof(peercred);
72627 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72628- if (copy_to_user(optval, &peercred, len))
72629+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72630 return -EFAULT;
72631 goto lenout;
72632 }
72633@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72634 return -ENOTCONN;
72635 if (lv < len)
72636 return -EINVAL;
72637- if (copy_to_user(optval, address, len))
72638+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72639 return -EFAULT;
72640 goto lenout;
72641 }
72642@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72643
72644 if (len > lv)
72645 len = lv;
72646- if (copy_to_user(optval, &v, len))
72647+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72648 return -EFAULT;
72649 lenout:
72650 if (put_user(len, optlen))
72651@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72652 */
72653 smp_wmb();
72654 atomic_set(&sk->sk_refcnt, 1);
72655- atomic_set(&sk->sk_drops, 0);
72656+ atomic_set_unchecked(&sk->sk_drops, 0);
72657 }
72658 EXPORT_SYMBOL(sock_init_data);
72659
72660diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72661index 02e75d1..9a57a7c 100644
72662--- a/net/decnet/sysctl_net_decnet.c
72663+++ b/net/decnet/sysctl_net_decnet.c
72664@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72665
72666 if (len > *lenp) len = *lenp;
72667
72668- if (copy_to_user(buffer, addr, len))
72669+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72670 return -EFAULT;
72671
72672 *lenp = len;
72673@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72674
72675 if (len > *lenp) len = *lenp;
72676
72677- if (copy_to_user(buffer, devname, len))
72678+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72679 return -EFAULT;
72680
72681 *lenp = len;
72682diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72683index 39a2d29..f39c0fe 100644
72684--- a/net/econet/Kconfig
72685+++ b/net/econet/Kconfig
72686@@ -4,7 +4,7 @@
72687
72688 config ECONET
72689 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72690- depends on EXPERIMENTAL && INET
72691+ depends on EXPERIMENTAL && INET && BROKEN
72692 ---help---
72693 Econet is a fairly old and slow networking protocol mainly used by
72694 Acorn computers to access file and print servers. It uses native
72695diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72696index 92fc5f6..b790d91 100644
72697--- a/net/ipv4/fib_frontend.c
72698+++ b/net/ipv4/fib_frontend.c
72699@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72700 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72701 fib_sync_up(dev);
72702 #endif
72703- atomic_inc(&net->ipv4.dev_addr_genid);
72704+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72705 rt_cache_flush(dev_net(dev), -1);
72706 break;
72707 case NETDEV_DOWN:
72708 fib_del_ifaddr(ifa, NULL);
72709- atomic_inc(&net->ipv4.dev_addr_genid);
72710+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72711 if (ifa->ifa_dev->ifa_list == NULL) {
72712 /* Last address was deleted from this interface.
72713 * Disable IP.
72714@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72715 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72716 fib_sync_up(dev);
72717 #endif
72718- atomic_inc(&net->ipv4.dev_addr_genid);
72719+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72720 rt_cache_flush(dev_net(dev), -1);
72721 break;
72722 case NETDEV_DOWN:
72723diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72724index 80106d8..232e898 100644
72725--- a/net/ipv4/fib_semantics.c
72726+++ b/net/ipv4/fib_semantics.c
72727@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72728 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72729 nh->nh_gw,
72730 nh->nh_parent->fib_scope);
72731- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72732+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72733
72734 return nh->nh_saddr;
72735 }
72736diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72737index ccee270..db23c3c 100644
72738--- a/net/ipv4/inet_diag.c
72739+++ b/net/ipv4/inet_diag.c
72740@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72741 r->idiag_retrans = 0;
72742
72743 r->id.idiag_if = sk->sk_bound_dev_if;
72744+
72745+#ifdef CONFIG_GRKERNSEC_HIDESYM
72746+ r->id.idiag_cookie[0] = 0;
72747+ r->id.idiag_cookie[1] = 0;
72748+#else
72749 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72750 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72751+#endif
72752
72753 r->id.idiag_sport = inet->inet_sport;
72754 r->id.idiag_dport = inet->inet_dport;
72755@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72756 r->idiag_family = tw->tw_family;
72757 r->idiag_retrans = 0;
72758 r->id.idiag_if = tw->tw_bound_dev_if;
72759+
72760+#ifdef CONFIG_GRKERNSEC_HIDESYM
72761+ r->id.idiag_cookie[0] = 0;
72762+ r->id.idiag_cookie[1] = 0;
72763+#else
72764 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72765 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72766+#endif
72767+
72768 r->id.idiag_sport = tw->tw_sport;
72769 r->id.idiag_dport = tw->tw_dport;
72770 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72771@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72772 if (sk == NULL)
72773 goto unlock;
72774
72775+#ifndef CONFIG_GRKERNSEC_HIDESYM
72776 err = -ESTALE;
72777 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72778 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72779 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72780 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72781 goto out;
72782+#endif
72783
72784 err = -ENOMEM;
72785 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72786@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72787 r->idiag_retrans = req->retrans;
72788
72789 r->id.idiag_if = sk->sk_bound_dev_if;
72790+
72791+#ifdef CONFIG_GRKERNSEC_HIDESYM
72792+ r->id.idiag_cookie[0] = 0;
72793+ r->id.idiag_cookie[1] = 0;
72794+#else
72795 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72796 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72797+#endif
72798
72799 tmo = req->expires - jiffies;
72800 if (tmo < 0)
72801diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72802index 984ec65..97ac518 100644
72803--- a/net/ipv4/inet_hashtables.c
72804+++ b/net/ipv4/inet_hashtables.c
72805@@ -18,12 +18,15 @@
72806 #include <linux/sched.h>
72807 #include <linux/slab.h>
72808 #include <linux/wait.h>
72809+#include <linux/security.h>
72810
72811 #include <net/inet_connection_sock.h>
72812 #include <net/inet_hashtables.h>
72813 #include <net/secure_seq.h>
72814 #include <net/ip.h>
72815
72816+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72817+
72818 /*
72819 * Allocate and initialize a new local port bind bucket.
72820 * The bindhash mutex for snum's hash chain must be held here.
72821@@ -530,6 +533,8 @@ ok:
72822 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72823 spin_unlock(&head->lock);
72824
72825+ gr_update_task_in_ip_table(current, inet_sk(sk));
72826+
72827 if (tw) {
72828 inet_twsk_deschedule(tw, death_row);
72829 while (twrefcnt) {
72830diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72831index 86f13c67..59a35b5 100644
72832--- a/net/ipv4/inetpeer.c
72833+++ b/net/ipv4/inetpeer.c
72834@@ -436,8 +436,8 @@ relookup:
72835 if (p) {
72836 p->daddr = *daddr;
72837 atomic_set(&p->refcnt, 1);
72838- atomic_set(&p->rid, 0);
72839- atomic_set(&p->ip_id_count,
72840+ atomic_set_unchecked(&p->rid, 0);
72841+ atomic_set_unchecked(&p->ip_id_count,
72842 (daddr->family == AF_INET) ?
72843 secure_ip_id(daddr->addr.a4) :
72844 secure_ipv6_id(daddr->addr.a6));
72845diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72846index fdaabf2..0ec3205 100644
72847--- a/net/ipv4/ip_fragment.c
72848+++ b/net/ipv4/ip_fragment.c
72849@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72850 return 0;
72851
72852 start = qp->rid;
72853- end = atomic_inc_return(&peer->rid);
72854+ end = atomic_inc_return_unchecked(&peer->rid);
72855 qp->rid = end;
72856
72857 rc = qp->q.fragments && (end - start) > max;
72858diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72859index 09ff51b..d3968eb 100644
72860--- a/net/ipv4/ip_sockglue.c
72861+++ b/net/ipv4/ip_sockglue.c
72862@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72863 len = min_t(unsigned int, len, opt->optlen);
72864 if (put_user(len, optlen))
72865 return -EFAULT;
72866- if (copy_to_user(optval, opt->__data, len))
72867+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72868+ copy_to_user(optval, opt->__data, len))
72869 return -EFAULT;
72870 return 0;
72871 }
72872@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72873 if (sk->sk_type != SOCK_STREAM)
72874 return -ENOPROTOOPT;
72875
72876- msg.msg_control = optval;
72877+ msg.msg_control = (void __force_kernel *)optval;
72878 msg.msg_controllen = len;
72879 msg.msg_flags = flags;
72880
72881diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72882index 99ec116..c5628fe 100644
72883--- a/net/ipv4/ipconfig.c
72884+++ b/net/ipv4/ipconfig.c
72885@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72886
72887 mm_segment_t oldfs = get_fs();
72888 set_fs(get_ds());
72889- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72890+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72891 set_fs(oldfs);
72892 return res;
72893 }
72894@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72895
72896 mm_segment_t oldfs = get_fs();
72897 set_fs(get_ds());
72898- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72899+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72900 set_fs(oldfs);
72901 return res;
72902 }
72903@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72904
72905 mm_segment_t oldfs = get_fs();
72906 set_fs(get_ds());
72907- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72908+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72909 set_fs(oldfs);
72910 return res;
72911 }
72912diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72913index 2133c30..5c4b40b 100644
72914--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
72915+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72916@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
72917
72918 *len = 0;
72919
72920- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72921+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72922 if (*octets == NULL)
72923 return 0;
72924
72925diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
72926index 43d4c3b..1914409 100644
72927--- a/net/ipv4/ping.c
72928+++ b/net/ipv4/ping.c
72929@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
72930 sk_rmem_alloc_get(sp),
72931 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72932 atomic_read(&sp->sk_refcnt), sp,
72933- atomic_read(&sp->sk_drops), len);
72934+ atomic_read_unchecked(&sp->sk_drops), len);
72935 }
72936
72937 static int ping_seq_show(struct seq_file *seq, void *v)
72938diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
72939index 007e2eb..85a18a0 100644
72940--- a/net/ipv4/raw.c
72941+++ b/net/ipv4/raw.c
72942@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
72943 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72944 {
72945 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72946- atomic_inc(&sk->sk_drops);
72947+ atomic_inc_unchecked(&sk->sk_drops);
72948 kfree_skb(skb);
72949 return NET_RX_DROP;
72950 }
72951@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
72952
72953 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72954 {
72955+ struct icmp_filter filter;
72956+
72957 if (optlen > sizeof(struct icmp_filter))
72958 optlen = sizeof(struct icmp_filter);
72959- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72960+ if (copy_from_user(&filter, optval, optlen))
72961 return -EFAULT;
72962+ raw_sk(sk)->filter = filter;
72963 return 0;
72964 }
72965
72966 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72967 {
72968 int len, ret = -EFAULT;
72969+ struct icmp_filter filter;
72970
72971 if (get_user(len, optlen))
72972 goto out;
72973@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
72974 if (len > sizeof(struct icmp_filter))
72975 len = sizeof(struct icmp_filter);
72976 ret = -EFAULT;
72977- if (put_user(len, optlen) ||
72978- copy_to_user(optval, &raw_sk(sk)->filter, len))
72979+ filter = raw_sk(sk)->filter;
72980+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72981 goto out;
72982 ret = 0;
72983 out: return ret;
72984@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
72985 sk_wmem_alloc_get(sp),
72986 sk_rmem_alloc_get(sp),
72987 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72988- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72989+ atomic_read(&sp->sk_refcnt),
72990+#ifdef CONFIG_GRKERNSEC_HIDESYM
72991+ NULL,
72992+#else
72993+ sp,
72994+#endif
72995+ atomic_read_unchecked(&sp->sk_drops));
72996 }
72997
72998 static int raw_seq_show(struct seq_file *seq, void *v)
72999diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73000index 94cdbc5..0cb0063 100644
73001--- a/net/ipv4/route.c
73002+++ b/net/ipv4/route.c
73003@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73004
73005 static inline int rt_genid(struct net *net)
73006 {
73007- return atomic_read(&net->ipv4.rt_genid);
73008+ return atomic_read_unchecked(&net->ipv4.rt_genid);
73009 }
73010
73011 #ifdef CONFIG_PROC_FS
73012@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73013 unsigned char shuffle;
73014
73015 get_random_bytes(&shuffle, sizeof(shuffle));
73016- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73017+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73018 redirect_genid++;
73019 }
73020
73021@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73022 error = rt->dst.error;
73023 if (peer) {
73024 inet_peer_refcheck(rt->peer);
73025- id = atomic_read(&peer->ip_id_count) & 0xffff;
73026+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73027 if (peer->tcp_ts_stamp) {
73028 ts = peer->tcp_ts;
73029 tsage = get_seconds() - peer->tcp_ts_stamp;
73030diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73031index a9db4b1..3c03301 100644
73032--- a/net/ipv4/tcp_ipv4.c
73033+++ b/net/ipv4/tcp_ipv4.c
73034@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73035 int sysctl_tcp_low_latency __read_mostly;
73036 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73037
73038+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73039+extern int grsec_enable_blackhole;
73040+#endif
73041
73042 #ifdef CONFIG_TCP_MD5SIG
73043 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73044@@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73045 return 0;
73046
73047 reset:
73048+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73049+ if (!grsec_enable_blackhole)
73050+#endif
73051 tcp_v4_send_reset(rsk, skb);
73052 discard:
73053 kfree_skb(skb);
73054@@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73055 TCP_SKB_CB(skb)->sacked = 0;
73056
73057 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73058- if (!sk)
73059+ if (!sk) {
73060+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73061+ ret = 1;
73062+#endif
73063 goto no_tcp_socket;
73064-
73065+ }
73066 process:
73067- if (sk->sk_state == TCP_TIME_WAIT)
73068+ if (sk->sk_state == TCP_TIME_WAIT) {
73069+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73070+ ret = 2;
73071+#endif
73072 goto do_time_wait;
73073+ }
73074
73075 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73076 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73077@@ -1744,6 +1757,10 @@ no_tcp_socket:
73078 bad_packet:
73079 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73080 } else {
73081+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73082+ if (!grsec_enable_blackhole || (ret == 1 &&
73083+ (skb->dev->flags & IFF_LOOPBACK)))
73084+#endif
73085 tcp_v4_send_reset(NULL, skb);
73086 }
73087
73088@@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73089 0, /* non standard timer */
73090 0, /* open_requests have no inode */
73091 atomic_read(&sk->sk_refcnt),
73092+#ifdef CONFIG_GRKERNSEC_HIDESYM
73093+ NULL,
73094+#else
73095 req,
73096+#endif
73097 len);
73098 }
73099
73100@@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73101 sock_i_uid(sk),
73102 icsk->icsk_probes_out,
73103 sock_i_ino(sk),
73104- atomic_read(&sk->sk_refcnt), sk,
73105+ atomic_read(&sk->sk_refcnt),
73106+#ifdef CONFIG_GRKERNSEC_HIDESYM
73107+ NULL,
73108+#else
73109+ sk,
73110+#endif
73111 jiffies_to_clock_t(icsk->icsk_rto),
73112 jiffies_to_clock_t(icsk->icsk_ack.ato),
73113 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73114@@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73115 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73116 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73117 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73118- atomic_read(&tw->tw_refcnt), tw, len);
73119+ atomic_read(&tw->tw_refcnt),
73120+#ifdef CONFIG_GRKERNSEC_HIDESYM
73121+ NULL,
73122+#else
73123+ tw,
73124+#endif
73125+ len);
73126 }
73127
73128 #define TMPSZ 150
73129diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73130index 66363b6..b0654a3 100644
73131--- a/net/ipv4/tcp_minisocks.c
73132+++ b/net/ipv4/tcp_minisocks.c
73133@@ -27,6 +27,10 @@
73134 #include <net/inet_common.h>
73135 #include <net/xfrm.h>
73136
73137+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73138+extern int grsec_enable_blackhole;
73139+#endif
73140+
73141 int sysctl_tcp_syncookies __read_mostly = 1;
73142 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73143
73144@@ -751,6 +755,10 @@ listen_overflow:
73145
73146 embryonic_reset:
73147 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73148+
73149+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73150+ if (!grsec_enable_blackhole)
73151+#endif
73152 if (!(flg & TCP_FLAG_RST))
73153 req->rsk_ops->send_reset(sk, skb);
73154
73155diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73156index 85ee7eb..53277ab 100644
73157--- a/net/ipv4/tcp_probe.c
73158+++ b/net/ipv4/tcp_probe.c
73159@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73160 if (cnt + width >= len)
73161 break;
73162
73163- if (copy_to_user(buf + cnt, tbuf, width))
73164+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73165 return -EFAULT;
73166 cnt += width;
73167 }
73168diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73169index 2e0f0af..e2948bf 100644
73170--- a/net/ipv4/tcp_timer.c
73171+++ b/net/ipv4/tcp_timer.c
73172@@ -22,6 +22,10 @@
73173 #include <linux/gfp.h>
73174 #include <net/tcp.h>
73175
73176+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73177+extern int grsec_lastack_retries;
73178+#endif
73179+
73180 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73181 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73182 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73183@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73184 }
73185 }
73186
73187+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73188+ if ((sk->sk_state == TCP_LAST_ACK) &&
73189+ (grsec_lastack_retries > 0) &&
73190+ (grsec_lastack_retries < retry_until))
73191+ retry_until = grsec_lastack_retries;
73192+#endif
73193+
73194 if (retransmits_timed_out(sk, retry_until,
73195 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73196 /* Has it gone just too far? */
73197diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73198index 5a65eea..bd913a1 100644
73199--- a/net/ipv4/udp.c
73200+++ b/net/ipv4/udp.c
73201@@ -86,6 +86,7 @@
73202 #include <linux/types.h>
73203 #include <linux/fcntl.h>
73204 #include <linux/module.h>
73205+#include <linux/security.h>
73206 #include <linux/socket.h>
73207 #include <linux/sockios.h>
73208 #include <linux/igmp.h>
73209@@ -108,6 +109,10 @@
73210 #include <trace/events/udp.h>
73211 #include "udp_impl.h"
73212
73213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73214+extern int grsec_enable_blackhole;
73215+#endif
73216+
73217 struct udp_table udp_table __read_mostly;
73218 EXPORT_SYMBOL(udp_table);
73219
73220@@ -565,6 +570,9 @@ found:
73221 return s;
73222 }
73223
73224+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73225+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73226+
73227 /*
73228 * This routine is called by the ICMP module when it gets some
73229 * sort of error condition. If err < 0 then the socket should
73230@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73231 dport = usin->sin_port;
73232 if (dport == 0)
73233 return -EINVAL;
73234+
73235+ err = gr_search_udp_sendmsg(sk, usin);
73236+ if (err)
73237+ return err;
73238 } else {
73239 if (sk->sk_state != TCP_ESTABLISHED)
73240 return -EDESTADDRREQ;
73241+
73242+ err = gr_search_udp_sendmsg(sk, NULL);
73243+ if (err)
73244+ return err;
73245+
73246 daddr = inet->inet_daddr;
73247 dport = inet->inet_dport;
73248 /* Open fast path for connected socket.
73249@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73250 udp_lib_checksum_complete(skb)) {
73251 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73252 IS_UDPLITE(sk));
73253- atomic_inc(&sk->sk_drops);
73254+ atomic_inc_unchecked(&sk->sk_drops);
73255 __skb_unlink(skb, rcvq);
73256 __skb_queue_tail(&list_kill, skb);
73257 }
73258@@ -1185,6 +1202,10 @@ try_again:
73259 if (!skb)
73260 goto out;
73261
73262+ err = gr_search_udp_recvmsg(sk, skb);
73263+ if (err)
73264+ goto out_free;
73265+
73266 ulen = skb->len - sizeof(struct udphdr);
73267 copied = len;
73268 if (copied > ulen)
73269@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73270
73271 drop:
73272 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73273- atomic_inc(&sk->sk_drops);
73274+ atomic_inc_unchecked(&sk->sk_drops);
73275 kfree_skb(skb);
73276 return -1;
73277 }
73278@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73279 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73280
73281 if (!skb1) {
73282- atomic_inc(&sk->sk_drops);
73283+ atomic_inc_unchecked(&sk->sk_drops);
73284 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73285 IS_UDPLITE(sk));
73286 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73287@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73288 goto csum_error;
73289
73290 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73291+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73292+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73293+#endif
73294 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73295
73296 /*
73297@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73298 sk_wmem_alloc_get(sp),
73299 sk_rmem_alloc_get(sp),
73300 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73301- atomic_read(&sp->sk_refcnt), sp,
73302- atomic_read(&sp->sk_drops), len);
73303+ atomic_read(&sp->sk_refcnt),
73304+#ifdef CONFIG_GRKERNSEC_HIDESYM
73305+ NULL,
73306+#else
73307+ sp,
73308+#endif
73309+ atomic_read_unchecked(&sp->sk_drops), len);
73310 }
73311
73312 int udp4_seq_show(struct seq_file *seq, void *v)
73313diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73314index 36806de..b86f74c 100644
73315--- a/net/ipv6/addrconf.c
73316+++ b/net/ipv6/addrconf.c
73317@@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73318 p.iph.ihl = 5;
73319 p.iph.protocol = IPPROTO_IPV6;
73320 p.iph.ttl = 64;
73321- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73322+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73323
73324 if (ops->ndo_do_ioctl) {
73325 mm_segment_t oldfs = get_fs();
73326diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73327index 1567fb1..29af910 100644
73328--- a/net/ipv6/inet6_connection_sock.c
73329+++ b/net/ipv6/inet6_connection_sock.c
73330@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73331 #ifdef CONFIG_XFRM
73332 {
73333 struct rt6_info *rt = (struct rt6_info *)dst;
73334- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73335+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73336 }
73337 #endif
73338 }
73339@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73340 #ifdef CONFIG_XFRM
73341 if (dst) {
73342 struct rt6_info *rt = (struct rt6_info *)dst;
73343- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73344+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73345 __sk_dst_reset(sk);
73346 dst = NULL;
73347 }
73348diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73349index 26cb08c..8af9877 100644
73350--- a/net/ipv6/ipv6_sockglue.c
73351+++ b/net/ipv6/ipv6_sockglue.c
73352@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73353 if (sk->sk_type != SOCK_STREAM)
73354 return -ENOPROTOOPT;
73355
73356- msg.msg_control = optval;
73357+ msg.msg_control = (void __force_kernel *)optval;
73358 msg.msg_controllen = len;
73359 msg.msg_flags = flags;
73360
73361diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73362index 331af3b..7789844 100644
73363--- a/net/ipv6/raw.c
73364+++ b/net/ipv6/raw.c
73365@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73366 {
73367 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73368 skb_checksum_complete(skb)) {
73369- atomic_inc(&sk->sk_drops);
73370+ atomic_inc_unchecked(&sk->sk_drops);
73371 kfree_skb(skb);
73372 return NET_RX_DROP;
73373 }
73374@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73375 struct raw6_sock *rp = raw6_sk(sk);
73376
73377 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73378- atomic_inc(&sk->sk_drops);
73379+ atomic_inc_unchecked(&sk->sk_drops);
73380 kfree_skb(skb);
73381 return NET_RX_DROP;
73382 }
73383@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73384
73385 if (inet->hdrincl) {
73386 if (skb_checksum_complete(skb)) {
73387- atomic_inc(&sk->sk_drops);
73388+ atomic_inc_unchecked(&sk->sk_drops);
73389 kfree_skb(skb);
73390 return NET_RX_DROP;
73391 }
73392@@ -601,7 +601,7 @@ out:
73393 return err;
73394 }
73395
73396-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73397+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73398 struct flowi6 *fl6, struct dst_entry **dstp,
73399 unsigned int flags)
73400 {
73401@@ -909,12 +909,15 @@ do_confirm:
73402 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73403 char __user *optval, int optlen)
73404 {
73405+ struct icmp6_filter filter;
73406+
73407 switch (optname) {
73408 case ICMPV6_FILTER:
73409 if (optlen > sizeof(struct icmp6_filter))
73410 optlen = sizeof(struct icmp6_filter);
73411- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73412+ if (copy_from_user(&filter, optval, optlen))
73413 return -EFAULT;
73414+ raw6_sk(sk)->filter = filter;
73415 return 0;
73416 default:
73417 return -ENOPROTOOPT;
73418@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73419 char __user *optval, int __user *optlen)
73420 {
73421 int len;
73422+ struct icmp6_filter filter;
73423
73424 switch (optname) {
73425 case ICMPV6_FILTER:
73426@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73427 len = sizeof(struct icmp6_filter);
73428 if (put_user(len, optlen))
73429 return -EFAULT;
73430- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73431+ filter = raw6_sk(sk)->filter;
73432+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73433 return -EFAULT;
73434 return 0;
73435 default:
73436@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73437 0, 0L, 0,
73438 sock_i_uid(sp), 0,
73439 sock_i_ino(sp),
73440- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73441+ atomic_read(&sp->sk_refcnt),
73442+#ifdef CONFIG_GRKERNSEC_HIDESYM
73443+ NULL,
73444+#else
73445+ sp,
73446+#endif
73447+ atomic_read_unchecked(&sp->sk_drops));
73448 }
73449
73450 static int raw6_seq_show(struct seq_file *seq, void *v)
73451diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73452index 2dea4bb..dca8ac5 100644
73453--- a/net/ipv6/tcp_ipv6.c
73454+++ b/net/ipv6/tcp_ipv6.c
73455@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73456 }
73457 #endif
73458
73459+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73460+extern int grsec_enable_blackhole;
73461+#endif
73462+
73463 static void tcp_v6_hash(struct sock *sk)
73464 {
73465 if (sk->sk_state != TCP_CLOSE) {
73466@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73467 return 0;
73468
73469 reset:
73470+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73471+ if (!grsec_enable_blackhole)
73472+#endif
73473 tcp_v6_send_reset(sk, skb);
73474 discard:
73475 if (opt_skb)
73476@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73477 TCP_SKB_CB(skb)->sacked = 0;
73478
73479 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73480- if (!sk)
73481+ if (!sk) {
73482+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73483+ ret = 1;
73484+#endif
73485 goto no_tcp_socket;
73486+ }
73487
73488 process:
73489- if (sk->sk_state == TCP_TIME_WAIT)
73490+ if (sk->sk_state == TCP_TIME_WAIT) {
73491+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73492+ ret = 2;
73493+#endif
73494 goto do_time_wait;
73495+ }
73496
73497 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73498 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73499@@ -1783,6 +1798,10 @@ no_tcp_socket:
73500 bad_packet:
73501 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73502 } else {
73503+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73504+ if (!grsec_enable_blackhole || (ret == 1 &&
73505+ (skb->dev->flags & IFF_LOOPBACK)))
73506+#endif
73507 tcp_v6_send_reset(NULL, skb);
73508 }
73509
73510@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73511 uid,
73512 0, /* non standard timer */
73513 0, /* open_requests have no inode */
73514- 0, req);
73515+ 0,
73516+#ifdef CONFIG_GRKERNSEC_HIDESYM
73517+ NULL
73518+#else
73519+ req
73520+#endif
73521+ );
73522 }
73523
73524 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73525@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73526 sock_i_uid(sp),
73527 icsk->icsk_probes_out,
73528 sock_i_ino(sp),
73529- atomic_read(&sp->sk_refcnt), sp,
73530+ atomic_read(&sp->sk_refcnt),
73531+#ifdef CONFIG_GRKERNSEC_HIDESYM
73532+ NULL,
73533+#else
73534+ sp,
73535+#endif
73536 jiffies_to_clock_t(icsk->icsk_rto),
73537 jiffies_to_clock_t(icsk->icsk_ack.ato),
73538 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73539@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73540 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73541 tw->tw_substate, 0, 0,
73542 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73543- atomic_read(&tw->tw_refcnt), tw);
73544+ atomic_read(&tw->tw_refcnt),
73545+#ifdef CONFIG_GRKERNSEC_HIDESYM
73546+ NULL
73547+#else
73548+ tw
73549+#endif
73550+ );
73551 }
73552
73553 static int tcp6_seq_show(struct seq_file *seq, void *v)
73554diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73555index 8c25419..47a51ae 100644
73556--- a/net/ipv6/udp.c
73557+++ b/net/ipv6/udp.c
73558@@ -50,6 +50,10 @@
73559 #include <linux/seq_file.h>
73560 #include "udp_impl.h"
73561
73562+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73563+extern int grsec_enable_blackhole;
73564+#endif
73565+
73566 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73567 {
73568 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73569@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73570
73571 return 0;
73572 drop:
73573- atomic_inc(&sk->sk_drops);
73574+ atomic_inc_unchecked(&sk->sk_drops);
73575 drop_no_sk_drops_inc:
73576 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73577 kfree_skb(skb);
73578@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73579 continue;
73580 }
73581 drop:
73582- atomic_inc(&sk->sk_drops);
73583+ atomic_inc_unchecked(&sk->sk_drops);
73584 UDP6_INC_STATS_BH(sock_net(sk),
73585 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73586 UDP6_INC_STATS_BH(sock_net(sk),
73587@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73588 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73589 proto == IPPROTO_UDPLITE);
73590
73591+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73592+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73593+#endif
73594 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73595
73596 kfree_skb(skb);
73597@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73598 if (!sock_owned_by_user(sk))
73599 udpv6_queue_rcv_skb(sk, skb);
73600 else if (sk_add_backlog(sk, skb)) {
73601- atomic_inc(&sk->sk_drops);
73602+ atomic_inc_unchecked(&sk->sk_drops);
73603 bh_unlock_sock(sk);
73604 sock_put(sk);
73605 goto discard;
73606@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73607 0, 0L, 0,
73608 sock_i_uid(sp), 0,
73609 sock_i_ino(sp),
73610- atomic_read(&sp->sk_refcnt), sp,
73611- atomic_read(&sp->sk_drops));
73612+ atomic_read(&sp->sk_refcnt),
73613+#ifdef CONFIG_GRKERNSEC_HIDESYM
73614+ NULL,
73615+#else
73616+ sp,
73617+#endif
73618+ atomic_read_unchecked(&sp->sk_drops));
73619 }
73620
73621 int udp6_seq_show(struct seq_file *seq, void *v)
73622diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73623index 253695d..9481ce8 100644
73624--- a/net/irda/ircomm/ircomm_tty.c
73625+++ b/net/irda/ircomm/ircomm_tty.c
73626@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73627 add_wait_queue(&self->open_wait, &wait);
73628
73629 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73630- __FILE__,__LINE__, tty->driver->name, self->open_count );
73631+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73632
73633 /* As far as I can see, we protect open_count - Jean II */
73634 spin_lock_irqsave(&self->spinlock, flags);
73635 if (!tty_hung_up_p(filp)) {
73636 extra_count = 1;
73637- self->open_count--;
73638+ local_dec(&self->open_count);
73639 }
73640 spin_unlock_irqrestore(&self->spinlock, flags);
73641- self->blocked_open++;
73642+ local_inc(&self->blocked_open);
73643
73644 while (1) {
73645 if (tty->termios->c_cflag & CBAUD) {
73646@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73647 }
73648
73649 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73650- __FILE__,__LINE__, tty->driver->name, self->open_count );
73651+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73652
73653 schedule();
73654 }
73655@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73656 if (extra_count) {
73657 /* ++ is not atomic, so this should be protected - Jean II */
73658 spin_lock_irqsave(&self->spinlock, flags);
73659- self->open_count++;
73660+ local_inc(&self->open_count);
73661 spin_unlock_irqrestore(&self->spinlock, flags);
73662 }
73663- self->blocked_open--;
73664+ local_dec(&self->blocked_open);
73665
73666 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73667- __FILE__,__LINE__, tty->driver->name, self->open_count);
73668+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73669
73670 if (!retval)
73671 self->flags |= ASYNC_NORMAL_ACTIVE;
73672@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73673 }
73674 /* ++ is not atomic, so this should be protected - Jean II */
73675 spin_lock_irqsave(&self->spinlock, flags);
73676- self->open_count++;
73677+ local_inc(&self->open_count);
73678
73679 tty->driver_data = self;
73680 self->tty = tty;
73681 spin_unlock_irqrestore(&self->spinlock, flags);
73682
73683 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73684- self->line, self->open_count);
73685+ self->line, local_read(&self->open_count));
73686
73687 /* Not really used by us, but lets do it anyway */
73688 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73689@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73690 return;
73691 }
73692
73693- if ((tty->count == 1) && (self->open_count != 1)) {
73694+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73695 /*
73696 * Uh, oh. tty->count is 1, which means that the tty
73697 * structure will be freed. state->count should always
73698@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73699 */
73700 IRDA_DEBUG(0, "%s(), bad serial port count; "
73701 "tty->count is 1, state->count is %d\n", __func__ ,
73702- self->open_count);
73703- self->open_count = 1;
73704+ local_read(&self->open_count));
73705+ local_set(&self->open_count, 1);
73706 }
73707
73708- if (--self->open_count < 0) {
73709+ if (local_dec_return(&self->open_count) < 0) {
73710 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73711- __func__, self->line, self->open_count);
73712- self->open_count = 0;
73713+ __func__, self->line, local_read(&self->open_count));
73714+ local_set(&self->open_count, 0);
73715 }
73716- if (self->open_count) {
73717+ if (local_read(&self->open_count)) {
73718 spin_unlock_irqrestore(&self->spinlock, flags);
73719
73720 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73721@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73722 tty->closing = 0;
73723 self->tty = NULL;
73724
73725- if (self->blocked_open) {
73726+ if (local_read(&self->blocked_open)) {
73727 if (self->close_delay)
73728 schedule_timeout_interruptible(self->close_delay);
73729 wake_up_interruptible(&self->open_wait);
73730@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73731 spin_lock_irqsave(&self->spinlock, flags);
73732 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73733 self->tty = NULL;
73734- self->open_count = 0;
73735+ local_set(&self->open_count, 0);
73736 spin_unlock_irqrestore(&self->spinlock, flags);
73737
73738 wake_up_interruptible(&self->open_wait);
73739@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73740 seq_putc(m, '\n');
73741
73742 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73743- seq_printf(m, "Open count: %d\n", self->open_count);
73744+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73745 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73746 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73747
73748diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73749index 274d150..656a144 100644
73750--- a/net/iucv/af_iucv.c
73751+++ b/net/iucv/af_iucv.c
73752@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73753
73754 write_lock_bh(&iucv_sk_list.lock);
73755
73756- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73757+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73758 while (__iucv_get_sock_by_name(name)) {
73759 sprintf(name, "%08x",
73760- atomic_inc_return(&iucv_sk_list.autobind_name));
73761+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73762 }
73763
73764 write_unlock_bh(&iucv_sk_list.lock);
73765diff --git a/net/key/af_key.c b/net/key/af_key.c
73766index 1e733e9..3d73c9f 100644
73767--- a/net/key/af_key.c
73768+++ b/net/key/af_key.c
73769@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73770 static u32 get_acqseq(void)
73771 {
73772 u32 res;
73773- static atomic_t acqseq;
73774+ static atomic_unchecked_t acqseq;
73775
73776 do {
73777- res = atomic_inc_return(&acqseq);
73778+ res = atomic_inc_return_unchecked(&acqseq);
73779 } while (!res);
73780 return res;
73781 }
73782diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73783index 73495f1..ad51356 100644
73784--- a/net/mac80211/ieee80211_i.h
73785+++ b/net/mac80211/ieee80211_i.h
73786@@ -27,6 +27,7 @@
73787 #include <net/ieee80211_radiotap.h>
73788 #include <net/cfg80211.h>
73789 #include <net/mac80211.h>
73790+#include <asm/local.h>
73791 #include "key.h"
73792 #include "sta_info.h"
73793
73794@@ -764,7 +765,7 @@ struct ieee80211_local {
73795 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73796 spinlock_t queue_stop_reason_lock;
73797
73798- int open_count;
73799+ local_t open_count;
73800 int monitors, cooked_mntrs;
73801 /* number of interfaces with corresponding FIF_ flags */
73802 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73803diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73804index 30d7355..e260095 100644
73805--- a/net/mac80211/iface.c
73806+++ b/net/mac80211/iface.c
73807@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73808 break;
73809 }
73810
73811- if (local->open_count == 0) {
73812+ if (local_read(&local->open_count) == 0) {
73813 res = drv_start(local);
73814 if (res)
73815 goto err_del_bss;
73816@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73817 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73818
73819 if (!is_valid_ether_addr(dev->dev_addr)) {
73820- if (!local->open_count)
73821+ if (!local_read(&local->open_count))
73822 drv_stop(local);
73823 return -EADDRNOTAVAIL;
73824 }
73825@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73826 mutex_unlock(&local->mtx);
73827
73828 if (coming_up)
73829- local->open_count++;
73830+ local_inc(&local->open_count);
73831
73832 if (hw_reconf_flags) {
73833 ieee80211_hw_config(local, hw_reconf_flags);
73834@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73835 err_del_interface:
73836 drv_remove_interface(local, &sdata->vif);
73837 err_stop:
73838- if (!local->open_count)
73839+ if (!local_read(&local->open_count))
73840 drv_stop(local);
73841 err_del_bss:
73842 sdata->bss = NULL;
73843@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73844 }
73845
73846 if (going_down)
73847- local->open_count--;
73848+ local_dec(&local->open_count);
73849
73850 switch (sdata->vif.type) {
73851 case NL80211_IFTYPE_AP_VLAN:
73852@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73853
73854 ieee80211_recalc_ps(local, -1);
73855
73856- if (local->open_count == 0) {
73857+ if (local_read(&local->open_count) == 0) {
73858 if (local->ops->napi_poll)
73859 napi_disable(&local->napi);
73860 ieee80211_clear_tx_pending(local);
73861diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73862index a7536fd..4039cc0 100644
73863--- a/net/mac80211/main.c
73864+++ b/net/mac80211/main.c
73865@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73866 local->hw.conf.power_level = power;
73867 }
73868
73869- if (changed && local->open_count) {
73870+ if (changed && local_read(&local->open_count)) {
73871 ret = drv_config(local, changed);
73872 /*
73873 * Goal:
73874diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73875index 9ee7164..56c5061 100644
73876--- a/net/mac80211/pm.c
73877+++ b/net/mac80211/pm.c
73878@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73879 struct ieee80211_sub_if_data *sdata;
73880 struct sta_info *sta;
73881
73882- if (!local->open_count)
73883+ if (!local_read(&local->open_count))
73884 goto suspend;
73885
73886 ieee80211_scan_cancel(local);
73887@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73888 cancel_work_sync(&local->dynamic_ps_enable_work);
73889 del_timer_sync(&local->dynamic_ps_timer);
73890
73891- local->wowlan = wowlan && local->open_count;
73892+ local->wowlan = wowlan && local_read(&local->open_count);
73893 if (local->wowlan) {
73894 int err = drv_suspend(local, wowlan);
73895 if (err < 0) {
73896@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73897 }
73898
73899 /* stop hardware - this must stop RX */
73900- if (local->open_count)
73901+ if (local_read(&local->open_count))
73902 ieee80211_stop_device(local);
73903
73904 suspend:
73905diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73906index 5a5a776..9600b11 100644
73907--- a/net/mac80211/rate.c
73908+++ b/net/mac80211/rate.c
73909@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
73910
73911 ASSERT_RTNL();
73912
73913- if (local->open_count)
73914+ if (local_read(&local->open_count))
73915 return -EBUSY;
73916
73917 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73918diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
73919index c97a065..ff61928 100644
73920--- a/net/mac80211/rc80211_pid_debugfs.c
73921+++ b/net/mac80211/rc80211_pid_debugfs.c
73922@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
73923
73924 spin_unlock_irqrestore(&events->lock, status);
73925
73926- if (copy_to_user(buf, pb, p))
73927+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73928 return -EFAULT;
73929
73930 return p;
73931diff --git a/net/mac80211/util.c b/net/mac80211/util.c
73932index d5230ec..c604b21 100644
73933--- a/net/mac80211/util.c
73934+++ b/net/mac80211/util.c
73935@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
73936 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73937
73938 /* everything else happens only if HW was up & running */
73939- if (!local->open_count)
73940+ if (!local_read(&local->open_count))
73941 goto wake_up;
73942
73943 /*
73944diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
73945index d5597b7..ab6d39c 100644
73946--- a/net/netfilter/Kconfig
73947+++ b/net/netfilter/Kconfig
73948@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
73949
73950 To compile it as a module, choose M here. If unsure, say N.
73951
73952+config NETFILTER_XT_MATCH_GRADM
73953+ tristate '"gradm" match support'
73954+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73955+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73956+ ---help---
73957+ The gradm match allows to match on grsecurity RBAC being enabled.
73958+ It is useful when iptables rules are applied early on bootup to
73959+ prevent connections to the machine (except from a trusted host)
73960+ while the RBAC system is disabled.
73961+
73962 config NETFILTER_XT_MATCH_HASHLIMIT
73963 tristate '"hashlimit" match support'
73964 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73965diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
73966index 1a02853..5d8c22e 100644
73967--- a/net/netfilter/Makefile
73968+++ b/net/netfilter/Makefile
73969@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73970 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73971 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73972 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73973+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73974 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73975 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73976 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73977diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
73978index 29fa5ba..8debc79 100644
73979--- a/net/netfilter/ipvs/ip_vs_conn.c
73980+++ b/net/netfilter/ipvs/ip_vs_conn.c
73981@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
73982 /* Increase the refcnt counter of the dest */
73983 atomic_inc(&dest->refcnt);
73984
73985- conn_flags = atomic_read(&dest->conn_flags);
73986+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73987 if (cp->protocol != IPPROTO_UDP)
73988 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73989 /* Bind with the destination and its corresponding transmitter */
73990@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
73991 atomic_set(&cp->refcnt, 1);
73992
73993 atomic_set(&cp->n_control, 0);
73994- atomic_set(&cp->in_pkts, 0);
73995+ atomic_set_unchecked(&cp->in_pkts, 0);
73996
73997 atomic_inc(&ipvs->conn_count);
73998 if (flags & IP_VS_CONN_F_NO_CPORT)
73999@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74000
74001 /* Don't drop the entry if its number of incoming packets is not
74002 located in [0, 8] */
74003- i = atomic_read(&cp->in_pkts);
74004+ i = atomic_read_unchecked(&cp->in_pkts);
74005 if (i > 8 || i < 0) return 0;
74006
74007 if (!todrop_rate[i]) return 0;
74008diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74009index 093cc32..9209ae1 100644
74010--- a/net/netfilter/ipvs/ip_vs_core.c
74011+++ b/net/netfilter/ipvs/ip_vs_core.c
74012@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74013 ret = cp->packet_xmit(skb, cp, pd->pp);
74014 /* do not touch skb anymore */
74015
74016- atomic_inc(&cp->in_pkts);
74017+ atomic_inc_unchecked(&cp->in_pkts);
74018 ip_vs_conn_put(cp);
74019 return ret;
74020 }
74021@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74022 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74023 pkts = sysctl_sync_threshold(ipvs);
74024 else
74025- pkts = atomic_add_return(1, &cp->in_pkts);
74026+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74027
74028 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74029 cp->protocol == IPPROTO_SCTP) {
74030diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74031index e1a66cf..0910076 100644
74032--- a/net/netfilter/ipvs/ip_vs_ctl.c
74033+++ b/net/netfilter/ipvs/ip_vs_ctl.c
74034@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74035 ip_vs_rs_hash(ipvs, dest);
74036 write_unlock_bh(&ipvs->rs_lock);
74037 }
74038- atomic_set(&dest->conn_flags, conn_flags);
74039+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
74040
74041 /* bind the service */
74042 if (!dest->svc) {
74043@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74044 " %-7s %-6d %-10d %-10d\n",
74045 &dest->addr.in6,
74046 ntohs(dest->port),
74047- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74048+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74049 atomic_read(&dest->weight),
74050 atomic_read(&dest->activeconns),
74051 atomic_read(&dest->inactconns));
74052@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74053 "%-7s %-6d %-10d %-10d\n",
74054 ntohl(dest->addr.ip),
74055 ntohs(dest->port),
74056- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74057+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74058 atomic_read(&dest->weight),
74059 atomic_read(&dest->activeconns),
74060 atomic_read(&dest->inactconns));
74061@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74062
74063 entry.addr = dest->addr.ip;
74064 entry.port = dest->port;
74065- entry.conn_flags = atomic_read(&dest->conn_flags);
74066+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74067 entry.weight = atomic_read(&dest->weight);
74068 entry.u_threshold = dest->u_threshold;
74069 entry.l_threshold = dest->l_threshold;
74070@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74071 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74072
74073 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74074- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74075+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74076 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74077 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74078 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74079diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74080index 2b6678c0..aaa41fc 100644
74081--- a/net/netfilter/ipvs/ip_vs_sync.c
74082+++ b/net/netfilter/ipvs/ip_vs_sync.c
74083@@ -649,7 +649,7 @@ control:
74084 * i.e only increment in_pkts for Templates.
74085 */
74086 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74087- int pkts = atomic_add_return(1, &cp->in_pkts);
74088+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74089
74090 if (pkts % sysctl_sync_period(ipvs) != 1)
74091 return;
74092@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74093
74094 if (opt)
74095 memcpy(&cp->in_seq, opt, sizeof(*opt));
74096- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74097+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74098 cp->state = state;
74099 cp->old_state = cp->state;
74100 /*
74101diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74102index aa2d720..d8aa111 100644
74103--- a/net/netfilter/ipvs/ip_vs_xmit.c
74104+++ b/net/netfilter/ipvs/ip_vs_xmit.c
74105@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74106 else
74107 rc = NF_ACCEPT;
74108 /* do not touch skb anymore */
74109- atomic_inc(&cp->in_pkts);
74110+ atomic_inc_unchecked(&cp->in_pkts);
74111 goto out;
74112 }
74113
74114@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74115 else
74116 rc = NF_ACCEPT;
74117 /* do not touch skb anymore */
74118- atomic_inc(&cp->in_pkts);
74119+ atomic_inc_unchecked(&cp->in_pkts);
74120 goto out;
74121 }
74122
74123diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74124index 66b2c54..c7884e3 100644
74125--- a/net/netfilter/nfnetlink_log.c
74126+++ b/net/netfilter/nfnetlink_log.c
74127@@ -70,7 +70,7 @@ struct nfulnl_instance {
74128 };
74129
74130 static DEFINE_SPINLOCK(instances_lock);
74131-static atomic_t global_seq;
74132+static atomic_unchecked_t global_seq;
74133
74134 #define INSTANCE_BUCKETS 16
74135 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74136@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74137 /* global sequence number */
74138 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74139 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74140- htonl(atomic_inc_return(&global_seq)));
74141+ htonl(atomic_inc_return_unchecked(&global_seq)));
74142
74143 if (data_len) {
74144 struct nlattr *nla;
74145diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74146new file mode 100644
74147index 0000000..6905327
74148--- /dev/null
74149+++ b/net/netfilter/xt_gradm.c
74150@@ -0,0 +1,51 @@
74151+/*
74152+ * gradm match for netfilter
74153