]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.2.4-201202051927.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.4-201202051927.patch
CommitLineData
dadd4cae
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index 81c287f..d456d02 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index c8e187e..c445af7 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+ifeq ($(KBUILD_EXTMOD),)
243+gcc-plugins:
244+ $(Q)$(MAKE) $(build)=tools/gcc
245+else
246+gcc-plugins: ;
247+endif
248+else
249+gcc-plugins:
250+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252+else
253+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254+endif
255+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256+endif
257+endif
258+
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262@@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283-$(vmlinux-dirs): prepare scripts
284+$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288@@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296@@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304@@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308-modules_prepare: prepare scripts
309+modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313@@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317+ -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321@@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329@@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333-%.s: %.c prepare scripts FORCE
334+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335+%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339-%.o: %.c prepare scripts FORCE
340+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341+%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.s: %.S prepare scripts FORCE
346+%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348-%.o: %.S prepare scripts FORCE
349+%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353@@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357-%/: prepare scripts FORCE
358+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359+%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363-%.ko: prepare scripts FORCE
364+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365+%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370index da5449e..7418343 100644
371--- a/arch/alpha/include/asm/elf.h
372+++ b/arch/alpha/include/asm/elf.h
373@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377+#ifdef CONFIG_PAX_ASLR
378+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379+
380+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382+#endif
383+
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388index de98a73..bd4f1f8 100644
389--- a/arch/alpha/include/asm/pgtable.h
390+++ b/arch/alpha/include/asm/pgtable.h
391@@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395+
396+#ifdef CONFIG_PAX_PAGEEXEC
397+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400+#else
401+# define PAGE_SHARED_NOEXEC PAGE_SHARED
402+# define PAGE_COPY_NOEXEC PAGE_COPY
403+# define PAGE_READONLY_NOEXEC PAGE_READONLY
404+#endif
405+
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410index 2fd00b7..cfd5069 100644
411--- a/arch/alpha/kernel/module.c
412+++ b/arch/alpha/kernel/module.c
413@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417- gp = (u64)me->module_core + me->core_size - 0x8000;
418+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423index 01e8715..be0e80f 100644
424--- a/arch/alpha/kernel/osf_sys.c
425+++ b/arch/alpha/kernel/osf_sys.c
426@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430- if (!vma || addr + len <= vma->vm_start)
431+ if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439+#ifdef CONFIG_PAX_RANDMMAP
440+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441+#endif
442+
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451- len, limit);
452+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453+
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458index fadd5f8..904e73a 100644
459--- a/arch/alpha/mm/fault.c
460+++ b/arch/alpha/mm/fault.c
461@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465+#ifdef CONFIG_PAX_PAGEEXEC
466+/*
467+ * PaX: decide what to do with offenders (regs->pc = fault address)
468+ *
469+ * returns 1 when task should be killed
470+ * 2 when patched PLT trampoline was detected
471+ * 3 when unpatched PLT trampoline was detected
472+ */
473+static int pax_handle_fetch_fault(struct pt_regs *regs)
474+{
475+
476+#ifdef CONFIG_PAX_EMUPLT
477+ int err;
478+
479+ do { /* PaX: patched PLT emulation #1 */
480+ unsigned int ldah, ldq, jmp;
481+
482+ err = get_user(ldah, (unsigned int *)regs->pc);
483+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485+
486+ if (err)
487+ break;
488+
489+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491+ jmp == 0x6BFB0000U)
492+ {
493+ unsigned long r27, addr;
494+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496+
497+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498+ err = get_user(r27, (unsigned long *)addr);
499+ if (err)
500+ break;
501+
502+ regs->r27 = r27;
503+ regs->pc = r27;
504+ return 2;
505+ }
506+ } while (0);
507+
508+ do { /* PaX: patched PLT emulation #2 */
509+ unsigned int ldah, lda, br;
510+
511+ err = get_user(ldah, (unsigned int *)regs->pc);
512+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
513+ err |= get_user(br, (unsigned int *)(regs->pc+8));
514+
515+ if (err)
516+ break;
517+
518+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
520+ (br & 0xFFE00000U) == 0xC3E00000U)
521+ {
522+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525+
526+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528+ return 2;
529+ }
530+ } while (0);
531+
532+ do { /* PaX: unpatched PLT emulation */
533+ unsigned int br;
534+
535+ err = get_user(br, (unsigned int *)regs->pc);
536+
537+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538+ unsigned int br2, ldq, nop, jmp;
539+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540+
541+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542+ err = get_user(br2, (unsigned int *)addr);
543+ err |= get_user(ldq, (unsigned int *)(addr+4));
544+ err |= get_user(nop, (unsigned int *)(addr+8));
545+ err |= get_user(jmp, (unsigned int *)(addr+12));
546+ err |= get_user(resolver, (unsigned long *)(addr+16));
547+
548+ if (err)
549+ break;
550+
551+ if (br2 == 0xC3600000U &&
552+ ldq == 0xA77B000CU &&
553+ nop == 0x47FF041FU &&
554+ jmp == 0x6B7B0000U)
555+ {
556+ regs->r28 = regs->pc+4;
557+ regs->r27 = addr+16;
558+ regs->pc = resolver;
559+ return 3;
560+ }
561+ }
562+ } while (0);
563+#endif
564+
565+ return 1;
566+}
567+
568+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569+{
570+ unsigned long i;
571+
572+ printk(KERN_ERR "PAX: bytes at PC: ");
573+ for (i = 0; i < 5; i++) {
574+ unsigned int c;
575+ if (get_user(c, (unsigned int *)pc+i))
576+ printk(KERN_CONT "???????? ");
577+ else
578+ printk(KERN_CONT "%08x ", c);
579+ }
580+ printk("\n");
581+}
582+#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590- if (!(vma->vm_flags & VM_EXEC))
591+ if (!(vma->vm_flags & VM_EXEC)) {
592+
593+#ifdef CONFIG_PAX_PAGEEXEC
594+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595+ goto bad_area;
596+
597+ up_read(&mm->mmap_sem);
598+ switch (pax_handle_fetch_fault(regs)) {
599+
600+#ifdef CONFIG_PAX_EMUPLT
601+ case 2:
602+ case 3:
603+ return;
604+#endif
605+
606+ }
607+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608+ do_group_exit(SIGKILL);
609+#else
610 goto bad_area;
611+#endif
612+
613+ }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618index 86976d0..8a57797 100644
619--- a/arch/arm/include/asm/atomic.h
620+++ b/arch/arm/include/asm/atomic.h
621@@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625+#ifdef CONFIG_PAX_REFCOUNT
626+typedef struct {
627+ u64 __aligned(8) counter;
628+} atomic64_unchecked_t;
629+#else
630+typedef atomic64_t atomic64_unchecked_t;
631+#endif
632+
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637index 0e9ce8d..6ef1e03 100644
638--- a/arch/arm/include/asm/elf.h
639+++ b/arch/arm/include/asm/elf.h
640@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646+
647+#ifdef CONFIG_PAX_ASLR
648+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649+
650+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652+#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660-struct mm_struct;
661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662-#define arch_randomize_brk arch_randomize_brk
663-
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668index e51b1e8..32a3113 100644
669--- a/arch/arm/include/asm/kmap_types.h
670+++ b/arch/arm/include/asm/kmap_types.h
671@@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675+ KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680index b293616..96310e5 100644
681--- a/arch/arm/include/asm/uaccess.h
682+++ b/arch/arm/include/asm/uaccess.h
683@@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687+extern void check_object_size(const void *ptr, unsigned long n, bool to);
688+
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692@@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700+
701+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702+{
703+ if (!__builtin_constant_p(n))
704+ check_object_size(to, n, false);
705+ return ___copy_from_user(to, from, n);
706+}
707+
708+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709+{
710+ if (!__builtin_constant_p(n))
711+ check_object_size(from, n, true);
712+ return ___copy_to_user(to, from, n);
713+}
714+
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722+ if ((long)n < 0)
723+ return n;
724+
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732+ if ((long)n < 0)
733+ return n;
734+
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739index 5b0bce6..becd81c 100644
740--- a/arch/arm/kernel/armksyms.c
741+++ b/arch/arm/kernel/armksyms.c
742@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746-EXPORT_SYMBOL(__copy_from_user);
747-EXPORT_SYMBOL(__copy_to_user);
748+EXPORT_SYMBOL(___copy_from_user);
749+EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754index 3d0c6fb..3dcae52 100644
755--- a/arch/arm/kernel/process.c
756+++ b/arch/arm/kernel/process.c
757@@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761-#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769-unsigned long arch_randomize_brk(struct mm_struct *mm)
770-{
771- unsigned long range_end = mm->brk + 0x02000000;
772- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773-}
774-
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779index 99a5727..a3d5bb1 100644
780--- a/arch/arm/kernel/traps.c
781+++ b/arch/arm/kernel/traps.c
782@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786+extern void gr_handle_kernel_exploit(void);
787+
788 /*
789 * This function is protected against re-entrancy.
790 */
791@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795+
796+ gr_handle_kernel_exploit();
797+
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802index 66a477a..bee61d3 100644
803--- a/arch/arm/lib/copy_from_user.S
804+++ b/arch/arm/lib/copy_from_user.S
805@@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809- * size_t __copy_from_user(void *to, const void *from, size_t n)
810+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814@@ -84,11 +84,11 @@
815
816 .text
817
818-ENTRY(__copy_from_user)
819+ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823-ENDPROC(__copy_from_user)
824+ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829index d066df6..df28194 100644
830--- a/arch/arm/lib/copy_to_user.S
831+++ b/arch/arm/lib/copy_to_user.S
832@@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836- * size_t __copy_to_user(void *to, const void *from, size_t n)
837+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841@@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845-WEAK(__copy_to_user)
846+WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850-ENDPROC(__copy_to_user)
851+ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856index d0ece2a..5ae2f39 100644
857--- a/arch/arm/lib/uaccess.S
858+++ b/arch/arm/lib/uaccess.S
859@@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872-ENTRY(__copy_to_user)
873+ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881-ENDPROC(__copy_to_user)
882+ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898-ENTRY(__copy_from_user)
899+ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907-ENDPROC(__copy_from_user)
908+ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913index 025f742..8432b08 100644
914--- a/arch/arm/lib/uaccess_with_memcpy.c
915+++ b/arch/arm/lib/uaccess_with_memcpy.c
916@@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920-__copy_to_user(void __user *to, const void *from, unsigned long n)
921+___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926index 2b2d51c..0127490 100644
927--- a/arch/arm/mach-ux500/mbox-db5500.c
928+++ b/arch/arm/mach-ux500/mbox-db5500.c
929@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939index aa33949..b242a2f 100644
940--- a/arch/arm/mm/fault.c
941+++ b/arch/arm/mm/fault.c
942@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946+#ifdef CONFIG_PAX_PAGEEXEC
947+ if (fsr & FSR_LNX_PF) {
948+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949+ do_group_exit(SIGKILL);
950+ }
951+#endif
952+
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960+#ifdef CONFIG_PAX_PAGEEXEC
961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962+{
963+ long i;
964+
965+ printk(KERN_ERR "PAX: bytes at PC: ");
966+ for (i = 0; i < 20; i++) {
967+ unsigned char c;
968+ if (get_user(c, (__force unsigned char __user *)pc+i))
969+ printk(KERN_CONT "?? ");
970+ else
971+ printk(KERN_CONT "%02x ", c);
972+ }
973+ printk("\n");
974+
975+ printk(KERN_ERR "PAX: bytes at SP-4: ");
976+ for (i = -1; i < 20; i++) {
977+ unsigned long c;
978+ if (get_user(c, (__force unsigned long __user *)sp+i))
979+ printk(KERN_CONT "???????? ");
980+ else
981+ printk(KERN_CONT "%08lx ", c);
982+ }
983+ printk("\n");
984+}
985+#endif
986+
987 /*
988 * First Level Translation Fault Handler
989 *
990diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991index 44b628e..623ee2a 100644
992--- a/arch/arm/mm/mmap.c
993+++ b/arch/arm/mm/mmap.c
994@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998+#ifdef CONFIG_PAX_RANDMMAP
999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000+#endif
1001+
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009- if (TASK_SIZE - len >= addr &&
1010- (!vma || addr + len <= vma->vm_start))
1011+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015- start_addr = addr = mm->free_area_cache;
1016+ start_addr = addr = mm->free_area_cache;
1017 } else {
1018- start_addr = addr = TASK_UNMAPPED_BASE;
1019- mm->cached_hole_size = 0;
1020+ start_addr = addr = mm->mmap_base;
1021+ mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025@@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029- if (start_addr != TASK_UNMAPPED_BASE) {
1030- start_addr = addr = TASK_UNMAPPED_BASE;
1031+ if (start_addr != mm->mmap_base) {
1032+ start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038- if (!vma || addr + len <= vma->vm_start) {
1039+ if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044index 3b3159b..425ea94 100644
1045--- a/arch/avr32/include/asm/elf.h
1046+++ b/arch/avr32/include/asm/elf.h
1047@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054+#ifdef CONFIG_PAX_ASLR
1055+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056+
1057+#define PAX_DELTA_MMAP_LEN 15
1058+#define PAX_DELTA_STACK_LEN 15
1059+#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064index b7f5c68..556135c 100644
1065--- a/arch/avr32/include/asm/kmap_types.h
1066+++ b/arch/avr32/include/asm/kmap_types.h
1067@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071-D(14) KM_TYPE_NR
1072+D(14) KM_CLEARPAGE,
1073+D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078index f7040a1..db9f300 100644
1079--- a/arch/avr32/mm/fault.c
1080+++ b/arch/avr32/mm/fault.c
1081@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085+#ifdef CONFIG_PAX_PAGEEXEC
1086+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087+{
1088+ unsigned long i;
1089+
1090+ printk(KERN_ERR "PAX: bytes at PC: ");
1091+ for (i = 0; i < 20; i++) {
1092+ unsigned char c;
1093+ if (get_user(c, (unsigned char *)pc+i))
1094+ printk(KERN_CONT "???????? ");
1095+ else
1096+ printk(KERN_CONT "%02x ", c);
1097+ }
1098+ printk("\n");
1099+}
1100+#endif
1101+
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105@@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109+
1110+#ifdef CONFIG_PAX_PAGEEXEC
1111+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114+ do_group_exit(SIGKILL);
1115+ }
1116+ }
1117+#endif
1118+
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123index f8e16b2..c73ff79 100644
1124--- a/arch/frv/include/asm/kmap_types.h
1125+++ b/arch/frv/include/asm/kmap_types.h
1126@@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130+ KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135index 385fd30..6c3d97e 100644
1136--- a/arch/frv/mm/elf-fdpic.c
1137+++ b/arch/frv/mm/elf-fdpic.c
1138@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142- if (TASK_SIZE - len >= addr &&
1143- (!vma || addr + len <= vma->vm_start))
1144+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152- if (addr + len <= vma->vm_start)
1153+ if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161- if (addr + len <= vma->vm_start)
1162+ if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167index b5298eb..67c6e62 100644
1168--- a/arch/ia64/include/asm/elf.h
1169+++ b/arch/ia64/include/asm/elf.h
1170@@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174+#ifdef CONFIG_PAX_ASLR
1175+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176+
1177+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179+#endif
1180+
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185index 1a97af3..7529d31 100644
1186--- a/arch/ia64/include/asm/pgtable.h
1187+++ b/arch/ia64/include/asm/pgtable.h
1188@@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192-
1193+#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197@@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201+
1202+#ifdef CONFIG_PAX_PAGEEXEC
1203+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206+#else
1207+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209+# define PAGE_COPY_NOEXEC PAGE_COPY
1210+#endif
1211+
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216index b77768d..e0795eb 100644
1217--- a/arch/ia64/include/asm/spinlock.h
1218+++ b/arch/ia64/include/asm/spinlock.h
1219@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229index 449c8c0..432a3d2 100644
1230--- a/arch/ia64/include/asm/uaccess.h
1231+++ b/arch/ia64/include/asm/uaccess.h
1232@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251index 24603be..948052d 100644
1252--- a/arch/ia64/kernel/module.c
1253+++ b/arch/ia64/kernel/module.c
1254@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258- if (mod && mod->arch.init_unw_table &&
1259- module_region == mod->module_init) {
1260+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268+in_init_rx (const struct module *mod, uint64_t addr)
1269+{
1270+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271+}
1272+
1273+static inline int
1274+in_init_rw (const struct module *mod, uint64_t addr)
1275+{
1276+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277+}
1278+
1279+static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282- return addr - (uint64_t) mod->module_init < mod->init_size;
1283+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284+}
1285+
1286+static inline int
1287+in_core_rx (const struct module *mod, uint64_t addr)
1288+{
1289+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290+}
1291+
1292+static inline int
1293+in_core_rw (const struct module *mod, uint64_t addr)
1294+{
1295+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301- return addr - (uint64_t) mod->module_core < mod->core_size;
1302+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311+ if (in_init_rx(mod, val))
1312+ val -= (uint64_t) mod->module_init_rx;
1313+ else if (in_init_rw(mod, val))
1314+ val -= (uint64_t) mod->module_init_rw;
1315+ else if (in_core_rx(mod, val))
1316+ val -= (uint64_t) mod->module_core_rx;
1317+ else if (in_core_rw(mod, val))
1318+ val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326- if (mod->core_size > MAX_LTOFF)
1327+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332- gp = mod->core_size - MAX_LTOFF / 2;
1333+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335- gp = mod->core_size / 2;
1336- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343index 609d500..7dde2a8 100644
1344--- a/arch/ia64/kernel/sys_ia64.c
1345+++ b/arch/ia64/kernel/sys_ia64.c
1346@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350+
1351+#ifdef CONFIG_PAX_RANDMMAP
1352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1353+ addr = mm->free_area_cache;
1354+ else
1355+#endif
1356+
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364- if (start_addr != TASK_UNMAPPED_BASE) {
1365+ if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367- addr = TASK_UNMAPPED_BASE;
1368+ addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373- if (!vma || addr + len <= vma->vm_start) {
1374+ if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379index 53c0ba0..2accdde 100644
1380--- a/arch/ia64/kernel/vmlinux.lds.S
1381+++ b/arch/ia64/kernel/vmlinux.lds.S
1382@@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386- __phys_per_cpu_start = __per_cpu_load;
1387+ __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392index 20b3593..1ce77f0 100644
1393--- a/arch/ia64/mm/fault.c
1394+++ b/arch/ia64/mm/fault.c
1395@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399+#ifdef CONFIG_PAX_PAGEEXEC
1400+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401+{
1402+ unsigned long i;
1403+
1404+ printk(KERN_ERR "PAX: bytes at PC: ");
1405+ for (i = 0; i < 8; i++) {
1406+ unsigned int c;
1407+ if (get_user(c, (unsigned int *)pc+i))
1408+ printk(KERN_CONT "???????? ");
1409+ else
1410+ printk(KERN_CONT "%08x ", c);
1411+ }
1412+ printk("\n");
1413+}
1414+#endif
1415+
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423- if ((vma->vm_flags & mask) != mask)
1424+ if ((vma->vm_flags & mask) != mask) {
1425+
1426+#ifdef CONFIG_PAX_PAGEEXEC
1427+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429+ goto bad_area;
1430+
1431+ up_read(&mm->mmap_sem);
1432+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433+ do_group_exit(SIGKILL);
1434+ }
1435+#endif
1436+
1437 goto bad_area;
1438
1439+ }
1440+
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445index 5ca674b..e0e1b70 100644
1446--- a/arch/ia64/mm/hugetlbpage.c
1447+++ b/arch/ia64/mm/hugetlbpage.c
1448@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452- if (!vmm || (addr + len) <= vmm->vm_start)
1453+ if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458index 00cb0e2..2ad8024 100644
1459--- a/arch/ia64/mm/init.c
1460+++ b/arch/ia64/mm/init.c
1461@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465+
1466+#ifdef CONFIG_PAX_PAGEEXEC
1467+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468+ vma->vm_flags &= ~VM_EXEC;
1469+
1470+#ifdef CONFIG_PAX_MPROTECT
1471+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472+ vma->vm_flags &= ~VM_MAYEXEC;
1473+#endif
1474+
1475+ }
1476+#endif
1477+
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482index 82abd15..d95ae5d 100644
1483--- a/arch/m32r/lib/usercopy.c
1484+++ b/arch/m32r/lib/usercopy.c
1485@@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489+ if ((long)n < 0)
1490+ return n;
1491+
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499+ if ((long)n < 0)
1500+ return n;
1501+
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506index 455c0ac..ad65fbe 100644
1507--- a/arch/mips/include/asm/elf.h
1508+++ b/arch/mips/include/asm/elf.h
1509@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513+#ifdef CONFIG_PAX_ASLR
1514+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515+
1516+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518+#endif
1519+
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525-struct mm_struct;
1526-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527-#define arch_randomize_brk arch_randomize_brk
1528-
1529 #endif /* _ASM_ELF_H */
1530diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531index e59cd1a..8e329d6 100644
1532--- a/arch/mips/include/asm/page.h
1533+++ b/arch/mips/include/asm/page.h
1534@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544index 6018c80..7c37203 100644
1545--- a/arch/mips/include/asm/system.h
1546+++ b/arch/mips/include/asm/system.h
1547@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551-extern unsigned long arch_align_stack(unsigned long sp);
1552+#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556index 9fdd8bc..4bd7f1a 100644
1557--- a/arch/mips/kernel/binfmt_elfn32.c
1558+++ b/arch/mips/kernel/binfmt_elfn32.c
1559@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563+#ifdef CONFIG_PAX_ASLR
1564+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565+
1566+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568+#endif
1569+
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574index ff44823..97f8906 100644
1575--- a/arch/mips/kernel/binfmt_elfo32.c
1576+++ b/arch/mips/kernel/binfmt_elfo32.c
1577@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581+#ifdef CONFIG_PAX_ASLR
1582+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583+
1584+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586+#endif
1587+
1588 #include <asm/processor.h>
1589
1590 /*
1591diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592index c47f96e..661d418 100644
1593--- a/arch/mips/kernel/process.c
1594+++ b/arch/mips/kernel/process.c
1595@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599-
1600-/*
1601- * Don't forget that the stack pointer must be aligned on a 8 bytes
1602- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603- */
1604-unsigned long arch_align_stack(unsigned long sp)
1605-{
1606- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607- sp -= get_random_int() & ~PAGE_MASK;
1608-
1609- return sp & ALMASK;
1610-}
1611diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612index 937cf33..adb39bb 100644
1613--- a/arch/mips/mm/fault.c
1614+++ b/arch/mips/mm/fault.c
1615@@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619+#ifdef CONFIG_PAX_PAGEEXEC
1620+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621+{
1622+ unsigned long i;
1623+
1624+ printk(KERN_ERR "PAX: bytes at PC: ");
1625+ for (i = 0; i < 5; i++) {
1626+ unsigned int c;
1627+ if (get_user(c, (unsigned int *)pc+i))
1628+ printk(KERN_CONT "???????? ");
1629+ else
1630+ printk(KERN_CONT "%08x ", c);
1631+ }
1632+ printk("\n");
1633+}
1634+#endif
1635+
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640index 302d779..7d35bf8 100644
1641--- a/arch/mips/mm/mmap.c
1642+++ b/arch/mips/mm/mmap.c
1643@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647+
1648+#ifdef CONFIG_PAX_RANDMMAP
1649+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650+#endif
1651+
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659- if (TASK_SIZE - len >= addr &&
1660- (!vma || addr + len <= vma->vm_start))
1661+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1662 return addr;
1663 }
1664
1665@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669- if (!vma || addr + len <= vma->vm_start)
1670+ if (check_heap_stack_gap(vmm, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678- if (!vma || addr <= vma->vm_start) {
1679+ if (check_heap_stack_gap(vmm, addr - len, len))
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687- if (likely(!vma || addr + len <= vma->vm_start)) {
1688+ if (check_heap_stack_gap(vmm, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696-
1697-static inline unsigned long brk_rnd(void)
1698-{
1699- unsigned long rnd = get_random_int();
1700-
1701- rnd = rnd << PAGE_SHIFT;
1702- /* 8MB for 32bit, 256MB for 64bit */
1703- if (TASK_IS_32BIT_ADDR)
1704- rnd = rnd & 0x7ffffful;
1705- else
1706- rnd = rnd & 0xffffffful;
1707-
1708- return rnd;
1709-}
1710-
1711-unsigned long arch_randomize_brk(struct mm_struct *mm)
1712-{
1713- unsigned long base = mm->brk;
1714- unsigned long ret;
1715-
1716- ret = PAGE_ALIGN(base + brk_rnd());
1717-
1718- if (ret < mm->brk)
1719- return mm->brk;
1720-
1721- return ret;
1722-}
1723diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724index 19f6cb1..6c78cf2 100644
1725--- a/arch/parisc/include/asm/elf.h
1726+++ b/arch/parisc/include/asm/elf.h
1727@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731+#ifdef CONFIG_PAX_ASLR
1732+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733+
1734+#define PAX_DELTA_MMAP_LEN 16
1735+#define PAX_DELTA_STACK_LEN 16
1736+#endif
1737+
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742index 22dadeb..f6c2be4 100644
1743--- a/arch/parisc/include/asm/pgtable.h
1744+++ b/arch/parisc/include/asm/pgtable.h
1745@@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749+
1750+#ifdef CONFIG_PAX_PAGEEXEC
1751+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754+#else
1755+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756+# define PAGE_COPY_NOEXEC PAGE_COPY
1757+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758+#endif
1759+
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764index 5e34ccf..672bc9c 100644
1765--- a/arch/parisc/kernel/module.c
1766+++ b/arch/parisc/kernel/module.c
1767@@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771+static inline int in_init_rx(struct module *me, void *loc)
1772+{
1773+ return (loc >= me->module_init_rx &&
1774+ loc < (me->module_init_rx + me->init_size_rx));
1775+}
1776+
1777+static inline int in_init_rw(struct module *me, void *loc)
1778+{
1779+ return (loc >= me->module_init_rw &&
1780+ loc < (me->module_init_rw + me->init_size_rw));
1781+}
1782+
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785- return (loc >= me->module_init &&
1786- loc <= (me->module_init + me->init_size));
1787+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1788+}
1789+
1790+static inline int in_core_rx(struct module *me, void *loc)
1791+{
1792+ return (loc >= me->module_core_rx &&
1793+ loc < (me->module_core_rx + me->core_size_rx));
1794+}
1795+
1796+static inline int in_core_rw(struct module *me, void *loc)
1797+{
1798+ return (loc >= me->module_core_rw &&
1799+ loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804- return (loc >= me->module_core &&
1805- loc <= (me->module_core + me->core_size));
1806+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814- me->core_size = ALIGN(me->core_size, 16);
1815- me->arch.got_offset = me->core_size;
1816- me->core_size += gots * sizeof(struct got_entry);
1817+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818+ me->arch.got_offset = me->core_size_rw;
1819+ me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821- me->core_size = ALIGN(me->core_size, 16);
1822- me->arch.fdesc_offset = me->core_size;
1823- me->core_size += fdescs * sizeof(Elf_Fdesc);
1824+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825+ me->arch.fdesc_offset = me->core_size_rw;
1826+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834- got = me->module_core + me->arch.got_offset;
1835+ got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867index c9b9322..02d8940 100644
1868--- a/arch/parisc/kernel/sys_parisc.c
1869+++ b/arch/parisc/kernel/sys_parisc.c
1870@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874- if (!vma || addr + len <= vma->vm_start)
1875+ if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883- if (!vma || addr + len <= vma->vm_start)
1884+ if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892- addr = TASK_UNMAPPED_BASE;
1893+ addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898index f19e660..414fe24 100644
1899--- a/arch/parisc/kernel/traps.c
1900+++ b/arch/parisc/kernel/traps.c
1901@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905- if (vma && (regs->iaoq[0] >= vma->vm_start)
1906- && (vma->vm_flags & VM_EXEC)) {
1907-
1908+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913index 18162ce..94de376 100644
1914--- a/arch/parisc/mm/fault.c
1915+++ b/arch/parisc/mm/fault.c
1916@@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920+#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928- if (code == 6 || code == 16)
1929+ if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937+#ifdef CONFIG_PAX_PAGEEXEC
1938+/*
1939+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940+ *
1941+ * returns 1 when task should be killed
1942+ * 2 when rt_sigreturn trampoline was detected
1943+ * 3 when unpatched PLT trampoline was detected
1944+ */
1945+static int pax_handle_fetch_fault(struct pt_regs *regs)
1946+{
1947+
1948+#ifdef CONFIG_PAX_EMUPLT
1949+ int err;
1950+
1951+ do { /* PaX: unpatched PLT emulation */
1952+ unsigned int bl, depwi;
1953+
1954+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962+
1963+ err = get_user(ldw, (unsigned int *)addr);
1964+ err |= get_user(bv, (unsigned int *)(addr+4));
1965+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1966+
1967+ if (err)
1968+ break;
1969+
1970+ if (ldw == 0x0E801096U &&
1971+ bv == 0xEAC0C000U &&
1972+ ldw2 == 0x0E881095U)
1973+ {
1974+ unsigned int resolver, map;
1975+
1976+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978+ if (err)
1979+ break;
1980+
1981+ regs->gr[20] = instruction_pointer(regs)+8;
1982+ regs->gr[21] = map;
1983+ regs->gr[22] = resolver;
1984+ regs->iaoq[0] = resolver | 3UL;
1985+ regs->iaoq[1] = regs->iaoq[0] + 4;
1986+ return 3;
1987+ }
1988+ }
1989+ } while (0);
1990+#endif
1991+
1992+#ifdef CONFIG_PAX_EMUTRAMP
1993+
1994+#ifndef CONFIG_PAX_EMUSIGRT
1995+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996+ return 1;
1997+#endif
1998+
1999+ do { /* PaX: rt_sigreturn emulation */
2000+ unsigned int ldi1, ldi2, bel, nop;
2001+
2002+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006+
2007+ if (err)
2008+ break;
2009+
2010+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011+ ldi2 == 0x3414015AU &&
2012+ bel == 0xE4008200U &&
2013+ nop == 0x08000240U)
2014+ {
2015+ regs->gr[25] = (ldi1 & 2) >> 1;
2016+ regs->gr[20] = __NR_rt_sigreturn;
2017+ regs->gr[31] = regs->iaoq[1] + 16;
2018+ regs->sr[0] = regs->iasq[1];
2019+ regs->iaoq[0] = 0x100UL;
2020+ regs->iaoq[1] = regs->iaoq[0] + 4;
2021+ regs->iasq[0] = regs->sr[2];
2022+ regs->iasq[1] = regs->sr[2];
2023+ return 2;
2024+ }
2025+ } while (0);
2026+#endif
2027+
2028+ return 1;
2029+}
2030+
2031+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032+{
2033+ unsigned long i;
2034+
2035+ printk(KERN_ERR "PAX: bytes at PC: ");
2036+ for (i = 0; i < 5; i++) {
2037+ unsigned int c;
2038+ if (get_user(c, (unsigned int *)pc+i))
2039+ printk(KERN_CONT "???????? ");
2040+ else
2041+ printk(KERN_CONT "%08x ", c);
2042+ }
2043+ printk("\n");
2044+}
2045+#endif
2046+
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050@@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054- if ((vma->vm_flags & acc_type) != acc_type)
2055+ if ((vma->vm_flags & acc_type) != acc_type) {
2056+
2057+#ifdef CONFIG_PAX_PAGEEXEC
2058+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059+ (address & ~3UL) == instruction_pointer(regs))
2060+ {
2061+ up_read(&mm->mmap_sem);
2062+ switch (pax_handle_fetch_fault(regs)) {
2063+
2064+#ifdef CONFIG_PAX_EMUPLT
2065+ case 3:
2066+ return;
2067+#endif
2068+
2069+#ifdef CONFIG_PAX_EMUTRAMP
2070+ case 2:
2071+ return;
2072+#endif
2073+
2074+ }
2075+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076+ do_group_exit(SIGKILL);
2077+ }
2078+#endif
2079+
2080 goto bad_area;
2081+ }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086index 3bf9cca..e7457d0 100644
2087--- a/arch/powerpc/include/asm/elf.h
2088+++ b/arch/powerpc/include/asm/elf.h
2089@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093-extern unsigned long randomize_et_dyn(unsigned long base);
2094-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095+#define ELF_ET_DYN_BASE (0x20000000)
2096+
2097+#ifdef CONFIG_PAX_ASLR
2098+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099+
2100+#ifdef __powerpc64__
2101+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103+#else
2104+#define PAX_DELTA_MMAP_LEN 15
2105+#define PAX_DELTA_STACK_LEN 15
2106+#endif
2107+#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116-#define arch_randomize_brk arch_randomize_brk
2117-
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122index bca8fdc..61e9580 100644
2123--- a/arch/powerpc/include/asm/kmap_types.h
2124+++ b/arch/powerpc/include/asm/kmap_types.h
2125@@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129+ KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134index d4a7f64..451de1c 100644
2135--- a/arch/powerpc/include/asm/mman.h
2136+++ b/arch/powerpc/include/asm/mman.h
2137@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147index dd9c4fd..a2ced87 100644
2148--- a/arch/powerpc/include/asm/page.h
2149+++ b/arch/powerpc/include/asm/page.h
2150@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156+#define VM_DATA_DEFAULT_FLAGS32 \
2157+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166+#define ktla_ktva(addr) (addr)
2167+#define ktva_ktla(addr) (addr)
2168+
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173index fb40ede..d3ce956 100644
2174--- a/arch/powerpc/include/asm/page_64.h
2175+++ b/arch/powerpc/include/asm/page_64.h
2176@@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182+#define VM_STACK_DEFAULT_FLAGS32 \
2183+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189+#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193+#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198index 88b0bd9..e32bc67 100644
2199--- a/arch/powerpc/include/asm/pgtable.h
2200+++ b/arch/powerpc/include/asm/pgtable.h
2201@@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205+#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210index 4aad413..85d86bf 100644
2211--- a/arch/powerpc/include/asm/pte-hash32.h
2212+++ b/arch/powerpc/include/asm/pte-hash32.h
2213@@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217+#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222index 559da19..7e5835c 100644
2223--- a/arch/powerpc/include/asm/reg.h
2224+++ b/arch/powerpc/include/asm/reg.h
2225@@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234index e30a13d..2b7d994 100644
2235--- a/arch/powerpc/include/asm/system.h
2236+++ b/arch/powerpc/include/asm/system.h
2237@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241-extern unsigned long arch_align_stack(unsigned long sp);
2242+#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247index bd0fb84..a42a14b 100644
2248--- a/arch/powerpc/include/asm/uaccess.h
2249+++ b/arch/powerpc/include/asm/uaccess.h
2250@@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255+
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259@@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263-#ifndef __powerpc64__
2264-
2265-static inline unsigned long copy_from_user(void *to,
2266- const void __user *from, unsigned long n)
2267-{
2268- unsigned long over;
2269-
2270- if (access_ok(VERIFY_READ, from, n))
2271- return __copy_tofrom_user((__force void __user *)to, from, n);
2272- if ((unsigned long)from < TASK_SIZE) {
2273- over = (unsigned long)from + n - TASK_SIZE;
2274- return __copy_tofrom_user((__force void __user *)to, from,
2275- n - over) + over;
2276- }
2277- return n;
2278-}
2279-
2280-static inline unsigned long copy_to_user(void __user *to,
2281- const void *from, unsigned long n)
2282-{
2283- unsigned long over;
2284-
2285- if (access_ok(VERIFY_WRITE, to, n))
2286- return __copy_tofrom_user(to, (__force void __user *)from, n);
2287- if ((unsigned long)to < TASK_SIZE) {
2288- over = (unsigned long)to + n - TASK_SIZE;
2289- return __copy_tofrom_user(to, (__force void __user *)from,
2290- n - over) + over;
2291- }
2292- return n;
2293-}
2294-
2295-#else /* __powerpc64__ */
2296-
2297-#define __copy_in_user(to, from, size) \
2298- __copy_tofrom_user((to), (from), (size))
2299-
2300-extern unsigned long copy_from_user(void *to, const void __user *from,
2301- unsigned long n);
2302-extern unsigned long copy_to_user(void __user *to, const void *from,
2303- unsigned long n);
2304-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305- unsigned long n);
2306-
2307-#endif /* __powerpc64__ */
2308-
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316+
2317+ if (!__builtin_constant_p(n))
2318+ check_object_size(to, n, false);
2319+
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327+
2328+ if (!__builtin_constant_p(n))
2329+ check_object_size(from, n, true);
2330+
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338+#ifndef __powerpc64__
2339+
2340+static inline unsigned long __must_check copy_from_user(void *to,
2341+ const void __user *from, unsigned long n)
2342+{
2343+ unsigned long over;
2344+
2345+ if ((long)n < 0)
2346+ return n;
2347+
2348+ if (access_ok(VERIFY_READ, from, n)) {
2349+ if (!__builtin_constant_p(n))
2350+ check_object_size(to, n, false);
2351+ return __copy_tofrom_user((__force void __user *)to, from, n);
2352+ }
2353+ if ((unsigned long)from < TASK_SIZE) {
2354+ over = (unsigned long)from + n - TASK_SIZE;
2355+ if (!__builtin_constant_p(n - over))
2356+ check_object_size(to, n - over, false);
2357+ return __copy_tofrom_user((__force void __user *)to, from,
2358+ n - over) + over;
2359+ }
2360+ return n;
2361+}
2362+
2363+static inline unsigned long __must_check copy_to_user(void __user *to,
2364+ const void *from, unsigned long n)
2365+{
2366+ unsigned long over;
2367+
2368+ if ((long)n < 0)
2369+ return n;
2370+
2371+ if (access_ok(VERIFY_WRITE, to, n)) {
2372+ if (!__builtin_constant_p(n))
2373+ check_object_size(from, n, true);
2374+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2375+ }
2376+ if ((unsigned long)to < TASK_SIZE) {
2377+ over = (unsigned long)to + n - TASK_SIZE;
2378+ if (!__builtin_constant_p(n))
2379+ check_object_size(from, n - over, true);
2380+ return __copy_tofrom_user(to, (__force void __user *)from,
2381+ n - over) + over;
2382+ }
2383+ return n;
2384+}
2385+
2386+#else /* __powerpc64__ */
2387+
2388+#define __copy_in_user(to, from, size) \
2389+ __copy_tofrom_user((to), (from), (size))
2390+
2391+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392+{
2393+ if ((long)n < 0 || n > INT_MAX)
2394+ return n;
2395+
2396+ if (!__builtin_constant_p(n))
2397+ check_object_size(to, n, false);
2398+
2399+ if (likely(access_ok(VERIFY_READ, from, n)))
2400+ n = __copy_from_user(to, from, n);
2401+ else
2402+ memset(to, 0, n);
2403+ return n;
2404+}
2405+
2406+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407+{
2408+ if ((long)n < 0 || n > INT_MAX)
2409+ return n;
2410+
2411+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412+ if (!__builtin_constant_p(n))
2413+ check_object_size(from, n, true);
2414+ n = __copy_to_user(to, from, n);
2415+ }
2416+ return n;
2417+}
2418+
2419+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420+ unsigned long n);
2421+
2422+#endif /* __powerpc64__ */
2423+
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428index 429983c..7af363b 100644
2429--- a/arch/powerpc/kernel/exceptions-64e.S
2430+++ b/arch/powerpc/kernel/exceptions-64e.S
2431@@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435+ bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439@@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443-1: bl .save_nvgprs
2444- mr r5,r3
2445+1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450index cf9c69b..ebc9640 100644
2451--- a/arch/powerpc/kernel/exceptions-64s.S
2452+++ b/arch/powerpc/kernel/exceptions-64s.S
2453@@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457+ bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461- bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466index 0b6d796..d760ddb 100644
2467--- a/arch/powerpc/kernel/module_32.c
2468+++ b/arch/powerpc/kernel/module_32.c
2469@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473- printk("Module doesn't contain .plt or .init.plt sections.\n");
2474+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482- if (location >= mod->module_core
2483- && location < mod->module_core + mod->core_size)
2484+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487- else
2488+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491+ else {
2492+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493+ return ~0UL;
2494+ }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499index 6457574..08b28d3 100644
2500--- a/arch/powerpc/kernel/process.c
2501+++ b/arch/powerpc/kernel/process.c
2502@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521- printk(" (%pS)",
2522+ printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539-
2540-unsigned long arch_align_stack(unsigned long sp)
2541-{
2542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543- sp -= get_random_int() & ~PAGE_MASK;
2544- return sp & ~0xf;
2545-}
2546-
2547-static inline unsigned long brk_rnd(void)
2548-{
2549- unsigned long rnd = 0;
2550-
2551- /* 8MB for 32bit, 1GB for 64bit */
2552- if (is_32bit_task())
2553- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554- else
2555- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556-
2557- return rnd << PAGE_SHIFT;
2558-}
2559-
2560-unsigned long arch_randomize_brk(struct mm_struct *mm)
2561-{
2562- unsigned long base = mm->brk;
2563- unsigned long ret;
2564-
2565-#ifdef CONFIG_PPC_STD_MMU_64
2566- /*
2567- * If we are using 1TB segments and we are allowed to randomise
2568- * the heap, we can put it above 1TB so it is backed by a 1TB
2569- * segment. Otherwise the heap will be in the bottom 1TB
2570- * which always uses 256MB segments and this may result in a
2571- * performance penalty.
2572- */
2573- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575-#endif
2576-
2577- ret = PAGE_ALIGN(base + brk_rnd());
2578-
2579- if (ret < mm->brk)
2580- return mm->brk;
2581-
2582- return ret;
2583-}
2584-
2585-unsigned long randomize_et_dyn(unsigned long base)
2586-{
2587- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588-
2589- if (ret < base)
2590- return base;
2591-
2592- return ret;
2593-}
2594diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595index 836a5a1..27289a3 100644
2596--- a/arch/powerpc/kernel/signal_32.c
2597+++ b/arch/powerpc/kernel/signal_32.c
2598@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608index a50b5ec..547078a 100644
2609--- a/arch/powerpc/kernel/signal_64.c
2610+++ b/arch/powerpc/kernel/signal_64.c
2611@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621index 5459d14..10f8070 100644
2622--- a/arch/powerpc/kernel/traps.c
2623+++ b/arch/powerpc/kernel/traps.c
2624@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628+extern void gr_handle_kernel_exploit(void);
2629+
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637+ gr_handle_kernel_exploit();
2638+
2639 oops_exit();
2640 do_exit(err);
2641
2642diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643index 7d14bb6..1305601 100644
2644--- a/arch/powerpc/kernel/vdso.c
2645+++ b/arch/powerpc/kernel/vdso.c
2646@@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650+#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658- current->mm->context.vdso_base = 0;
2659+ current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667- 0, 0);
2668+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673index 5eea6f3..5d10396 100644
2674--- a/arch/powerpc/lib/usercopy_64.c
2675+++ b/arch/powerpc/lib/usercopy_64.c
2676@@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681-{
2682- if (likely(access_ok(VERIFY_READ, from, n)))
2683- n = __copy_from_user(to, from, n);
2684- else
2685- memset(to, 0, n);
2686- return n;
2687-}
2688-
2689-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690-{
2691- if (likely(access_ok(VERIFY_WRITE, to, n)))
2692- n = __copy_to_user(to, from, n);
2693- return n;
2694-}
2695-
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703-EXPORT_SYMBOL(copy_from_user);
2704-EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708index 5efe8c9..db9ceef 100644
2709--- a/arch/powerpc/mm/fault.c
2710+++ b/arch/powerpc/mm/fault.c
2711@@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715+#include <linux/slab.h>
2716+#include <linux/pagemap.h>
2717+#include <linux/compiler.h>
2718+#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722@@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726+#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734+#ifdef CONFIG_PAX_PAGEEXEC
2735+/*
2736+ * PaX: decide what to do with offenders (regs->nip = fault address)
2737+ *
2738+ * returns 1 when task should be killed
2739+ */
2740+static int pax_handle_fetch_fault(struct pt_regs *regs)
2741+{
2742+ return 1;
2743+}
2744+
2745+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746+{
2747+ unsigned long i;
2748+
2749+ printk(KERN_ERR "PAX: bytes at PC: ");
2750+ for (i = 0; i < 5; i++) {
2751+ unsigned int c;
2752+ if (get_user(c, (unsigned int __user *)pc+i))
2753+ printk(KERN_CONT "???????? ");
2754+ else
2755+ printk(KERN_CONT "%08x ", c);
2756+ }
2757+ printk("\n");
2758+}
2759+#endif
2760+
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768- error_code &= 0x48200000;
2769+ error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773@@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777- if (error_code & 0x10000000)
2778+ if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782@@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786- if (error_code & DSISR_PROTFAULT)
2787+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791@@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795+
2796+#ifdef CONFIG_PAX_PAGEEXEC
2797+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798+#ifdef CONFIG_PPC_STD_MMU
2799+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800+#else
2801+ if (is_exec && regs->nip == address) {
2802+#endif
2803+ switch (pax_handle_fetch_fault(regs)) {
2804+ }
2805+
2806+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807+ do_group_exit(SIGKILL);
2808+ }
2809+ }
2810+#endif
2811+
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816index 5a783d8..c23e14b 100644
2817--- a/arch/powerpc/mm/mmap_64.c
2818+++ b/arch/powerpc/mm/mmap_64.c
2819@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823+
2824+#ifdef CONFIG_PAX_RANDMMAP
2825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2826+ mm->mmap_base += mm->delta_mmap;
2827+#endif
2828+
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833+
2834+#ifdef CONFIG_PAX_RANDMMAP
2835+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2836+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837+#endif
2838+
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843index 73709f7..6b90313 100644
2844--- a/arch/powerpc/mm/slice.c
2845+++ b/arch/powerpc/mm/slice.c
2846@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850- return (!vma || (addr + len) <= vma->vm_start);
2851+ return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855@@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859- if (!vma || addr + len <= vma->vm_start) {
2860+ if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868- addr = mm->mmap_base;
2869- while (addr > len) {
2870+ if (mm->mmap_base < len)
2871+ addr = -ENOMEM;
2872+ else
2873+ addr = mm->mmap_base - len;
2874+
2875+ while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886- if (!vma || (addr + len) <= vma->vm_start) {
2887+ if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895- addr = vma->vm_start;
2896+ addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904+#ifdef CONFIG_PAX_RANDMMAP
2905+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906+ addr = 0;
2907+#endif
2908+
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913index 547f1a6..3fff354 100644
2914--- a/arch/s390/include/asm/elf.h
2915+++ b/arch/s390/include/asm/elf.h
2916@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920-extern unsigned long randomize_et_dyn(unsigned long base);
2921-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923+
2924+#ifdef CONFIG_PAX_ASLR
2925+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926+
2927+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2928+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929+#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933@@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938-#define arch_randomize_brk arch_randomize_brk
2939-
2940 #endif
2941diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942index ef573c1..75a1ce6 100644
2943--- a/arch/s390/include/asm/system.h
2944+++ b/arch/s390/include/asm/system.h
2945@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949-extern unsigned long arch_align_stack(unsigned long sp);
2950+#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955index 2b23885..e136e31 100644
2956--- a/arch/s390/include/asm/uaccess.h
2957+++ b/arch/s390/include/asm/uaccess.h
2958@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962+
2963+ if ((long)n < 0)
2964+ return n;
2965+
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973+ if ((long)n < 0)
2974+ return n;
2975+
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983+
2984+ if ((long)n < 0)
2985+ return n;
2986+
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991index dfcb343..eda788a 100644
2992--- a/arch/s390/kernel/module.c
2993+++ b/arch/s390/kernel/module.c
2994@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998- me->core_size = ALIGN(me->core_size, 4);
2999- me->arch.got_offset = me->core_size;
3000- me->core_size += me->arch.got_size;
3001- me->arch.plt_offset = me->core_size;
3002- me->core_size += me->arch.plt_size;
3003+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004+ me->arch.got_offset = me->core_size_rw;
3005+ me->core_size_rw += me->arch.got_size;
3006+ me->arch.plt_offset = me->core_size_rx;
3007+ me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015- gotent = me->module_core + me->arch.got_offset +
3016+ gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024- (val + (Elf_Addr) me->module_core - loc) >> 1;
3025+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033- ip = me->module_core + me->arch.plt_offset +
3034+ ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042- val = (Elf_Addr) me->module_core +
3043+ val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051- ((Elf_Addr) me->module_core + me->arch.got_offset);
3052+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066index 9451b21..ed8956f 100644
3067--- a/arch/s390/kernel/process.c
3068+++ b/arch/s390/kernel/process.c
3069@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073-
3074-unsigned long arch_align_stack(unsigned long sp)
3075-{
3076- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077- sp -= get_random_int() & ~PAGE_MASK;
3078- return sp & ~0xf;
3079-}
3080-
3081-static inline unsigned long brk_rnd(void)
3082-{
3083- /* 8MB for 32bit, 1GB for 64bit */
3084- if (is_32bit_task())
3085- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086- else
3087- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088-}
3089-
3090-unsigned long arch_randomize_brk(struct mm_struct *mm)
3091-{
3092- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093-
3094- if (ret < mm->brk)
3095- return mm->brk;
3096- return ret;
3097-}
3098-
3099-unsigned long randomize_et_dyn(unsigned long base)
3100-{
3101- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102-
3103- if (!(current->flags & PF_RANDOMIZE))
3104- return base;
3105- if (ret < base)
3106- return base;
3107- return ret;
3108-}
3109diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110index f09c748..cf9ec1d 100644
3111--- a/arch/s390/mm/mmap.c
3112+++ b/arch/s390/mm/mmap.c
3113@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117+
3118+#ifdef CONFIG_PAX_RANDMMAP
3119+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3120+ mm->mmap_base += mm->delta_mmap;
3121+#endif
3122+
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127+
3128+#ifdef CONFIG_PAX_RANDMMAP
3129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3130+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131+#endif
3132+
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140+
3141+#ifdef CONFIG_PAX_RANDMMAP
3142+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3143+ mm->mmap_base += mm->delta_mmap;
3144+#endif
3145+
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150+
3151+#ifdef CONFIG_PAX_RANDMMAP
3152+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3153+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154+#endif
3155+
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160index 589d5c7..669e274 100644
3161--- a/arch/score/include/asm/system.h
3162+++ b/arch/score/include/asm/system.h
3163@@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167-extern unsigned long arch_align_stack(unsigned long sp);
3168+#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173index 25d0803..d6c8e36 100644
3174--- a/arch/score/kernel/process.c
3175+++ b/arch/score/kernel/process.c
3176@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180-
3181-unsigned long arch_align_stack(unsigned long sp)
3182-{
3183- return sp;
3184-}
3185diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186index afeb710..d1d1289 100644
3187--- a/arch/sh/mm/mmap.c
3188+++ b/arch/sh/mm/mmap.c
3189@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193- if (TASK_SIZE - len >= addr &&
3194- (!vma || addr + len <= vma->vm_start))
3195+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199@@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203- if (likely(!vma || addr + len <= vma->vm_start)) {
3204+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212- if (TASK_SIZE - len >= addr &&
3213- (!vma || addr + len <= vma->vm_start))
3214+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222- if (!vma || addr <= vma->vm_start) {
3223+ if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231- addr = mm->mmap_base-len;
3232- if (do_colour_align)
3233- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234+ addr = mm->mmap_base - len;
3235
3236 do {
3237+ if (do_colour_align)
3238+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245- if (likely(!vma || addr+len <= vma->vm_start)) {
3246+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254- addr = vma->vm_start-len;
3255- if (do_colour_align)
3256- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257- } while (likely(len < vma->vm_start));
3258+ addr = skip_heap_stack_gap(vma, len);
3259+ } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264index ad1fb5d..fc5315b 100644
3265--- a/arch/sparc/Makefile
3266+++ b/arch/sparc/Makefile
3267@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277index 9f421df..b81fc12 100644
3278--- a/arch/sparc/include/asm/atomic_64.h
3279+++ b/arch/sparc/include/asm/atomic_64.h
3280@@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285+{
3286+ return v->counter;
3287+}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290+{
3291+ return v->counter;
3292+}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296+{
3297+ v->counter = i;
3298+}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301+{
3302+ v->counter = i;
3303+}
3304
3305 extern void atomic_add(int, atomic_t *);
3306+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326+{
3327+ return atomic_add_ret_unchecked(1, v);
3328+}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331+{
3332+ return atomic64_add_ret_unchecked(1, v);
3333+}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340+{
3341+ return atomic_add_ret_unchecked(i, v);
3342+}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345+{
3346+ return atomic64_add_ret_unchecked(i, v);
3347+}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356+{
3357+ return atomic_inc_return_unchecked(v) == 0;
3358+}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367+{
3368+ atomic_add_unchecked(1, v);
3369+}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372+{
3373+ atomic64_add_unchecked(1, v);
3374+}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378+{
3379+ atomic_sub_unchecked(1, v);
3380+}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383+{
3384+ atomic64_sub_unchecked(1, v);
3385+}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392+{
3393+ return cmpxchg(&v->counter, old, new);
3394+}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397+{
3398+ return xchg(&v->counter, new);
3399+}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403- int c, old;
3404+ int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407- if (unlikely(c == (u)))
3408+ if (unlikely(c == u))
3409 break;
3410- old = atomic_cmpxchg((v), c, c + (a));
3411+
3412+ asm volatile("addcc %2, %0, %0\n"
3413+
3414+#ifdef CONFIG_PAX_REFCOUNT
3415+ "tvs %%icc, 6\n"
3416+#endif
3417+
3418+ : "=r" (new)
3419+ : "0" (c), "ir" (a)
3420+ : "cc");
3421+
3422+ old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431+{
3432+ return xchg(&v->counter, new);
3433+}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437- long c, old;
3438+ long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441- if (unlikely(c == (u)))
3442+ if (unlikely(c == u))
3443 break;
3444- old = atomic64_cmpxchg((v), c, c + (a));
3445+
3446+ asm volatile("addcc %2, %0, %0\n"
3447+
3448+#ifdef CONFIG_PAX_REFCOUNT
3449+ "tvs %%xcc, 6\n"
3450+#endif
3451+
3452+ : "=r" (new)
3453+ : "0" (c), "ir" (a)
3454+ : "cc");
3455+
3456+ old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461- return c != (u);
3462+ return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467index 69358b5..17b4745 100644
3468--- a/arch/sparc/include/asm/cache.h
3469+++ b/arch/sparc/include/asm/cache.h
3470@@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474-#define L1_CACHE_BYTES 32
3475+#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480index 4269ca6..e3da77f 100644
3481--- a/arch/sparc/include/asm/elf_32.h
3482+++ b/arch/sparc/include/asm/elf_32.h
3483@@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487+#ifdef CONFIG_PAX_ASLR
3488+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489+
3490+#define PAX_DELTA_MMAP_LEN 16
3491+#define PAX_DELTA_STACK_LEN 16
3492+#endif
3493+
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498index 7df8b7f..4946269 100644
3499--- a/arch/sparc/include/asm/elf_64.h
3500+++ b/arch/sparc/include/asm/elf_64.h
3501@@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505+#ifdef CONFIG_PAX_ASLR
3506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507+
3508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510+#endif
3511+
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516index a790cc6..091ed94 100644
3517--- a/arch/sparc/include/asm/pgtable_32.h
3518+++ b/arch/sparc/include/asm/pgtable_32.h
3519@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523+
3524+#ifdef CONFIG_PAX_PAGEEXEC
3525+BTFIXUPDEF_INT(page_shared_noexec)
3526+BTFIXUPDEF_INT(page_copy_noexec)
3527+BTFIXUPDEF_INT(page_readonly_noexec)
3528+#endif
3529+
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537+#ifdef CONFIG_PAX_PAGEEXEC
3538+extern pgprot_t PAGE_SHARED_NOEXEC;
3539+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541+#else
3542+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543+# define PAGE_COPY_NOEXEC PAGE_COPY
3544+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545+#endif
3546+
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551index f6ae2b2..b03ffc7 100644
3552--- a/arch/sparc/include/asm/pgtsrmmu.h
3553+++ b/arch/sparc/include/asm/pgtsrmmu.h
3554@@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558+
3559+#ifdef CONFIG_PAX_PAGEEXEC
3560+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563+#endif
3564+
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569index 9689176..63c18ea 100644
3570--- a/arch/sparc/include/asm/spinlock_64.h
3571+++ b/arch/sparc/include/asm/spinlock_64.h
3572@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576-static void inline arch_read_lock(arch_rwlock_t *lock)
3577+static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584-"4: add %0, 1, %1\n"
3585+"4: addcc %0, 1, %1\n"
3586+
3587+#ifdef CONFIG_PAX_REFCOUNT
3588+" tvs %%icc, 6\n"
3589+#endif
3590+
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598- : "memory");
3599+ : "memory", "cc");
3600 }
3601
3602-static int inline arch_read_trylock(arch_rwlock_t *lock)
3603+static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611-" add %0, 1, %1\n"
3612+" addcc %0, 1, %1\n"
3613+
3614+#ifdef CONFIG_PAX_REFCOUNT
3615+" tvs %%icc, 6\n"
3616+#endif
3617+
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625-static void inline arch_read_unlock(arch_rwlock_t *lock)
3626+static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632-" sub %0, 1, %1\n"
3633+" subcc %0, 1, %1\n"
3634+
3635+#ifdef CONFIG_PAX_REFCOUNT
3636+" tvs %%icc, 6\n"
3637+#endif
3638+
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646-static void inline arch_write_lock(arch_rwlock_t *lock)
3647+static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655-static void inline arch_write_unlock(arch_rwlock_t *lock)
3656+static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664-static int inline arch_write_trylock(arch_rwlock_t *lock)
3665+static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670index fa57532..e1a4c53 100644
3671--- a/arch/sparc/include/asm/thread_info_32.h
3672+++ b/arch/sparc/include/asm/thread_info_32.h
3673@@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677+
3678+ unsigned long lowest_stack;
3679 };
3680
3681 /*
3682diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683index 60d86be..952dea1 100644
3684--- a/arch/sparc/include/asm/thread_info_64.h
3685+++ b/arch/sparc/include/asm/thread_info_64.h
3686@@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690+ unsigned long lowest_stack;
3691+
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696index e88fbe5..96b0ce5 100644
3697--- a/arch/sparc/include/asm/uaccess.h
3698+++ b/arch/sparc/include/asm/uaccess.h
3699@@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702+
3703+#ifdef __KERNEL__
3704+#ifndef __ASSEMBLY__
3705+#include <linux/types.h>
3706+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707+#endif
3708+#endif
3709+
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714index 8303ac4..07f333d 100644
3715--- a/arch/sparc/include/asm/uaccess_32.h
3716+++ b/arch/sparc/include/asm/uaccess_32.h
3717@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721- if (n && __access_ok((unsigned long) to, n))
3722+ if ((long)n < 0)
3723+ return n;
3724+
3725+ if (n && __access_ok((unsigned long) to, n)) {
3726+ if (!__builtin_constant_p(n))
3727+ check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729- else
3730+ } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736+ if ((long)n < 0)
3737+ return n;
3738+
3739+ if (!__builtin_constant_p(n))
3740+ check_object_size(from, n, true);
3741+
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747- if (n && __access_ok((unsigned long) from, n))
3748+ if ((long)n < 0)
3749+ return n;
3750+
3751+ if (n && __access_ok((unsigned long) from, n)) {
3752+ if (!__builtin_constant_p(n))
3753+ check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755- else
3756+ } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762+ if ((long)n < 0)
3763+ return n;
3764+
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769index 3e1449f..5293a0e 100644
3770--- a/arch/sparc/include/asm/uaccess_64.h
3771+++ b/arch/sparc/include/asm/uaccess_64.h
3772@@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776+#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784- unsigned long ret = ___copy_from_user(to, from, size);
3785+ unsigned long ret;
3786
3787+ if ((long)size < 0 || size > INT_MAX)
3788+ return size;
3789+
3790+ if (!__builtin_constant_p(size))
3791+ check_object_size(to, size, false);
3792+
3793+ ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801- unsigned long ret = ___copy_to_user(to, from, size);
3802+ unsigned long ret;
3803
3804+ if ((long)size < 0 || size > INT_MAX)
3805+ return size;
3806+
3807+ if (!__builtin_constant_p(size))
3808+ check_object_size(from, size, true);
3809+
3810+ ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815index cb85458..e063f17 100644
3816--- a/arch/sparc/kernel/Makefile
3817+++ b/arch/sparc/kernel/Makefile
3818@@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822-ccflags-y := -Werror
3823+#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828index f793742..4d880af 100644
3829--- a/arch/sparc/kernel/process_32.c
3830+++ b/arch/sparc/kernel/process_32.c
3831@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835- printk("%pS\n", (void *) rw->ins[7]);
3836+ printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844- printk("PC: <%pS>\n", (void *) r->pc);
3845+ printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861- printk("%pS ] ", (void *) pc);
3862+ printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867index 3739a06..48b2ff0 100644
3868--- a/arch/sparc/kernel/process_64.c
3869+++ b/arch/sparc/kernel/process_64.c
3870@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882- printk("TPC: <%pS>\n", (void *) regs->tpc);
3883+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906index 42b282f..28ce9f2 100644
3907--- a/arch/sparc/kernel/sys_sparc_32.c
3908+++ b/arch/sparc/kernel/sys_sparc_32.c
3909@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913- addr = TASK_UNMAPPED_BASE;
3914+ addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922- if (!vmm || addr + len <= vmm->vm_start)
3923+ if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928index 441521a..b767073 100644
3929--- a/arch/sparc/kernel/sys_sparc_64.c
3930+++ b/arch/sparc/kernel/sys_sparc_64.c
3931@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935- if ((flags & MAP_SHARED) &&
3936+ if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944+#ifdef CONFIG_PAX_RANDMMAP
3945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946+#endif
3947+
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955- if (task_size - len >= addr &&
3956- (!vma || addr + len <= vma->vm_start))
3957+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962- start_addr = addr = mm->free_area_cache;
3963+ start_addr = addr = mm->free_area_cache;
3964 } else {
3965- start_addr = addr = TASK_UNMAPPED_BASE;
3966+ start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970@@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974- if (start_addr != TASK_UNMAPPED_BASE) {
3975- start_addr = addr = TASK_UNMAPPED_BASE;
3976+ if (start_addr != mm->mmap_base) {
3977+ start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983- if (likely(!vma || addr + len <= vma->vm_start)) {
3984+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992- if ((flags & MAP_SHARED) &&
3993+ if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001- if (task_size - len >= addr &&
4002- (!vma || addr + len <= vma->vm_start))
4003+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011- if (!vma || addr <= vma->vm_start) {
4012+ if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020- addr = mm->mmap_base-len;
4021- if (do_color_align)
4022- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023+ addr = mm->mmap_base - len;
4024
4025 do {
4026+ if (do_color_align)
4027+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034- if (likely(!vma || addr+len <= vma->vm_start)) {
4035+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043- addr = vma->vm_start-len;
4044- if (do_color_align)
4045- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046- } while (likely(len < vma->vm_start));
4047+ addr = skip_heap_stack_gap(vma, len);
4048+ } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069+
4070+#ifdef CONFIG_PAX_RANDMMAP
4071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4072+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073+#endif
4074+
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079index 591f20c..0f1b925 100644
4080--- a/arch/sparc/kernel/traps_32.c
4081+++ b/arch/sparc/kernel/traps_32.c
4082@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086+extern void gr_handle_kernel_exploit(void);
4087+
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103- if(regs->psr & PSR_PS)
4104+ if(regs->psr & PSR_PS) {
4105+ gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107+ }
4108 do_exit(SIGSEGV);
4109 }
4110
4111diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112index 0cbdaa4..438e4c9 100644
4113--- a/arch/sparc/kernel/traps_64.c
4114+++ b/arch/sparc/kernel/traps_64.c
4115@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128+
4129+#ifdef CONFIG_PAX_REFCOUNT
4130+ if (lvl == 6)
4131+ pax_report_refcount_overflow(regs);
4132+#endif
4133+
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141-
4142+
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147+#ifdef CONFIG_PAX_REFCOUNT
4148+ if (lvl == 6)
4149+ pax_report_refcount_overflow(regs);
4150+#endif
4151+
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159- printk("TPC<%pS>\n", (void *) regs->tpc);
4160+ printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210- printk(" [%016lx] %pS\n", pc, (void *) pc);
4211+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217- printk(" [%016lx] %pS\n", pc, (void *) pc);
4218+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226+extern void gr_handle_kernel_exploit(void);
4227+
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244- if (regs->tstate & TSTATE_PRIV)
4245+ if (regs->tstate & TSTATE_PRIV) {
4246+ gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248+ }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253index 76e4ac1..78f8bb1 100644
4254--- a/arch/sparc/kernel/unaligned_64.c
4255+++ b/arch/sparc/kernel/unaligned_64.c
4256@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266index a3fc437..fea9957 100644
4267--- a/arch/sparc/lib/Makefile
4268+++ b/arch/sparc/lib/Makefile
4269@@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273-ccflags-y := -Werror
4274+#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279index 59186e0..f747d7a 100644
4280--- a/arch/sparc/lib/atomic_64.S
4281+++ b/arch/sparc/lib/atomic_64.S
4282@@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286- add %g1, %o0, %g7
4287+ addcc %g1, %o0, %g7
4288+
4289+#ifdef CONFIG_PAX_REFCOUNT
4290+ tvs %icc, 6
4291+#endif
4292+
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300+ .globl atomic_add_unchecked
4301+ .type atomic_add_unchecked,#function
4302+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303+ BACKOFF_SETUP(%o2)
4304+1: lduw [%o1], %g1
4305+ add %g1, %o0, %g7
4306+ cas [%o1], %g1, %g7
4307+ cmp %g1, %g7
4308+ bne,pn %icc, 2f
4309+ nop
4310+ retl
4311+ nop
4312+2: BACKOFF_SPIN(%o2, %o3, 1b)
4313+ .size atomic_add_unchecked, .-atomic_add_unchecked
4314+
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320- sub %g1, %o0, %g7
4321+ subcc %g1, %o0, %g7
4322+
4323+#ifdef CONFIG_PAX_REFCOUNT
4324+ tvs %icc, 6
4325+#endif
4326+
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334+ .globl atomic_sub_unchecked
4335+ .type atomic_sub_unchecked,#function
4336+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337+ BACKOFF_SETUP(%o2)
4338+1: lduw [%o1], %g1
4339+ sub %g1, %o0, %g7
4340+ cas [%o1], %g1, %g7
4341+ cmp %g1, %g7
4342+ bne,pn %icc, 2f
4343+ nop
4344+ retl
4345+ nop
4346+2: BACKOFF_SPIN(%o2, %o3, 1b)
4347+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348+
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354- add %g1, %o0, %g7
4355+ addcc %g1, %o0, %g7
4356+
4357+#ifdef CONFIG_PAX_REFCOUNT
4358+ tvs %icc, 6
4359+#endif
4360+
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368+ .globl atomic_add_ret_unchecked
4369+ .type atomic_add_ret_unchecked,#function
4370+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371+ BACKOFF_SETUP(%o2)
4372+1: lduw [%o1], %g1
4373+ addcc %g1, %o0, %g7
4374+ cas [%o1], %g1, %g7
4375+ cmp %g1, %g7
4376+ bne,pn %icc, 2f
4377+ add %g7, %o0, %g7
4378+ sra %g7, 0, %o0
4379+ retl
4380+ nop
4381+2: BACKOFF_SPIN(%o2, %o3, 1b)
4382+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383+
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389- sub %g1, %o0, %g7
4390+ subcc %g1, %o0, %g7
4391+
4392+#ifdef CONFIG_PAX_REFCOUNT
4393+ tvs %icc, 6
4394+#endif
4395+
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403- add %g1, %o0, %g7
4404+ addcc %g1, %o0, %g7
4405+
4406+#ifdef CONFIG_PAX_REFCOUNT
4407+ tvs %xcc, 6
4408+#endif
4409+
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417+ .globl atomic64_add_unchecked
4418+ .type atomic64_add_unchecked,#function
4419+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420+ BACKOFF_SETUP(%o2)
4421+1: ldx [%o1], %g1
4422+ addcc %g1, %o0, %g7
4423+ casx [%o1], %g1, %g7
4424+ cmp %g1, %g7
4425+ bne,pn %xcc, 2f
4426+ nop
4427+ retl
4428+ nop
4429+2: BACKOFF_SPIN(%o2, %o3, 1b)
4430+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431+
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437- sub %g1, %o0, %g7
4438+ subcc %g1, %o0, %g7
4439+
4440+#ifdef CONFIG_PAX_REFCOUNT
4441+ tvs %xcc, 6
4442+#endif
4443+
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451+ .globl atomic64_sub_unchecked
4452+ .type atomic64_sub_unchecked,#function
4453+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454+ BACKOFF_SETUP(%o2)
4455+1: ldx [%o1], %g1
4456+ subcc %g1, %o0, %g7
4457+ casx [%o1], %g1, %g7
4458+ cmp %g1, %g7
4459+ bne,pn %xcc, 2f
4460+ nop
4461+ retl
4462+ nop
4463+2: BACKOFF_SPIN(%o2, %o3, 1b)
4464+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465+
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471- add %g1, %o0, %g7
4472+ addcc %g1, %o0, %g7
4473+
4474+#ifdef CONFIG_PAX_REFCOUNT
4475+ tvs %xcc, 6
4476+#endif
4477+
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485+ .globl atomic64_add_ret_unchecked
4486+ .type atomic64_add_ret_unchecked,#function
4487+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488+ BACKOFF_SETUP(%o2)
4489+1: ldx [%o1], %g1
4490+ addcc %g1, %o0, %g7
4491+ casx [%o1], %g1, %g7
4492+ cmp %g1, %g7
4493+ bne,pn %xcc, 2f
4494+ add %g7, %o0, %g7
4495+ mov %g7, %o0
4496+ retl
4497+ nop
4498+2: BACKOFF_SPIN(%o2, %o3, 1b)
4499+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500+
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506- sub %g1, %o0, %g7
4507+ subcc %g1, %o0, %g7
4508+
4509+#ifdef CONFIG_PAX_REFCOUNT
4510+ tvs %xcc, 6
4511+#endif
4512+
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517index 1b30bb3..b4a16c7 100644
4518--- a/arch/sparc/lib/ksyms.c
4519+++ b/arch/sparc/lib/ksyms.c
4520@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524+EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528+EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531+EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535+EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540index 301421c..e2535d1 100644
4541--- a/arch/sparc/mm/Makefile
4542+++ b/arch/sparc/mm/Makefile
4543@@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547-ccflags-y := -Werror
4548+#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553index 8023fd7..c8e89e9 100644
4554--- a/arch/sparc/mm/fault_32.c
4555+++ b/arch/sparc/mm/fault_32.c
4556@@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560+#include <linux/slab.h>
4561+#include <linux/pagemap.h>
4562+#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570+#ifdef CONFIG_PAX_PAGEEXEC
4571+#ifdef CONFIG_PAX_DLRESOLVE
4572+static void pax_emuplt_close(struct vm_area_struct *vma)
4573+{
4574+ vma->vm_mm->call_dl_resolve = 0UL;
4575+}
4576+
4577+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578+{
4579+ unsigned int *kaddr;
4580+
4581+ vmf->page = alloc_page(GFP_HIGHUSER);
4582+ if (!vmf->page)
4583+ return VM_FAULT_OOM;
4584+
4585+ kaddr = kmap(vmf->page);
4586+ memset(kaddr, 0, PAGE_SIZE);
4587+ kaddr[0] = 0x9DE3BFA8U; /* save */
4588+ flush_dcache_page(vmf->page);
4589+ kunmap(vmf->page);
4590+ return VM_FAULT_MAJOR;
4591+}
4592+
4593+static const struct vm_operations_struct pax_vm_ops = {
4594+ .close = pax_emuplt_close,
4595+ .fault = pax_emuplt_fault
4596+};
4597+
4598+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599+{
4600+ int ret;
4601+
4602+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4603+ vma->vm_mm = current->mm;
4604+ vma->vm_start = addr;
4605+ vma->vm_end = addr + PAGE_SIZE;
4606+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608+ vma->vm_ops = &pax_vm_ops;
4609+
4610+ ret = insert_vm_struct(current->mm, vma);
4611+ if (ret)
4612+ return ret;
4613+
4614+ ++current->mm->total_vm;
4615+ return 0;
4616+}
4617+#endif
4618+
4619+/*
4620+ * PaX: decide what to do with offenders (regs->pc = fault address)
4621+ *
4622+ * returns 1 when task should be killed
4623+ * 2 when patched PLT trampoline was detected
4624+ * 3 when unpatched PLT trampoline was detected
4625+ */
4626+static int pax_handle_fetch_fault(struct pt_regs *regs)
4627+{
4628+
4629+#ifdef CONFIG_PAX_EMUPLT
4630+ int err;
4631+
4632+ do { /* PaX: patched PLT emulation #1 */
4633+ unsigned int sethi1, sethi2, jmpl;
4634+
4635+ err = get_user(sethi1, (unsigned int *)regs->pc);
4636+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638+
4639+ if (err)
4640+ break;
4641+
4642+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645+ {
4646+ unsigned int addr;
4647+
4648+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649+ addr = regs->u_regs[UREG_G1];
4650+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651+ regs->pc = addr;
4652+ regs->npc = addr+4;
4653+ return 2;
4654+ }
4655+ } while (0);
4656+
4657+ { /* PaX: patched PLT emulation #2 */
4658+ unsigned int ba;
4659+
4660+ err = get_user(ba, (unsigned int *)regs->pc);
4661+
4662+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663+ unsigned int addr;
4664+
4665+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666+ regs->pc = addr;
4667+ regs->npc = addr+4;
4668+ return 2;
4669+ }
4670+ }
4671+
4672+ do { /* PaX: patched PLT emulation #3 */
4673+ unsigned int sethi, jmpl, nop;
4674+
4675+ err = get_user(sethi, (unsigned int *)regs->pc);
4676+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678+
4679+ if (err)
4680+ break;
4681+
4682+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684+ nop == 0x01000000U)
4685+ {
4686+ unsigned int addr;
4687+
4688+ addr = (sethi & 0x003FFFFFU) << 10;
4689+ regs->u_regs[UREG_G1] = addr;
4690+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691+ regs->pc = addr;
4692+ regs->npc = addr+4;
4693+ return 2;
4694+ }
4695+ } while (0);
4696+
4697+ do { /* PaX: unpatched PLT emulation step 1 */
4698+ unsigned int sethi, ba, nop;
4699+
4700+ err = get_user(sethi, (unsigned int *)regs->pc);
4701+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703+
4704+ if (err)
4705+ break;
4706+
4707+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709+ nop == 0x01000000U)
4710+ {
4711+ unsigned int addr, save, call;
4712+
4713+ if ((ba & 0xFFC00000U) == 0x30800000U)
4714+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715+ else
4716+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717+
4718+ err = get_user(save, (unsigned int *)addr);
4719+ err |= get_user(call, (unsigned int *)(addr+4));
4720+ err |= get_user(nop, (unsigned int *)(addr+8));
4721+ if (err)
4722+ break;
4723+
4724+#ifdef CONFIG_PAX_DLRESOLVE
4725+ if (save == 0x9DE3BFA8U &&
4726+ (call & 0xC0000000U) == 0x40000000U &&
4727+ nop == 0x01000000U)
4728+ {
4729+ struct vm_area_struct *vma;
4730+ unsigned long call_dl_resolve;
4731+
4732+ down_read(&current->mm->mmap_sem);
4733+ call_dl_resolve = current->mm->call_dl_resolve;
4734+ up_read(&current->mm->mmap_sem);
4735+ if (likely(call_dl_resolve))
4736+ goto emulate;
4737+
4738+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739+
4740+ down_write(&current->mm->mmap_sem);
4741+ if (current->mm->call_dl_resolve) {
4742+ call_dl_resolve = current->mm->call_dl_resolve;
4743+ up_write(&current->mm->mmap_sem);
4744+ if (vma)
4745+ kmem_cache_free(vm_area_cachep, vma);
4746+ goto emulate;
4747+ }
4748+
4749+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751+ up_write(&current->mm->mmap_sem);
4752+ if (vma)
4753+ kmem_cache_free(vm_area_cachep, vma);
4754+ return 1;
4755+ }
4756+
4757+ if (pax_insert_vma(vma, call_dl_resolve)) {
4758+ up_write(&current->mm->mmap_sem);
4759+ kmem_cache_free(vm_area_cachep, vma);
4760+ return 1;
4761+ }
4762+
4763+ current->mm->call_dl_resolve = call_dl_resolve;
4764+ up_write(&current->mm->mmap_sem);
4765+
4766+emulate:
4767+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768+ regs->pc = call_dl_resolve;
4769+ regs->npc = addr+4;
4770+ return 3;
4771+ }
4772+#endif
4773+
4774+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775+ if ((save & 0xFFC00000U) == 0x05000000U &&
4776+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4777+ nop == 0x01000000U)
4778+ {
4779+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780+ regs->u_regs[UREG_G2] = addr + 4;
4781+ addr = (save & 0x003FFFFFU) << 10;
4782+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783+ regs->pc = addr;
4784+ regs->npc = addr+4;
4785+ return 3;
4786+ }
4787+ }
4788+ } while (0);
4789+
4790+ do { /* PaX: unpatched PLT emulation step 2 */
4791+ unsigned int save, call, nop;
4792+
4793+ err = get_user(save, (unsigned int *)(regs->pc-4));
4794+ err |= get_user(call, (unsigned int *)regs->pc);
4795+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796+ if (err)
4797+ break;
4798+
4799+ if (save == 0x9DE3BFA8U &&
4800+ (call & 0xC0000000U) == 0x40000000U &&
4801+ nop == 0x01000000U)
4802+ {
4803+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804+
4805+ regs->u_regs[UREG_RETPC] = regs->pc;
4806+ regs->pc = dl_resolve;
4807+ regs->npc = dl_resolve+4;
4808+ return 3;
4809+ }
4810+ } while (0);
4811+#endif
4812+
4813+ return 1;
4814+}
4815+
4816+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817+{
4818+ unsigned long i;
4819+
4820+ printk(KERN_ERR "PAX: bytes at PC: ");
4821+ for (i = 0; i < 8; i++) {
4822+ unsigned int c;
4823+ if (get_user(c, (unsigned int *)pc+i))
4824+ printk(KERN_CONT "???????? ");
4825+ else
4826+ printk(KERN_CONT "%08x ", c);
4827+ }
4828+ printk("\n");
4829+}
4830+#endif
4831+
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835@@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839+
4840+#ifdef CONFIG_PAX_PAGEEXEC
4841+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842+ up_read(&mm->mmap_sem);
4843+ switch (pax_handle_fetch_fault(regs)) {
4844+
4845+#ifdef CONFIG_PAX_EMUPLT
4846+ case 2:
4847+ case 3:
4848+ return;
4849+#endif
4850+
4851+ }
4852+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853+ do_group_exit(SIGKILL);
4854+ }
4855+#endif
4856+
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861index 504c062..6fcb9c6 100644
4862--- a/arch/sparc/mm/fault_64.c
4863+++ b/arch/sparc/mm/fault_64.c
4864@@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868+#include <linux/slab.h>
4869+#include <linux/pagemap.h>
4870+#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887+#ifdef CONFIG_PAX_PAGEEXEC
4888+#ifdef CONFIG_PAX_DLRESOLVE
4889+static void pax_emuplt_close(struct vm_area_struct *vma)
4890+{
4891+ vma->vm_mm->call_dl_resolve = 0UL;
4892+}
4893+
4894+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895+{
4896+ unsigned int *kaddr;
4897+
4898+ vmf->page = alloc_page(GFP_HIGHUSER);
4899+ if (!vmf->page)
4900+ return VM_FAULT_OOM;
4901+
4902+ kaddr = kmap(vmf->page);
4903+ memset(kaddr, 0, PAGE_SIZE);
4904+ kaddr[0] = 0x9DE3BFA8U; /* save */
4905+ flush_dcache_page(vmf->page);
4906+ kunmap(vmf->page);
4907+ return VM_FAULT_MAJOR;
4908+}
4909+
4910+static const struct vm_operations_struct pax_vm_ops = {
4911+ .close = pax_emuplt_close,
4912+ .fault = pax_emuplt_fault
4913+};
4914+
4915+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916+{
4917+ int ret;
4918+
4919+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4920+ vma->vm_mm = current->mm;
4921+ vma->vm_start = addr;
4922+ vma->vm_end = addr + PAGE_SIZE;
4923+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925+ vma->vm_ops = &pax_vm_ops;
4926+
4927+ ret = insert_vm_struct(current->mm, vma);
4928+ if (ret)
4929+ return ret;
4930+
4931+ ++current->mm->total_vm;
4932+ return 0;
4933+}
4934+#endif
4935+
4936+/*
4937+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4938+ *
4939+ * returns 1 when task should be killed
4940+ * 2 when patched PLT trampoline was detected
4941+ * 3 when unpatched PLT trampoline was detected
4942+ */
4943+static int pax_handle_fetch_fault(struct pt_regs *regs)
4944+{
4945+
4946+#ifdef CONFIG_PAX_EMUPLT
4947+ int err;
4948+
4949+ do { /* PaX: patched PLT emulation #1 */
4950+ unsigned int sethi1, sethi2, jmpl;
4951+
4952+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4953+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955+
4956+ if (err)
4957+ break;
4958+
4959+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962+ {
4963+ unsigned long addr;
4964+
4965+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966+ addr = regs->u_regs[UREG_G1];
4967+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968+
4969+ if (test_thread_flag(TIF_32BIT))
4970+ addr &= 0xFFFFFFFFUL;
4971+
4972+ regs->tpc = addr;
4973+ regs->tnpc = addr+4;
4974+ return 2;
4975+ }
4976+ } while (0);
4977+
4978+ { /* PaX: patched PLT emulation #2 */
4979+ unsigned int ba;
4980+
4981+ err = get_user(ba, (unsigned int *)regs->tpc);
4982+
4983+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984+ unsigned long addr;
4985+
4986+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987+
4988+ if (test_thread_flag(TIF_32BIT))
4989+ addr &= 0xFFFFFFFFUL;
4990+
4991+ regs->tpc = addr;
4992+ regs->tnpc = addr+4;
4993+ return 2;
4994+ }
4995+ }
4996+
4997+ do { /* PaX: patched PLT emulation #3 */
4998+ unsigned int sethi, jmpl, nop;
4999+
5000+ err = get_user(sethi, (unsigned int *)regs->tpc);
5001+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003+
5004+ if (err)
5005+ break;
5006+
5007+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009+ nop == 0x01000000U)
5010+ {
5011+ unsigned long addr;
5012+
5013+ addr = (sethi & 0x003FFFFFU) << 10;
5014+ regs->u_regs[UREG_G1] = addr;
5015+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016+
5017+ if (test_thread_flag(TIF_32BIT))
5018+ addr &= 0xFFFFFFFFUL;
5019+
5020+ regs->tpc = addr;
5021+ regs->tnpc = addr+4;
5022+ return 2;
5023+ }
5024+ } while (0);
5025+
5026+ do { /* PaX: patched PLT emulation #4 */
5027+ unsigned int sethi, mov1, call, mov2;
5028+
5029+ err = get_user(sethi, (unsigned int *)regs->tpc);
5030+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033+
5034+ if (err)
5035+ break;
5036+
5037+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038+ mov1 == 0x8210000FU &&
5039+ (call & 0xC0000000U) == 0x40000000U &&
5040+ mov2 == 0x9E100001U)
5041+ {
5042+ unsigned long addr;
5043+
5044+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046+
5047+ if (test_thread_flag(TIF_32BIT))
5048+ addr &= 0xFFFFFFFFUL;
5049+
5050+ regs->tpc = addr;
5051+ regs->tnpc = addr+4;
5052+ return 2;
5053+ }
5054+ } while (0);
5055+
5056+ do { /* PaX: patched PLT emulation #5 */
5057+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058+
5059+ err = get_user(sethi, (unsigned int *)regs->tpc);
5060+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067+
5068+ if (err)
5069+ break;
5070+
5071+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5075+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076+ sllx == 0x83287020U &&
5077+ jmpl == 0x81C04005U &&
5078+ nop == 0x01000000U)
5079+ {
5080+ unsigned long addr;
5081+
5082+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083+ regs->u_regs[UREG_G1] <<= 32;
5084+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086+ regs->tpc = addr;
5087+ regs->tnpc = addr+4;
5088+ return 2;
5089+ }
5090+ } while (0);
5091+
5092+ do { /* PaX: patched PLT emulation #6 */
5093+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094+
5095+ err = get_user(sethi, (unsigned int *)regs->tpc);
5096+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102+
5103+ if (err)
5104+ break;
5105+
5106+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109+ sllx == 0x83287020U &&
5110+ (or & 0xFFFFE000U) == 0x8A116000U &&
5111+ jmpl == 0x81C04005U &&
5112+ nop == 0x01000000U)
5113+ {
5114+ unsigned long addr;
5115+
5116+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117+ regs->u_regs[UREG_G1] <<= 32;
5118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120+ regs->tpc = addr;
5121+ regs->tnpc = addr+4;
5122+ return 2;
5123+ }
5124+ } while (0);
5125+
5126+ do { /* PaX: unpatched PLT emulation step 1 */
5127+ unsigned int sethi, ba, nop;
5128+
5129+ err = get_user(sethi, (unsigned int *)regs->tpc);
5130+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132+
5133+ if (err)
5134+ break;
5135+
5136+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138+ nop == 0x01000000U)
5139+ {
5140+ unsigned long addr;
5141+ unsigned int save, call;
5142+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143+
5144+ if ((ba & 0xFFC00000U) == 0x30800000U)
5145+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146+ else
5147+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148+
5149+ if (test_thread_flag(TIF_32BIT))
5150+ addr &= 0xFFFFFFFFUL;
5151+
5152+ err = get_user(save, (unsigned int *)addr);
5153+ err |= get_user(call, (unsigned int *)(addr+4));
5154+ err |= get_user(nop, (unsigned int *)(addr+8));
5155+ if (err)
5156+ break;
5157+
5158+#ifdef CONFIG_PAX_DLRESOLVE
5159+ if (save == 0x9DE3BFA8U &&
5160+ (call & 0xC0000000U) == 0x40000000U &&
5161+ nop == 0x01000000U)
5162+ {
5163+ struct vm_area_struct *vma;
5164+ unsigned long call_dl_resolve;
5165+
5166+ down_read(&current->mm->mmap_sem);
5167+ call_dl_resolve = current->mm->call_dl_resolve;
5168+ up_read(&current->mm->mmap_sem);
5169+ if (likely(call_dl_resolve))
5170+ goto emulate;
5171+
5172+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173+
5174+ down_write(&current->mm->mmap_sem);
5175+ if (current->mm->call_dl_resolve) {
5176+ call_dl_resolve = current->mm->call_dl_resolve;
5177+ up_write(&current->mm->mmap_sem);
5178+ if (vma)
5179+ kmem_cache_free(vm_area_cachep, vma);
5180+ goto emulate;
5181+ }
5182+
5183+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185+ up_write(&current->mm->mmap_sem);
5186+ if (vma)
5187+ kmem_cache_free(vm_area_cachep, vma);
5188+ return 1;
5189+ }
5190+
5191+ if (pax_insert_vma(vma, call_dl_resolve)) {
5192+ up_write(&current->mm->mmap_sem);
5193+ kmem_cache_free(vm_area_cachep, vma);
5194+ return 1;
5195+ }
5196+
5197+ current->mm->call_dl_resolve = call_dl_resolve;
5198+ up_write(&current->mm->mmap_sem);
5199+
5200+emulate:
5201+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202+ regs->tpc = call_dl_resolve;
5203+ regs->tnpc = addr+4;
5204+ return 3;
5205+ }
5206+#endif
5207+
5208+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209+ if ((save & 0xFFC00000U) == 0x05000000U &&
5210+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5211+ nop == 0x01000000U)
5212+ {
5213+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214+ regs->u_regs[UREG_G2] = addr + 4;
5215+ addr = (save & 0x003FFFFFU) << 10;
5216+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217+
5218+ if (test_thread_flag(TIF_32BIT))
5219+ addr &= 0xFFFFFFFFUL;
5220+
5221+ regs->tpc = addr;
5222+ regs->tnpc = addr+4;
5223+ return 3;
5224+ }
5225+
5226+ /* PaX: 64-bit PLT stub */
5227+ err = get_user(sethi1, (unsigned int *)addr);
5228+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5229+ err |= get_user(or1, (unsigned int *)(addr+8));
5230+ err |= get_user(or2, (unsigned int *)(addr+12));
5231+ err |= get_user(sllx, (unsigned int *)(addr+16));
5232+ err |= get_user(add, (unsigned int *)(addr+20));
5233+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5234+ err |= get_user(nop, (unsigned int *)(addr+28));
5235+ if (err)
5236+ break;
5237+
5238+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5241+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242+ sllx == 0x89293020U &&
5243+ add == 0x8A010005U &&
5244+ jmpl == 0x89C14000U &&
5245+ nop == 0x01000000U)
5246+ {
5247+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249+ regs->u_regs[UREG_G4] <<= 32;
5250+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252+ regs->u_regs[UREG_G4] = addr + 24;
5253+ addr = regs->u_regs[UREG_G5];
5254+ regs->tpc = addr;
5255+ regs->tnpc = addr+4;
5256+ return 3;
5257+ }
5258+ }
5259+ } while (0);
5260+
5261+#ifdef CONFIG_PAX_DLRESOLVE
5262+ do { /* PaX: unpatched PLT emulation step 2 */
5263+ unsigned int save, call, nop;
5264+
5265+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5266+ err |= get_user(call, (unsigned int *)regs->tpc);
5267+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268+ if (err)
5269+ break;
5270+
5271+ if (save == 0x9DE3BFA8U &&
5272+ (call & 0xC0000000U) == 0x40000000U &&
5273+ nop == 0x01000000U)
5274+ {
5275+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276+
5277+ if (test_thread_flag(TIF_32BIT))
5278+ dl_resolve &= 0xFFFFFFFFUL;
5279+
5280+ regs->u_regs[UREG_RETPC] = regs->tpc;
5281+ regs->tpc = dl_resolve;
5282+ regs->tnpc = dl_resolve+4;
5283+ return 3;
5284+ }
5285+ } while (0);
5286+#endif
5287+
5288+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289+ unsigned int sethi, ba, nop;
5290+
5291+ err = get_user(sethi, (unsigned int *)regs->tpc);
5292+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294+
5295+ if (err)
5296+ break;
5297+
5298+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299+ (ba & 0xFFF00000U) == 0x30600000U &&
5300+ nop == 0x01000000U)
5301+ {
5302+ unsigned long addr;
5303+
5304+ addr = (sethi & 0x003FFFFFU) << 10;
5305+ regs->u_regs[UREG_G1] = addr;
5306+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307+
5308+ if (test_thread_flag(TIF_32BIT))
5309+ addr &= 0xFFFFFFFFUL;
5310+
5311+ regs->tpc = addr;
5312+ regs->tnpc = addr+4;
5313+ return 2;
5314+ }
5315+ } while (0);
5316+
5317+#endif
5318+
5319+ return 1;
5320+}
5321+
5322+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323+{
5324+ unsigned long i;
5325+
5326+ printk(KERN_ERR "PAX: bytes at PC: ");
5327+ for (i = 0; i < 8; i++) {
5328+ unsigned int c;
5329+ if (get_user(c, (unsigned int *)pc+i))
5330+ printk(KERN_CONT "???????? ");
5331+ else
5332+ printk(KERN_CONT "%08x ", c);
5333+ }
5334+ printk("\n");
5335+}
5336+#endif
5337+
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345+#ifdef CONFIG_PAX_PAGEEXEC
5346+ /* PaX: detect ITLB misses on non-exec pages */
5347+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349+ {
5350+ if (address != regs->tpc)
5351+ goto good_area;
5352+
5353+ up_read(&mm->mmap_sem);
5354+ switch (pax_handle_fetch_fault(regs)) {
5355+
5356+#ifdef CONFIG_PAX_EMUPLT
5357+ case 2:
5358+ case 3:
5359+ return;
5360+#endif
5361+
5362+ }
5363+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364+ do_group_exit(SIGKILL);
5365+ }
5366+#endif
5367+
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372index 07e1453..0a7d9e9 100644
5373--- a/arch/sparc/mm/hugetlbpage.c
5374+++ b/arch/sparc/mm/hugetlbpage.c
5375@@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379- if (likely(!vma || addr + len <= vma->vm_start)) {
5380+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388- if (!vma || addr <= vma->vm_start) {
5389+ if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397- addr = (mm->mmap_base-len) & HPAGE_MASK;
5398+ addr = mm->mmap_base - len;
5399
5400 do {
5401+ addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408- if (likely(!vma || addr+len <= vma->vm_start)) {
5409+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417- addr = (vma->vm_start-len) & HPAGE_MASK;
5418- } while (likely(len < vma->vm_start));
5419+ addr = skip_heap_stack_gap(vma, len);
5420+ } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428- if (task_size - len >= addr &&
5429- (!vma || addr + len <= vma->vm_start))
5430+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435index 7b00de6..78239f4 100644
5436--- a/arch/sparc/mm/init_32.c
5437+++ b/arch/sparc/mm/init_32.c
5438@@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444+
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448@@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452- protection_map[1] = PAGE_READONLY;
5453- protection_map[2] = PAGE_COPY;
5454- protection_map[3] = PAGE_COPY;
5455+ protection_map[1] = PAGE_READONLY_NOEXEC;
5456+ protection_map[2] = PAGE_COPY_NOEXEC;
5457+ protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463- protection_map[9] = PAGE_READONLY;
5464- protection_map[10] = PAGE_SHARED;
5465- protection_map[11] = PAGE_SHARED;
5466+ protection_map[9] = PAGE_READONLY_NOEXEC;
5467+ protection_map[10] = PAGE_SHARED_NOEXEC;
5468+ protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473index cbef74e..c38fead 100644
5474--- a/arch/sparc/mm/srmmu.c
5475+++ b/arch/sparc/mm/srmmu.c
5476@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480+
5481+#ifdef CONFIG_PAX_PAGEEXEC
5482+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485+#endif
5486+
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490diff --git a/arch/um/Makefile b/arch/um/Makefile
5491index 7730af6..cce5b19 100644
5492--- a/arch/um/Makefile
5493+++ b/arch/um/Makefile
5494@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498+ifdef CONSTIFY_PLUGIN
5499+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500+endif
5501+
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506index 6c03acd..a5e0215 100644
5507--- a/arch/um/include/asm/kmap_types.h
5508+++ b/arch/um/include/asm/kmap_types.h
5509@@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513+ KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518index 7cfc3ce..cbd1a58 100644
5519--- a/arch/um/include/asm/page.h
5520+++ b/arch/um/include/asm/page.h
5521@@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525+#define ktla_ktva(addr) (addr)
5526+#define ktva_ktla(addr) (addr)
5527+
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532index c533835..84db18e 100644
5533--- a/arch/um/kernel/process.c
5534+++ b/arch/um/kernel/process.c
5535@@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539-/*
5540- * Only x86 and x86_64 have an arch_align_stack().
5541- * All other arches have "#define arch_align_stack(x) (x)"
5542- * in their asm/system.h
5543- * As this is included in UML from asm-um/system-generic.h,
5544- * we can use it to behave as the subarch does.
5545- */
5546-#ifndef arch_align_stack
5547-unsigned long arch_align_stack(unsigned long sp)
5548-{
5549- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550- sp -= get_random_int() % 8192;
5551- return sp & ~0xf;
5552-}
5553-#endif
5554-
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559index efb4294..61bc18c 100644
5560--- a/arch/x86/Kconfig
5561+++ b/arch/x86/Kconfig
5562@@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566- depends on X86_32 && !CC_STACKPROTECTOR
5567+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571@@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575- depends on !X86_NUMAQ
5576+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580@@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584- depends on !X86_NUMAQ
5585+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593- default 0x78000000 if VMSPLIT_2G_OPT
5594+ default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598@@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602+ depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610+ range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618+ range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626- def_bool y
5627+ def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635index e3ca7e0..b30b28a 100644
5636--- a/arch/x86/Kconfig.cpu
5637+++ b/arch/x86/Kconfig.cpu
5638@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642- depends on M586MMX || M586TSC || M586 || M486 || M386
5643+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647@@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666index bf56e17..05f9891 100644
5667--- a/arch/x86/Kconfig.debug
5668+++ b/arch/x86/Kconfig.debug
5669@@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673- depends on DEBUG_KERNEL
5674+ depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682- depends on MODULES
5683+ depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688index b02e509..2631e48 100644
5689--- a/arch/x86/Makefile
5690+++ b/arch/x86/Makefile
5691@@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695+ biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699@@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703+
5704+define OLD_LD
5705+
5706+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707+*** Please upgrade your binutils to 2.18 or newer
5708+endef
5709+
5710+archprepare:
5711+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713index 95365a8..52f857b 100644
5714--- a/arch/x86/boot/Makefile
5715+++ b/arch/x86/boot/Makefile
5716@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720+ifdef CONSTIFY_PLUGIN
5721+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722+endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727index 878e4b9..20537ab 100644
5728--- a/arch/x86/boot/bitops.h
5729+++ b/arch/x86/boot/bitops.h
5730@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749index c7093bd..d4247ffe0 100644
5750--- a/arch/x86/boot/boot.h
5751+++ b/arch/x86/boot/boot.h
5752@@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756- asm("movw %%ds,%0" : "=rm" (seg));
5757+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765- asm("repe; cmpsb; setnz %0"
5766+ asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771index 09664ef..edc5d03 100644
5772--- a/arch/x86/boot/compressed/Makefile
5773+++ b/arch/x86/boot/compressed/Makefile
5774@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778+ifdef CONSTIFY_PLUGIN
5779+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780+endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785index 67a655a..b924059 100644
5786--- a/arch/x86/boot/compressed/head_32.S
5787+++ b/arch/x86/boot/compressed/head_32.S
5788@@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792- movl $LOAD_PHYSICAL_ADDR, %ebx
5793+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797@@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801- subl $LOAD_PHYSICAL_ADDR, %ebx
5802+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806@@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810- testl %ecx, %ecx
5811- jz 2f
5812+ jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817index 35af09d..99c9676 100644
5818--- a/arch/x86/boot/compressed/head_64.S
5819+++ b/arch/x86/boot/compressed/head_64.S
5820@@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824- movl $LOAD_PHYSICAL_ADDR, %ebx
5825+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829@@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833- movq $LOAD_PHYSICAL_ADDR, %rbp
5834+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839index 3a19d04..7c1d55a 100644
5840--- a/arch/x86/boot/compressed/misc.c
5841+++ b/arch/x86/boot/compressed/misc.c
5842@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861index 89bbf4e..869908e 100644
5862--- a/arch/x86/boot/compressed/relocs.c
5863+++ b/arch/x86/boot/compressed/relocs.c
5864@@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868+#include "../../../../include/generated/autoconf.h"
5869+
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872+static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880+static void read_phdrs(FILE *fp)
5881+{
5882+ unsigned int i;
5883+
5884+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885+ if (!phdr) {
5886+ die("Unable to allocate %d program headers\n",
5887+ ehdr.e_phnum);
5888+ }
5889+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890+ die("Seek to %d failed: %s\n",
5891+ ehdr.e_phoff, strerror(errno));
5892+ }
5893+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894+ die("Cannot read ELF program headers: %s\n",
5895+ strerror(errno));
5896+ }
5897+ for(i = 0; i < ehdr.e_phnum; i++) {
5898+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906+ }
5907+
5908+}
5909+
5910 static void read_shdrs(FILE *fp)
5911 {
5912- int i;
5913+ unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921- int i;
5922+ unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930- int i,j;
5931+ unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939- int i,j;
5940+ unsigned int i,j;
5941+ uint32_t base;
5942+
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950+ base = 0;
5951+ for (j = 0; j < ehdr.e_phnum; j++) {
5952+ if (phdr[j].p_type != PT_LOAD )
5953+ continue;
5954+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955+ continue;
5956+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957+ break;
5958+ }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961- rel->r_offset = elf32_to_cpu(rel->r_offset);
5962+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970- int i;
5971+ unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978- int j;
5979+ unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987- int i, printed = 0;
5988+ unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995- int j;
5996+ unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004- int i;
6005+ unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011- int j;
6012+ unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022+ continue;
6023+
6024+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027+ continue;
6028+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029+ continue;
6030+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031+ continue;
6032+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033+ continue;
6034+#endif
6035+
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043- int i;
6044+ unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052+ read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057index 4d3ff03..e4972ff 100644
6058--- a/arch/x86/boot/cpucheck.c
6059+++ b/arch/x86/boot/cpucheck.c
6060@@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064- asm("movl %%cr0,%0" : "=r" (cr0));
6065+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073- asm("pushfl ; "
6074+ asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078@@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082- asm("cpuid"
6083+ asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087@@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091- asm("cpuid"
6092+ asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096@@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100- asm("cpuid"
6101+ asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105@@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109- asm("cpuid"
6110+ asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144- asm("cpuid"
6145+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147+ asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156index bdb4d45..0476680 100644
6157--- a/arch/x86/boot/header.S
6158+++ b/arch/x86/boot/header.S
6159@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169index db75d07..8e6d0af 100644
6170--- a/arch/x86/boot/memory.c
6171+++ b/arch/x86/boot/memory.c
6172@@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176- int count = 0;
6177+ unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182index 11e8c6e..fdbb1ed 100644
6183--- a/arch/x86/boot/video-vesa.c
6184+++ b/arch/x86/boot/video-vesa.c
6185@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189+ boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194index 43eda28..5ab5fdb 100644
6195--- a/arch/x86/boot/video.c
6196+++ b/arch/x86/boot/video.c
6197@@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201- int i, len = 0;
6202+ unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207index 5b577d5..3c1fed4 100644
6208--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210@@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214+#include <asm/alternative-asm.h>
6215+
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223+#define ret pax_force_retaddr 0, 1; ret
6224+
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229index be6d9e3..21fbbca 100644
6230--- a/arch/x86/crypto/aesni-intel_asm.S
6231+++ b/arch/x86/crypto/aesni-intel_asm.S
6232@@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236+#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244+ pax_force_retaddr 0, 1
6245 ret
6246+ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254+ pax_force_retaddr 0, 1
6255 ret
6256+ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264+ pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272+ pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280+ pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288+ pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296+ pax_force_retaddr 0, 1
6297 ret
6298+ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306+ pax_force_retaddr 0, 1
6307 ret
6308+ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312@@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316+ pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320@@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324+ pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332+ pax_force_retaddr 0, 1
6333 ret
6334+ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338@@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342+ pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346@@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350+ pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358+ pax_force_retaddr 0, 1
6359 ret
6360+ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368+ pax_force_retaddr 0, 1
6369 ret
6370+ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378+ pax_force_retaddr 0, 1
6379 ret
6380+ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388+ pax_force_retaddr 0, 1
6389 ret
6390+ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398+ pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402@@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406+ pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414+ pax_force_retaddr 0, 1
6415 ret
6416+ENDPROC(aesni_ctr_enc)
6417 #endif
6418diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419index 391d245..67f35c2 100644
6420--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422@@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426+#include <asm/alternative-asm.h>
6427+
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431@@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435+ pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439+ pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443@@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447+ pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455+ pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459@@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463+ pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471+ pax_force_retaddr 0, 1
6472 ret;
6473
6474diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475index 6214a9b..1f4fc9a 100644
6476--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478@@ -1,3 +1,5 @@
6479+#include <asm/alternative-asm.h>
6480+
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488+ pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496+ pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504+ pax_force_retaddr
6505 ret
6506diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507index b2c2f57..8470cab 100644
6508--- a/arch/x86/crypto/sha1_ssse3_asm.S
6509+++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510@@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514+#include <asm/alternative-asm.h>
6515+
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519@@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523+ pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528index 5b012a2..36d5364 100644
6529--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531@@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535+#include <asm/alternative-asm.h>
6536+
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544+ pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548@@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552+ pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560+ pax_force_retaddr 0, 1
6561 ret;
6562
6563diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564index 7bcf3fc..f53832f 100644
6565--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567@@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571+#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575@@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579+ pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583@@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587+ pax_force_retaddr 0, 1
6588 ret
6589diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590index fd84387..0b4af7d 100644
6591--- a/arch/x86/ia32/ia32_aout.c
6592+++ b/arch/x86/ia32/ia32_aout.c
6593@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597+ memset(&dump, 0, sizeof(dump));
6598+
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603index 6557769..ef6ae89 100644
6604--- a/arch/x86/ia32/ia32_signal.c
6605+++ b/arch/x86/ia32/ia32_signal.c
6606@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619- void **fpstate)
6620+ void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628- *fpstate = (struct _fpstate_ia32 *) sp;
6629+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637- sp = ((sp + 4) & -16ul) - 4;
6638+ sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655- 0,
6656+ 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664+ else if (current->mm->context.vdso)
6665+ /* Return stub is in 32bit vsyscall page */
6666+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669- rt_sigreturn);
6670+ restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683index a6253ec..4ad2120 100644
6684--- a/arch/x86/ia32/ia32entry.S
6685+++ b/arch/x86/ia32/ia32entry.S
6686@@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690+#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692+#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700+ .macro pax_enter_kernel_user
6701+ pax_set_fptr_mask
6702+#ifdef CONFIG_PAX_MEMORY_UDEREF
6703+ call pax_enter_kernel_user
6704+#endif
6705+ .endm
6706+
6707+ .macro pax_exit_kernel_user
6708+#ifdef CONFIG_PAX_MEMORY_UDEREF
6709+ call pax_exit_kernel_user
6710+#endif
6711+#ifdef CONFIG_PAX_RANDKSTACK
6712+ pushq %rax
6713+ pushq %r11
6714+ call pax_randomize_kstack
6715+ popq %r11
6716+ popq %rax
6717+#endif
6718+ .endm
6719+
6720+.macro pax_erase_kstack
6721+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722+ call pax_erase_kstack
6723+#endif
6724+.endm
6725+
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733- addq $(KERNEL_STACK_OFFSET),%rsp
6734- /*
6735- * No need to follow this irqs on/off section: the syscall
6736- * disabled irqs, here we enable it straight after entry:
6737- */
6738- ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747- CFI_REGISTER rip,r10
6748+ orl $X86_EFLAGS_IF,(%rsp)
6749+ GET_THREAD_INFO(%r11)
6750+ movl TI_sysenter_return(%r11), %r11d
6751+ CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755- pushq_cfi %r10
6756+ pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761+ pax_enter_kernel_user
6762+ /*
6763+ * No need to follow this irqs on/off section: the syscall
6764+ * disabled irqs, here we enable it straight after entry:
6765+ */
6766+ ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769+
6770+#ifdef CONFIG_PAX_MEMORY_UDEREF
6771+ mov $PAX_USER_SHADOW_BASE,%r11
6772+ add %r11,%rbp
6773+#endif
6774+
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779- GET_THREAD_INFO(%r10)
6780- orl $TS_COMPAT,TI_status(%r10)
6781- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782+ GET_THREAD_INFO(%r11)
6783+ orl $TS_COMPAT,TI_status(%r11)
6784+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788@@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792- GET_THREAD_INFO(%r10)
6793+ GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800- andl $~TS_COMPAT,TI_status(%r10)
6801+ pax_exit_kernel_user
6802+ pax_erase_kstack
6803+ andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807@@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811+
6812+ pax_erase_kstack
6813+
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817@@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826@@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830- GET_THREAD_INFO(%r10)
6831+ GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836- testl %edi,TI_flags(%r10)
6837+ testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841@@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850@@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854+
6855+ pax_erase_kstack
6856+
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865+ CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872+ SAVE_ARGS 8*6,0,0
6873+ pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887+
6888+#ifdef CONFIG_PAX_MEMORY_UDEREF
6889+ mov $PAX_USER_SHADOW_BASE,%r11
6890+ add %r11,%r8
6891+#endif
6892+
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897- GET_THREAD_INFO(%r10)
6898- orl $TS_COMPAT,TI_status(%r10)
6899- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900+ GET_THREAD_INFO(%r11)
6901+ orl $TS_COMPAT,TI_status(%r11)
6902+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906@@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910- GET_THREAD_INFO(%r10)
6911+ GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918- andl $~TS_COMPAT,TI_status(%r10)
6919+ pax_exit_kernel_user
6920+ pax_erase_kstack
6921+ andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925@@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934@@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938+
6939+ pax_erase_kstack
6940+
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948- /*
6949- * No need to follow this irqs on/off section: the syscall
6950- * disabled irqs and here we enable it straight after entry:
6951- */
6952- ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959- GET_THREAD_INFO(%r10)
6960- orl $TS_COMPAT,TI_status(%r10)
6961- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962+ pax_enter_kernel_user
6963+ /*
6964+ * No need to follow this irqs on/off section: the syscall
6965+ * disabled irqs and here we enable it straight after entry:
6966+ */
6967+ ENABLE_INTERRUPTS(CLBR_NONE)
6968+ GET_THREAD_INFO(%r11)
6969+ orl $TS_COMPAT,TI_status(%r11)
6970+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974@@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978+
6979+ pax_erase_kstack
6980+
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984@@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988+ pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993index f6f5c53..b358b28 100644
6994--- a/arch/x86/ia32/sys_ia32.c
6995+++ b/arch/x86/ia32/sys_ia32.c
6996@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000- typeof(ubuf->st_uid) uid = 0;
7001- typeof(ubuf->st_gid) gid = 0;
7002+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011- set ? (sigset_t __user *)&s : NULL,
7012- oset ? (sigset_t __user *)&s : NULL,
7013+ set ? (sigset_t __force_user *)&s : NULL,
7014+ oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064index 091508b..e245ff2 100644
7065--- a/arch/x86/include/asm/alternative-asm.h
7066+++ b/arch/x86/include/asm/alternative-asm.h
7067@@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071-1: lock
7072+672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075- .long 1b - .
7076+ .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080@@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085+ .macro pax_force_retaddr_bts rip=0
7086+ btsq $63,\rip(%rsp)
7087+ .endm
7088+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089+ .macro pax_force_retaddr rip=0, reload=0
7090+ btsq $63,\rip(%rsp)
7091+ .endm
7092+ .macro pax_force_fptr ptr
7093+ btsq $63,\ptr
7094+ .endm
7095+ .macro pax_set_fptr_mask
7096+ .endm
7097+#endif
7098+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099+ .macro pax_force_retaddr rip=0, reload=0
7100+ .if \reload
7101+ pax_set_fptr_mask
7102+ .endif
7103+ orq %r10,\rip(%rsp)
7104+ .endm
7105+ .macro pax_force_fptr ptr
7106+ orq %r10,\ptr
7107+ .endm
7108+ .macro pax_set_fptr_mask
7109+ movabs $0x8000000000000000,%r10
7110+ .endm
7111+#endif
7112+#else
7113+ .macro pax_force_retaddr rip=0, reload=0
7114+ .endm
7115+ .macro pax_force_fptr ptr
7116+ .endm
7117+ .macro pax_force_retaddr_bts rip=0
7118+ .endm
7119+ .macro pax_set_fptr_mask
7120+ .endm
7121+#endif
7122+
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127index 37ad100..7d47faa 100644
7128--- a/arch/x86/include/asm/alternative.h
7129+++ b/arch/x86/include/asm/alternative.h
7130@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134- ".section .altinstr_replacement, \"ax\"\n" \
7135+ ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140index 1a6c09a..fec2432 100644
7141--- a/arch/x86/include/asm/apic.h
7142+++ b/arch/x86/include/asm/apic.h
7143@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147-extern unsigned int apic_verbosity;
7148+extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153index 20370c6..a2eb9b0 100644
7154--- a/arch/x86/include/asm/apm.h
7155+++ b/arch/x86/include/asm/apm.h
7156@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160- "lcall *%%cs:apm_bios_entry\n\t"
7161+ "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169- "lcall *%%cs:apm_bios_entry\n\t"
7170+ "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175index 58cb6d4..ca9010d 100644
7176--- a/arch/x86/include/asm/atomic.h
7177+++ b/arch/x86/include/asm/atomic.h
7178@@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182- return (*(volatile int *)&(v)->counter);
7183+ return (*(volatile const int *)&(v)->counter);
7184+}
7185+
7186+/**
7187+ * atomic_read_unchecked - read atomic variable
7188+ * @v: pointer of type atomic_unchecked_t
7189+ *
7190+ * Atomically reads the value of @v.
7191+ */
7192+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193+{
7194+ return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202+ * atomic_set_unchecked - set atomic variable
7203+ * @v: pointer of type atomic_unchecked_t
7204+ * @i: required value
7205+ *
7206+ * Atomically sets the value of @v to @i.
7207+ */
7208+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209+{
7210+ v->counter = i;
7211+}
7212+
7213+/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221- asm volatile(LOCK_PREFIX "addl %1,%0"
7222+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223+
7224+#ifdef CONFIG_PAX_REFCOUNT
7225+ "jno 0f\n"
7226+ LOCK_PREFIX "subl %1,%0\n"
7227+ "int $4\n0:\n"
7228+ _ASM_EXTABLE(0b, 0b)
7229+#endif
7230+
7231+ : "+m" (v->counter)
7232+ : "ir" (i));
7233+}
7234+
7235+/**
7236+ * atomic_add_unchecked - add integer to atomic variable
7237+ * @i: integer value to add
7238+ * @v: pointer of type atomic_unchecked_t
7239+ *
7240+ * Atomically adds @i to @v.
7241+ */
7242+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243+{
7244+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252- asm volatile(LOCK_PREFIX "subl %1,%0"
7253+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254+
7255+#ifdef CONFIG_PAX_REFCOUNT
7256+ "jno 0f\n"
7257+ LOCK_PREFIX "addl %1,%0\n"
7258+ "int $4\n0:\n"
7259+ _ASM_EXTABLE(0b, 0b)
7260+#endif
7261+
7262+ : "+m" (v->counter)
7263+ : "ir" (i));
7264+}
7265+
7266+/**
7267+ * atomic_sub_unchecked - subtract integer from atomic variable
7268+ * @i: integer value to subtract
7269+ * @v: pointer of type atomic_unchecked_t
7270+ *
7271+ * Atomically subtracts @i from @v.
7272+ */
7273+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274+{
7275+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285+
7286+#ifdef CONFIG_PAX_REFCOUNT
7287+ "jno 0f\n"
7288+ LOCK_PREFIX "addl %2,%0\n"
7289+ "int $4\n0:\n"
7290+ _ASM_EXTABLE(0b, 0b)
7291+#endif
7292+
7293+ "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301- asm volatile(LOCK_PREFIX "incl %0"
7302+ asm volatile(LOCK_PREFIX "incl %0\n"
7303+
7304+#ifdef CONFIG_PAX_REFCOUNT
7305+ "jno 0f\n"
7306+ LOCK_PREFIX "decl %0\n"
7307+ "int $4\n0:\n"
7308+ _ASM_EXTABLE(0b, 0b)
7309+#endif
7310+
7311+ : "+m" (v->counter));
7312+}
7313+
7314+/**
7315+ * atomic_inc_unchecked - increment atomic variable
7316+ * @v: pointer of type atomic_unchecked_t
7317+ *
7318+ * Atomically increments @v by 1.
7319+ */
7320+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321+{
7322+ asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330- asm volatile(LOCK_PREFIX "decl %0"
7331+ asm volatile(LOCK_PREFIX "decl %0\n"
7332+
7333+#ifdef CONFIG_PAX_REFCOUNT
7334+ "jno 0f\n"
7335+ LOCK_PREFIX "incl %0\n"
7336+ "int $4\n0:\n"
7337+ _ASM_EXTABLE(0b, 0b)
7338+#endif
7339+
7340+ : "+m" (v->counter));
7341+}
7342+
7343+/**
7344+ * atomic_dec_unchecked - decrement atomic variable
7345+ * @v: pointer of type atomic_unchecked_t
7346+ *
7347+ * Atomically decrements @v by 1.
7348+ */
7349+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350+{
7351+ asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360+ asm volatile(LOCK_PREFIX "decl %0\n"
7361+
7362+#ifdef CONFIG_PAX_REFCOUNT
7363+ "jno 0f\n"
7364+ LOCK_PREFIX "incl %0\n"
7365+ "int $4\n0:\n"
7366+ _ASM_EXTABLE(0b, 0b)
7367+#endif
7368+
7369+ "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378+ asm volatile(LOCK_PREFIX "incl %0\n"
7379+
7380+#ifdef CONFIG_PAX_REFCOUNT
7381+ "jno 0f\n"
7382+ LOCK_PREFIX "decl %0\n"
7383+ "int $4\n0:\n"
7384+ _ASM_EXTABLE(0b, 0b)
7385+#endif
7386+
7387+ "sete %1\n"
7388+ : "+m" (v->counter), "=qm" (c)
7389+ : : "memory");
7390+ return c != 0;
7391+}
7392+
7393+/**
7394+ * atomic_inc_and_test_unchecked - increment and test
7395+ * @v: pointer of type atomic_unchecked_t
7396+ *
7397+ * Atomically increments @v by 1
7398+ * and returns true if the result is zero, or false for all
7399+ * other cases.
7400+ */
7401+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402+{
7403+ unsigned char c;
7404+
7405+ asm volatile(LOCK_PREFIX "incl %0\n"
7406+ "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416+
7417+#ifdef CONFIG_PAX_REFCOUNT
7418+ "jno 0f\n"
7419+ LOCK_PREFIX "subl %2,%0\n"
7420+ "int $4\n0:\n"
7421+ _ASM_EXTABLE(0b, 0b)
7422+#endif
7423+
7424+ "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432- return i + xadd(&v->counter, i);
7433+ return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441+ * atomic_add_return_unchecked - add integer and return
7442+ * @i: integer value to add
7443+ * @v: pointer of type atomic_unchecked_t
7444+ *
7445+ * Atomically adds @i to @v and returns @i + @v
7446+ */
7447+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448+{
7449+#ifdef CONFIG_M386
7450+ int __i;
7451+ unsigned long flags;
7452+ if (unlikely(boot_cpu_data.x86 <= 3))
7453+ goto no_xadd;
7454+#endif
7455+ /* Modern 486+ processor */
7456+ return i + xadd(&v->counter, i);
7457+
7458+#ifdef CONFIG_M386
7459+no_xadd: /* Legacy 386 processor */
7460+ raw_local_irq_save(flags);
7461+ __i = atomic_read_unchecked(v);
7462+ atomic_set_unchecked(v, i + __i);
7463+ raw_local_irq_restore(flags);
7464+ return i + __i;
7465+#endif
7466+}
7467+
7468+/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477+{
7478+ return atomic_add_return_unchecked(1, v);
7479+}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488+{
7489+ return cmpxchg(&v->counter, old, new);
7490+}
7491+
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498+{
7499+ return xchg(&v->counter, new);
7500+}
7501+
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509- int c, old;
7510+ int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513- if (unlikely(c == (u)))
7514+ if (unlikely(c == u))
7515 break;
7516- old = atomic_cmpxchg((v), c, c + (a));
7517+
7518+ asm volatile("addl %2,%0\n"
7519+
7520+#ifdef CONFIG_PAX_REFCOUNT
7521+ "jno 0f\n"
7522+ "subl %2,%0\n"
7523+ "int $4\n0:\n"
7524+ _ASM_EXTABLE(0b, 0b)
7525+#endif
7526+
7527+ : "=r" (new)
7528+ : "0" (c), "ir" (a));
7529+
7530+ old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538+/**
7539+ * atomic_inc_not_zero_hint - increment if not null
7540+ * @v: pointer of type atomic_t
7541+ * @hint: probable value of the atomic before the increment
7542+ *
7543+ * This version of atomic_inc_not_zero() gives a hint of probable
7544+ * value of the atomic. This helps processor to not read the memory
7545+ * before doing the atomic read/modify/write cycle, lowering
7546+ * number of bus transactions on some arches.
7547+ *
7548+ * Returns: 0 if increment was not done, 1 otherwise.
7549+ */
7550+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552+{
7553+ int val, c = hint, new;
7554+
7555+ /* sanity test, should be removed by compiler if hint is a constant */
7556+ if (!hint)
7557+ return __atomic_add_unless(v, 1, 0);
7558+
7559+ do {
7560+ asm volatile("incl %0\n"
7561+
7562+#ifdef CONFIG_PAX_REFCOUNT
7563+ "jno 0f\n"
7564+ "decl %0\n"
7565+ "int $4\n0:\n"
7566+ _ASM_EXTABLE(0b, 0b)
7567+#endif
7568+
7569+ : "=r" (new)
7570+ : "0" (c));
7571+
7572+ val = atomic_cmpxchg(v, c, new);
7573+ if (val == c)
7574+ return 1;
7575+ c = val;
7576+ } while (c);
7577+
7578+ return 0;
7579+}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584index 24098aa..1e37723 100644
7585--- a/arch/x86/include/asm/atomic64_32.h
7586+++ b/arch/x86/include/asm/atomic64_32.h
7587@@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591+#ifdef CONFIG_PAX_REFCOUNT
7592+typedef struct {
7593+ u64 __aligned(8) counter;
7594+} atomic64_unchecked_t;
7595+#else
7596+typedef atomic64_t atomic64_unchecked_t;
7597+#endif
7598+
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607+ * @p: pointer to type atomic64_unchecked_t
7608+ * @o: expected value
7609+ * @n: new value
7610+ *
7611+ * Atomically sets @v to @n if it was equal to @o and returns
7612+ * the old value.
7613+ */
7614+
7615+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616+{
7617+ return cmpxchg64(&v->counter, o, n);
7618+}
7619+
7620+/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628+ * atomic64_set_unchecked - set atomic64 variable
7629+ * @v: pointer to type atomic64_unchecked_t
7630+ * @n: value to assign
7631+ *
7632+ * Atomically sets the value of @v to @n.
7633+ */
7634+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635+{
7636+ unsigned high = (unsigned)(i >> 32);
7637+ unsigned low = (unsigned)i;
7638+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7639+ : "+b" (low), "+c" (high)
7640+ : "S" (v)
7641+ : "eax", "edx", "memory"
7642+ );
7643+}
7644+
7645+/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653+ * atomic64_read_unchecked - read atomic64 variable
7654+ * @v: pointer to type atomic64_unchecked_t
7655+ *
7656+ * Atomically reads the value of @v and returns it.
7657+ */
7658+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659+{
7660+ long long r;
7661+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662+ : "=A" (r), "+c" (v)
7663+ : : "memory"
7664+ );
7665+ return r;
7666+ }
7667+
7668+/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676+/**
7677+ * atomic64_add_return_unchecked - add and return
7678+ * @i: integer value to add
7679+ * @v: pointer to type atomic64_unchecked_t
7680+ *
7681+ * Atomically adds @i to @v and returns @i + *@v
7682+ */
7683+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684+{
7685+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686+ : "+A" (i), "+c" (v)
7687+ : : "memory"
7688+ );
7689+ return i;
7690+}
7691+
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700+{
7701+ long long a;
7702+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703+ : "=A" (a)
7704+ : "S" (v)
7705+ : "memory", "ecx"
7706+ );
7707+ return a;
7708+}
7709+
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717+ * atomic64_add_unchecked - add integer to atomic64 variable
7718+ * @i: integer value to add
7719+ * @v: pointer to type atomic64_unchecked_t
7720+ *
7721+ * Atomically adds @i to @v.
7722+ */
7723+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724+{
7725+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726+ : "+A" (i), "+c" (v)
7727+ : : "memory"
7728+ );
7729+ return i;
7730+}
7731+
7732+/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737index 0e1cbfc..5623683 100644
7738--- a/arch/x86/include/asm/atomic64_64.h
7739+++ b/arch/x86/include/asm/atomic64_64.h
7740@@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744- return (*(volatile long *)&(v)->counter);
7745+ return (*(volatile const long *)&(v)->counter);
7746+}
7747+
7748+/**
7749+ * atomic64_read_unchecked - read atomic64 variable
7750+ * @v: pointer of type atomic64_unchecked_t
7751+ *
7752+ * Atomically reads the value of @v.
7753+ * Doesn't imply a read memory barrier.
7754+ */
7755+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756+{
7757+ return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765+ * atomic64_set_unchecked - set atomic64 variable
7766+ * @v: pointer to type atomic64_unchecked_t
7767+ * @i: required value
7768+ *
7769+ * Atomically sets the value of @v to @i.
7770+ */
7771+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772+{
7773+ v->counter = i;
7774+}
7775+
7776+/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785+
7786+#ifdef CONFIG_PAX_REFCOUNT
7787+ "jno 0f\n"
7788+ LOCK_PREFIX "subq %1,%0\n"
7789+ "int $4\n0:\n"
7790+ _ASM_EXTABLE(0b, 0b)
7791+#endif
7792+
7793+ : "=m" (v->counter)
7794+ : "er" (i), "m" (v->counter));
7795+}
7796+
7797+/**
7798+ * atomic64_add_unchecked - add integer to atomic64 variable
7799+ * @i: integer value to add
7800+ * @v: pointer to type atomic64_unchecked_t
7801+ *
7802+ * Atomically adds @i to @v.
7803+ */
7804+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805+{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813- asm volatile(LOCK_PREFIX "subq %1,%0"
7814+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815+
7816+#ifdef CONFIG_PAX_REFCOUNT
7817+ "jno 0f\n"
7818+ LOCK_PREFIX "addq %1,%0\n"
7819+ "int $4\n0:\n"
7820+ _ASM_EXTABLE(0b, 0b)
7821+#endif
7822+
7823+ : "=m" (v->counter)
7824+ : "er" (i), "m" (v->counter));
7825+}
7826+
7827+/**
7828+ * atomic64_sub_unchecked - subtract the atomic64 variable
7829+ * @i: integer value to subtract
7830+ * @v: pointer to type atomic64_unchecked_t
7831+ *
7832+ * Atomically subtracts @i from @v.
7833+ */
7834+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835+{
7836+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846+
7847+#ifdef CONFIG_PAX_REFCOUNT
7848+ "jno 0f\n"
7849+ LOCK_PREFIX "addq %2,%0\n"
7850+ "int $4\n0:\n"
7851+ _ASM_EXTABLE(0b, 0b)
7852+#endif
7853+
7854+ "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862+ asm volatile(LOCK_PREFIX "incq %0\n"
7863+
7864+#ifdef CONFIG_PAX_REFCOUNT
7865+ "jno 0f\n"
7866+ LOCK_PREFIX "decq %0\n"
7867+ "int $4\n0:\n"
7868+ _ASM_EXTABLE(0b, 0b)
7869+#endif
7870+
7871+ : "=m" (v->counter)
7872+ : "m" (v->counter));
7873+}
7874+
7875+/**
7876+ * atomic64_inc_unchecked - increment atomic64 variable
7877+ * @v: pointer to type atomic64_unchecked_t
7878+ *
7879+ * Atomically increments @v by 1.
7880+ */
7881+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882+{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890- asm volatile(LOCK_PREFIX "decq %0"
7891+ asm volatile(LOCK_PREFIX "decq %0\n"
7892+
7893+#ifdef CONFIG_PAX_REFCOUNT
7894+ "jno 0f\n"
7895+ LOCK_PREFIX "incq %0\n"
7896+ "int $4\n0:\n"
7897+ _ASM_EXTABLE(0b, 0b)
7898+#endif
7899+
7900+ : "=m" (v->counter)
7901+ : "m" (v->counter));
7902+}
7903+
7904+/**
7905+ * atomic64_dec_unchecked - decrement atomic64 variable
7906+ * @v: pointer to type atomic64_t
7907+ *
7908+ * Atomically decrements @v by 1.
7909+ */
7910+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911+{
7912+ asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921+ asm volatile(LOCK_PREFIX "decq %0\n"
7922+
7923+#ifdef CONFIG_PAX_REFCOUNT
7924+ "jno 0f\n"
7925+ LOCK_PREFIX "incq %0\n"
7926+ "int $4\n0:\n"
7927+ _ASM_EXTABLE(0b, 0b)
7928+#endif
7929+
7930+ "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939+ asm volatile(LOCK_PREFIX "incq %0\n"
7940+
7941+#ifdef CONFIG_PAX_REFCOUNT
7942+ "jno 0f\n"
7943+ LOCK_PREFIX "decq %0\n"
7944+ "int $4\n0:\n"
7945+ _ASM_EXTABLE(0b, 0b)
7946+#endif
7947+
7948+ "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958+
7959+#ifdef CONFIG_PAX_REFCOUNT
7960+ "jno 0f\n"
7961+ LOCK_PREFIX "subq %2,%0\n"
7962+ "int $4\n0:\n"
7963+ _ASM_EXTABLE(0b, 0b)
7964+#endif
7965+
7966+ "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974+ return i + xadd_check_overflow(&v->counter, i);
7975+}
7976+
7977+/**
7978+ * atomic64_add_return_unchecked - add and return
7979+ * @i: integer value to add
7980+ * @v: pointer to type atomic64_unchecked_t
7981+ *
7982+ * Atomically adds @i to @v and returns @i + @v
7983+ */
7984+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985+{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994+{
7995+ return atomic64_add_return_unchecked(1, v);
7996+}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005+{
8006+ return cmpxchg(&v->counter, old, new);
8007+}
8008+
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016- long c, old;
8017+ long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020- if (unlikely(c == (u)))
8021+ if (unlikely(c == u))
8022 break;
8023- old = atomic64_cmpxchg((v), c, c + (a));
8024+
8025+ asm volatile("add %2,%0\n"
8026+
8027+#ifdef CONFIG_PAX_REFCOUNT
8028+ "jno 0f\n"
8029+ "sub %2,%0\n"
8030+ "int $4\n0:\n"
8031+ _ASM_EXTABLE(0b, 0b)
8032+#endif
8033+
8034+ : "=r" (new)
8035+ : "0" (c), "ir" (a));
8036+
8037+ old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042- return c != (u);
8043+ return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048index 1775d6e..b65017f 100644
8049--- a/arch/x86/include/asm/bitops.h
8050+++ b/arch/x86/include/asm/bitops.h
8051@@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061index 5e1a2ee..c9f9533 100644
8062--- a/arch/x86/include/asm/boot.h
8063+++ b/arch/x86/include/asm/boot.h
8064@@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073+#ifndef __ASSEMBLY__
8074+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076+#endif
8077+
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082index 48f99f1..d78ebf9 100644
8083--- a/arch/x86/include/asm/cache.h
8084+++ b/arch/x86/include/asm/cache.h
8085@@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093+#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102index 4e12668..501d239 100644
8103--- a/arch/x86/include/asm/cacheflush.h
8104+++ b/arch/x86/include/asm/cacheflush.h
8105@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109- return -1;
8110+ return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115index 46fc474..b02b0f9 100644
8116--- a/arch/x86/include/asm/checksum_32.h
8117+++ b/arch/x86/include/asm/checksum_32.h
8118@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123+ int len, __wsum sum,
8124+ int *src_err_ptr, int *dst_err_ptr);
8125+
8126+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127+ int len, __wsum sum,
8128+ int *src_err_ptr, int *dst_err_ptr);
8129+
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137- return csum_partial_copy_generic((__force void *)src, dst,
8138+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146- return csum_partial_copy_generic(src, (__force void *)dst,
8147+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152index 5d3acdf..6447a02 100644
8153--- a/arch/x86/include/asm/cmpxchg.h
8154+++ b/arch/x86/include/asm/cmpxchg.h
8155@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159+extern void __xadd_check_overflow_wrong_size(void)
8160+ __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168+#define __xadd_check_overflow(ptr, inc, lock) \
8169+ ({ \
8170+ __typeof__ (*(ptr)) __ret = (inc); \
8171+ switch (sizeof(*(ptr))) { \
8172+ case __X86_CASE_L: \
8173+ asm volatile (lock "xaddl %0, %1\n" \
8174+ "jno 0f\n" \
8175+ "mov %0,%1\n" \
8176+ "int $4\n0:\n" \
8177+ _ASM_EXTABLE(0b, 0b) \
8178+ : "+r" (__ret), "+m" (*(ptr)) \
8179+ : : "memory", "cc"); \
8180+ break; \
8181+ case __X86_CASE_Q: \
8182+ asm volatile (lock "xaddq %q0, %1\n" \
8183+ "jno 0f\n" \
8184+ "mov %0,%1\n" \
8185+ "int $4\n0:\n" \
8186+ _ASM_EXTABLE(0b, 0b) \
8187+ : "+r" (__ret), "+m" (*(ptr)) \
8188+ : : "memory", "cc"); \
8189+ break; \
8190+ default: \
8191+ __xadd_check_overflow_wrong_size(); \
8192+ } \
8193+ __ret; \
8194+ })
8195+
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204+
8205 #endif /* ASM_X86_CMPXCHG_H */
8206diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207index f3444f7..051a196 100644
8208--- a/arch/x86/include/asm/cpufeature.h
8209+++ b/arch/x86/include/asm/cpufeature.h
8210@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214- ".section .altinstr_replacement,\"ax\"\n"
8215+ ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220index 41935fa..3b40db8 100644
8221--- a/arch/x86/include/asm/desc.h
8222+++ b/arch/x86/include/asm/desc.h
8223@@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227+#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235+ desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243-extern gate_desc idt_table[];
8244-
8245-struct gdt_page {
8246- struct desc_struct gdt[GDT_ENTRIES];
8247-} __attribute__((aligned(PAGE_SIZE)));
8248-
8249-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250+extern gate_desc idt_table[256];
8251
8252+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255- return per_cpu(gdt_page, cpu).gdt;
8256+ return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264- gate->a = (seg << 16) | (base & 0xffff);
8265- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266+ gate->gate.offset_low = base;
8267+ gate->gate.seg = seg;
8268+ gate->gate.reserved = 0;
8269+ gate->gate.type = type;
8270+ gate->gate.s = 0;
8271+ gate->gate.dpl = dpl;
8272+ gate->gate.p = 1;
8273+ gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281+ pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283+ pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288+ pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290+ pax_close_kernel();
8291 }
8292
8293 static inline void
8294@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298+ pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300+ pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308+ pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310+ pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318+ pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321+ pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329-static inline void _set_gate(int gate, unsigned type, void *addr,
8330+static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338-static inline void set_intr_gate(unsigned int n, void *addr)
8339+static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347-static inline void set_system_intr_gate(unsigned int n, void *addr)
8348+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354-static inline void set_system_trap_gate(unsigned int n, void *addr)
8355+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361-static inline void set_trap_gate(unsigned int n, void *addr)
8362+static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388+#ifdef CONFIG_X86_32
8389+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390+{
8391+ struct desc_struct d;
8392+
8393+ if (likely(limit))
8394+ limit = (limit - 1UL) >> PAGE_SHIFT;
8395+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397+}
8398+#endif
8399+
8400 #endif /* _ASM_X86_DESC_H */
8401diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402index 278441f..b95a174 100644
8403--- a/arch/x86/include/asm/desc_defs.h
8404+++ b/arch/x86/include/asm/desc_defs.h
8405@@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409+ struct {
8410+ u16 offset_low;
8411+ u16 seg;
8412+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413+ unsigned offset_high: 16;
8414+ } gate;
8415 };
8416 } __attribute__((packed));
8417
8418diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419index 908b969..a1f4eb4 100644
8420--- a/arch/x86/include/asm/e820.h
8421+++ b/arch/x86/include/asm/e820.h
8422@@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426-#define BIOS_BEGIN 0x000a0000
8427+#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432index 5f962df..7289f09 100644
8433--- a/arch/x86/include/asm/elf.h
8434+++ b/arch/x86/include/asm/elf.h
8435@@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439+#ifdef CONFIG_PAX_SEGMEXEC
8440+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441+#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443+#endif
8444+
8445+#ifdef CONFIG_PAX_ASLR
8446+#ifdef CONFIG_X86_32
8447+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448+
8449+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451+#else
8452+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453+
8454+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456+#endif
8457+#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461@@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465- if (vdso_enabled) \
8466- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467- (unsigned long)current->mm->context.vdso); \
8468+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472@@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486-#define arch_randomize_brk arch_randomize_brk
8487-
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492index cc70c1c..d96d011 100644
8493--- a/arch/x86/include/asm/emergency-restart.h
8494+++ b/arch/x86/include/asm/emergency-restart.h
8495@@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499-extern void machine_emergency_restart(void);
8500+extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504index d09bb03..4ea4194 100644
8505--- a/arch/x86/include/asm/futex.h
8506+++ b/arch/x86/include/asm/futex.h
8507@@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511+ typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523+ typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527@@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531- "+m" (*uaddr), "=&r" (tem) \
8532+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566index eb92a6e..b98b2f4 100644
8567--- a/arch/x86/include/asm/hw_irq.h
8568+++ b/arch/x86/include/asm/hw_irq.h
8569@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573-extern atomic_t irq_err_count;
8574-extern atomic_t irq_mis_count;
8575+extern atomic_unchecked_t irq_err_count;
8576+extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581index c9e09ea..73888df 100644
8582--- a/arch/x86/include/asm/i387.h
8583+++ b/arch/x86/include/asm/i387.h
8584@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591+#endif
8592+
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603+#endif
8604+
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612- in L1 during context switch. The best choices are unfortunately
8613- different for UP and SMP */
8614-#ifdef CONFIG_SMP
8615-#define safe_address (__per_cpu_offset[0])
8616-#else
8617-#define safe_address (kstat_cpu(0).cpustat.user)
8618-#endif
8619+ in L1 during context switch. */
8620+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628- __save_init_fpu(me->task);
8629+ __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634index d8e8eef..99f81ae 100644
8635--- a/arch/x86/include/asm/io.h
8636+++ b/arch/x86/include/asm/io.h
8637@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643+{
8644+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645+}
8646+
8647+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648+{
8649+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650+}
8651+
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656index bba3cf8..06bc8da 100644
8657--- a/arch/x86/include/asm/irqflags.h
8658+++ b/arch/x86/include/asm/irqflags.h
8659@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667+
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672index 5478825..839e88c 100644
8673--- a/arch/x86/include/asm/kprobes.h
8674+++ b/arch/x86/include/asm/kprobes.h
8675@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679-#define MAX_STACK_SIZE 64
8680-#define MIN_STACK_SIZE(ADDR) \
8681- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682- THREAD_SIZE - (unsigned long)(ADDR))) \
8683- ? (MAX_STACK_SIZE) \
8684- : (((unsigned long)current_thread_info()) + \
8685- THREAD_SIZE - (unsigned long)(ADDR)))
8686+#define MAX_STACK_SIZE 64UL
8687+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692index b4973f4..7c4d3fc 100644
8693--- a/arch/x86/include/asm/kvm_host.h
8694+++ b/arch/x86/include/asm/kvm_host.h
8695@@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699- atomic_t invlpg_counter;
8700+ atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704@@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708-};
8709+} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714index 9cdae5d..300d20f 100644
8715--- a/arch/x86/include/asm/local.h
8716+++ b/arch/x86/include/asm/local.h
8717@@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721- asm volatile(_ASM_INC "%0"
8722+ asm volatile(_ASM_INC "%0\n"
8723+
8724+#ifdef CONFIG_PAX_REFCOUNT
8725+ "jno 0f\n"
8726+ _ASM_DEC "%0\n"
8727+ "int $4\n0:\n"
8728+ _ASM_EXTABLE(0b, 0b)
8729+#endif
8730+
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736- asm volatile(_ASM_DEC "%0"
8737+ asm volatile(_ASM_DEC "%0\n"
8738+
8739+#ifdef CONFIG_PAX_REFCOUNT
8740+ "jno 0f\n"
8741+ _ASM_INC "%0\n"
8742+ "int $4\n0:\n"
8743+ _ASM_EXTABLE(0b, 0b)
8744+#endif
8745+
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751- asm volatile(_ASM_ADD "%1,%0"
8752+ asm volatile(_ASM_ADD "%1,%0\n"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ _ASM_SUB "%1,%0\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767- asm volatile(_ASM_SUB "%1,%0"
8768+ asm volatile(_ASM_SUB "%1,%0\n"
8769+
8770+#ifdef CONFIG_PAX_REFCOUNT
8771+ "jno 0f\n"
8772+ _ASM_ADD "%1,%0\n"
8773+ "int $4\n0:\n"
8774+ _ASM_EXTABLE(0b, 0b)
8775+#endif
8776+
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784- asm volatile(_ASM_SUB "%2,%0; sete %1"
8785+ asm volatile(_ASM_SUB "%2,%0\n"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ _ASM_ADD "%2,%0\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794+ "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802- asm volatile(_ASM_DEC "%0; sete %1"
8803+ asm volatile(_ASM_DEC "%0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ _ASM_INC "%0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812+ "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820- asm volatile(_ASM_INC "%0; sete %1"
8821+ asm volatile(_ASM_INC "%0\n"
8822+
8823+#ifdef CONFIG_PAX_REFCOUNT
8824+ "jno 0f\n"
8825+ _ASM_DEC "%0\n"
8826+ "int $4\n0:\n"
8827+ _ASM_EXTABLE(0b, 0b)
8828+#endif
8829+
8830+ "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838- asm volatile(_ASM_ADD "%2,%0; sets %1"
8839+ asm volatile(_ASM_ADD "%2,%0\n"
8840+
8841+#ifdef CONFIG_PAX_REFCOUNT
8842+ "jno 0f\n"
8843+ _ASM_SUB "%2,%0\n"
8844+ "int $4\n0:\n"
8845+ _ASM_EXTABLE(0b, 0b)
8846+#endif
8847+
8848+ "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856- asm volatile(_ASM_XADD "%0, %1;"
8857+ asm volatile(_ASM_XADD "%0, %1\n"
8858+
8859+#ifdef CONFIG_PAX_REFCOUNT
8860+ "jno 0f\n"
8861+ _ASM_MOV "%0,%1\n"
8862+ "int $4\n0:\n"
8863+ _ASM_EXTABLE(0b, 0b)
8864+#endif
8865+
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870index 593e51d..fa69c9a 100644
8871--- a/arch/x86/include/asm/mman.h
8872+++ b/arch/x86/include/asm/mman.h
8873@@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877+#ifdef __KERNEL__
8878+#ifndef __ASSEMBLY__
8879+#ifdef CONFIG_X86_32
8880+#define arch_mmap_check i386_mmap_check
8881+int i386_mmap_check(unsigned long addr, unsigned long len,
8882+ unsigned long flags);
8883+#endif
8884+#endif
8885+#endif
8886+
8887 #endif /* _ASM_X86_MMAN_H */
8888diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889index 5f55e69..e20bfb1 100644
8890--- a/arch/x86/include/asm/mmu.h
8891+++ b/arch/x86/include/asm/mmu.h
8892@@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896- void *ldt;
8897+ struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901@@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905- void *vdso;
8906+ unsigned long vdso;
8907+
8908+#ifdef CONFIG_X86_32
8909+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910+ unsigned long user_cs_base;
8911+ unsigned long user_cs_limit;
8912+
8913+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914+ cpumask_t cpu_user_cs_mask;
8915+#endif
8916+
8917+#endif
8918+#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923index 6902152..399f3a2 100644
8924--- a/arch/x86/include/asm/mmu_context.h
8925+++ b/arch/x86/include/asm/mmu_context.h
8926@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930+
8931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932+ unsigned int i;
8933+ pgd_t *pgd;
8934+
8935+ pax_open_kernel();
8936+ pgd = get_cpu_pgd(smp_processor_id());
8937+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938+ set_pgd_batched(pgd+i, native_make_pgd(0));
8939+ pax_close_kernel();
8940+#endif
8941+
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950+ int tlbstate = TLBSTATE_OK;
8951+#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956+ tlbstate = percpu_read(cpu_tlbstate.state);
8957+#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964+#ifdef CONFIG_PAX_PER_CPU_PGD
8965+ pax_open_kernel();
8966+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968+ pax_close_kernel();
8969+ load_cr3(get_cpu_pgd(cpu));
8970+#else
8971 load_cr3(next->pgd);
8972+#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980- }
8981+
8982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983+ if (!(__supported_pte_mask & _PAGE_NX)) {
8984+ smp_mb__before_clear_bit();
8985+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986+ smp_mb__after_clear_bit();
8987+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8988+ }
8989+#endif
8990+
8991+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993+ prev->context.user_cs_limit != next->context.user_cs_limit))
8994+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996+ else if (unlikely(tlbstate != TLBSTATE_OK))
8997+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998+#endif
8999+#endif
9000+
9001+ }
9002 else {
9003+
9004+#ifdef CONFIG_PAX_PER_CPU_PGD
9005+ pax_open_kernel();
9006+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008+ pax_close_kernel();
9009+ load_cr3(get_cpu_pgd(cpu));
9010+#endif
9011+
9012+#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020+
9021+#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023+#endif
9024+
9025 load_LDT_nolock(&next->context);
9026+
9027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028+ if (!(__supported_pte_mask & _PAGE_NX))
9029+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9030+#endif
9031+
9032+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033+#ifdef CONFIG_PAX_PAGEEXEC
9034+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035+#endif
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+
9039 }
9040+#endif
9041 }
9042-#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047index 9eae775..c914fea 100644
9048--- a/arch/x86/include/asm/module.h
9049+++ b/arch/x86/include/asm/module.h
9050@@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054+#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058@@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062-#ifdef CONFIG_X86_32
9063-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068+#else
9069+#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072+#ifdef CONFIG_PAX_MEMORY_UDEREF
9073+#define MODULE_PAX_UDEREF "UDEREF "
9074+#else
9075+#define MODULE_PAX_UDEREF ""
9076+#endif
9077+
9078+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079+
9080 #endif /* _ASM_X86_MODULE_H */
9081diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082index 7639dbf..e08a58c 100644
9083--- a/arch/x86/include/asm/page_64_types.h
9084+++ b/arch/x86/include/asm/page_64_types.h
9085@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089-extern unsigned long phys_base;
9090+extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095index a7d2db9..edb023e 100644
9096--- a/arch/x86/include/asm/paravirt.h
9097+++ b/arch/x86/include/asm/paravirt.h
9098@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103+{
9104+ pgdval_t val = native_pgd_val(pgd);
9105+
9106+ if (sizeof(pgdval_t) > sizeof(long))
9107+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108+ val, (u64)val >> 32);
9109+ else
9110+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111+ val);
9112+}
9113+
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121+#ifdef CONFIG_PAX_KERNEXEC
9122+static inline unsigned long pax_open_kernel(void)
9123+{
9124+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125+}
9126+
9127+static inline unsigned long pax_close_kernel(void)
9128+{
9129+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130+}
9131+#else
9132+static inline unsigned long pax_open_kernel(void) { return 0; }
9133+static inline unsigned long pax_close_kernel(void) { return 0; }
9134+#endif
9135+
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139@@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143-#define PARA_INDIRECT(addr) *%cs:addr
9144+#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152+
9153+#define GET_CR0_INTO_RDI \
9154+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155+ mov %rax,%rdi
9156+
9157+#define SET_RDI_INTO_CR0 \
9158+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159+
9160+#define GET_CR3_INTO_RDI \
9161+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162+ mov %rax,%rdi
9163+
9164+#define SET_RDI_INTO_CR3 \
9165+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166+
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171index 8e8b9a4..f07d725 100644
9172--- a/arch/x86/include/asm/paravirt_types.h
9173+++ b/arch/x86/include/asm/paravirt_types.h
9174@@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178-};
9179+} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186-};
9187+} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193-};
9194+} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202-};
9203+} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207@@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211-};
9212+} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228+
9229+#ifdef CONFIG_PAX_KERNEXEC
9230+ unsigned long (*pax_open_kernel)(void);
9231+ unsigned long (*pax_close_kernel)(void);
9232+#endif
9233+
9234 };
9235
9236 struct arch_spinlock;
9237@@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241-};
9242+} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247index b4389a4..b7ff22c 100644
9248--- a/arch/x86/include/asm/pgalloc.h
9249+++ b/arch/x86/include/asm/pgalloc.h
9250@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255+}
9256+
9257+static inline void pmd_populate_user(struct mm_struct *mm,
9258+ pmd_t *pmd, pte_t *pte)
9259+{
9260+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265index 98391db..8f6984e 100644
9266--- a/arch/x86/include/asm/pgtable-2level.h
9267+++ b/arch/x86/include/asm/pgtable-2level.h
9268@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272+ pax_open_kernel();
9273 *pmdp = pmd;
9274+ pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279index effff47..f9e4035 100644
9280--- a/arch/x86/include/asm/pgtable-3level.h
9281+++ b/arch/x86/include/asm/pgtable-3level.h
9282@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286+ pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288+ pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293+ pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295+ pax_close_kernel();
9296 }
9297
9298 /*
9299diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300index 18601c8..3d716d1 100644
9301--- a/arch/x86/include/asm/pgtable.h
9302+++ b/arch/x86/include/asm/pgtable.h
9303@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315+#define pax_open_kernel() native_pax_open_kernel()
9316+#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321+
9322+#ifdef CONFIG_PAX_KERNEXEC
9323+static inline unsigned long native_pax_open_kernel(void)
9324+{
9325+ unsigned long cr0;
9326+
9327+ preempt_disable();
9328+ barrier();
9329+ cr0 = read_cr0() ^ X86_CR0_WP;
9330+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331+ write_cr0(cr0);
9332+ return cr0 ^ X86_CR0_WP;
9333+}
9334+
9335+static inline unsigned long native_pax_close_kernel(void)
9336+{
9337+ unsigned long cr0;
9338+
9339+ cr0 = read_cr0() ^ X86_CR0_WP;
9340+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341+ write_cr0(cr0);
9342+ barrier();
9343+ preempt_enable_no_resched();
9344+ return cr0 ^ X86_CR0_WP;
9345+}
9346+#else
9347+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349+#endif
9350+
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355+static inline int pte_user(pte_t pte)
9356+{
9357+ return pte_val(pte) & _PAGE_USER;
9358+}
9359+
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367+static inline pte_t pte_mkread(pte_t pte)
9368+{
9369+ return __pte(pte_val(pte) | _PAGE_USER);
9370+}
9371+
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374- return pte_clear_flags(pte, _PAGE_NX);
9375+#ifdef CONFIG_X86_PAE
9376+ if (__supported_pte_mask & _PAGE_NX)
9377+ return pte_clear_flags(pte, _PAGE_NX);
9378+ else
9379+#endif
9380+ return pte_set_flags(pte, _PAGE_USER);
9381+}
9382+
9383+static inline pte_t pte_exprotect(pte_t pte)
9384+{
9385+#ifdef CONFIG_X86_PAE
9386+ if (__supported_pte_mask & _PAGE_NX)
9387+ return pte_set_flags(pte, _PAGE_NX);
9388+ else
9389+#endif
9390+ return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398+
9399+#ifdef CONFIG_PAX_PER_CPU_PGD
9400+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402+{
9403+ return cpu_pgd[cpu];
9404+}
9405+#endif
9406+
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425+
9426+#ifdef CONFIG_PAX_PER_CPU_PGD
9427+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428+#endif
9429+
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437+#ifdef CONFIG_X86_32
9438+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439+#else
9440+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442+
9443+#ifdef CONFIG_PAX_MEMORY_UDEREF
9444+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445+#else
9446+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447+#endif
9448+
9449+#endif
9450+
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461- memcpy(dst, src, count * sizeof(pgd_t));
9462+ pax_open_kernel();
9463+ while (count--)
9464+ *dst++ = *src++;
9465+ pax_close_kernel();
9466 }
9467
9468+#ifdef CONFIG_PAX_PER_CPU_PGD
9469+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470+#endif
9471+
9472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474+#else
9475+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476+#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481index 0c92113..34a77c6 100644
9482--- a/arch/x86/include/asm/pgtable_32.h
9483+++ b/arch/x86/include/asm/pgtable_32.h
9484@@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488-extern pgd_t swapper_pg_dir[1024];
9489-extern pgd_t initial_page_table[1024];
9490-
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499+extern pgd_t initial_page_table[PTRS_PER_PGD];
9500+#ifdef CONFIG_X86_PAE
9501+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502+#endif
9503+
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511+ pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513+ pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517@@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521+#define HAVE_ARCH_UNMAPPED_AREA
9522+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523+
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528index ed5903b..c7fe163 100644
9529--- a/arch/x86/include/asm/pgtable_32_types.h
9530+++ b/arch/x86/include/asm/pgtable_32_types.h
9531@@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535-# define PMD_SIZE (1UL << PMD_SHIFT)
9536+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544+#ifdef CONFIG_PAX_KERNEXEC
9545+#ifndef __ASSEMBLY__
9546+extern unsigned char MODULES_EXEC_VADDR[];
9547+extern unsigned char MODULES_EXEC_END[];
9548+#endif
9549+#include <asm/boot.h>
9550+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552+#else
9553+#define ktla_ktva(addr) (addr)
9554+#define ktva_ktla(addr) (addr)
9555+#endif
9556+
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561index 975f709..107976d 100644
9562--- a/arch/x86/include/asm/pgtable_64.h
9563+++ b/arch/x86/include/asm/pgtable_64.h
9564@@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568+extern pud_t level3_vmalloc_start_pgt[512];
9569+extern pud_t level3_vmalloc_end_pgt[512];
9570+extern pud_t level3_vmemmap_pgt[512];
9571+extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574-extern pmd_t level2_ident_pgt[512];
9575-extern pgd_t init_level4_pgt[];
9576+extern pmd_t level2_ident_pgt[512*2];
9577+extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585+ pax_open_kernel();
9586 *pmdp = pmd;
9587+ pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595+ pax_open_kernel();
9596+ *pgdp = pgd;
9597+ pax_close_kernel();
9598+}
9599+
9600+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601+{
9602 *pgdp = pgd;
9603 }
9604
9605diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606index 766ea16..5b96cb3 100644
9607--- a/arch/x86/include/asm/pgtable_64_types.h
9608+++ b/arch/x86/include/asm/pgtable_64_types.h
9609@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613+#define MODULES_EXEC_VADDR MODULES_VADDR
9614+#define MODULES_EXEC_END MODULES_END
9615+
9616+#define ktla_ktva(addr) (addr)
9617+#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621index 013286a..8b42f4f 100644
9622--- a/arch/x86/include/asm/pgtable_types.h
9623+++ b/arch/x86/include/asm/pgtable_types.h
9624@@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641@@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649@@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653-#else
9654+#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656+#else
9657+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661@@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667+
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671@@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680@@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695+#endif
9696
9697+#if PAGETABLE_LEVELS == 3
9698+#include <asm-generic/pgtable-nopud.h>
9699+#endif
9700+
9701+#if PAGETABLE_LEVELS == 2
9702+#include <asm-generic/pgtable-nopmd.h>
9703+#endif
9704+
9705+#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713-#include <asm-generic/pgtable-nopud.h>
9714-
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722-#include <asm-generic/pgtable-nopmd.h>
9723-
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731-extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736index b650435..eefa566 100644
9737--- a/arch/x86/include/asm/processor.h
9738+++ b/arch/x86/include/asm/processor.h
9739@@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744+extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752+
9753+#ifdef CONFIG_PAX_SEGMEXEC
9754+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756+#else
9757 #define STACK_TOP TASK_SIZE
9758-#define STACK_TOP_MAX STACK_TOP
9759+#endif
9760+
9761+#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782-#define KSTK_TOP(info) \
9783-({ \
9784- unsigned long *__ptr = (unsigned long *)(info); \
9785- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786-})
9787+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811- 0xc0000000 : 0xFFFFe000)
9812+ 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834+#ifdef CONFIG_PAX_SEGMEXEC
9835+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836+#endif
9837+
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842index 3566454..4bdfb8c 100644
9843--- a/arch/x86/include/asm/ptrace.h
9844+++ b/arch/x86/include/asm/ptrace.h
9845@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849- * user_mode_vm(regs) determines whether a register set came from user mode.
9850+ * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856+ * be used.
9857 */
9858-static inline int user_mode(struct pt_regs *regs)
9859+static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864- return !!(regs->cs & 3);
9865+ return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869-static inline int user_mode_vm(struct pt_regs *regs)
9870+static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876- return user_mode(regs);
9877+ return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885+ unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891- return regs->cs == __USER_CS;
9892+ return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901index 92f29706..a79cbbb 100644
9902--- a/arch/x86/include/asm/reboot.h
9903+++ b/arch/x86/include/asm/reboot.h
9904@@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908- void (*restart)(char *cmd);
9909- void (*halt)(void);
9910- void (*power_off)(void);
9911+ void (* __noreturn restart)(char *cmd);
9912+ void (* __noreturn halt)(void);
9913+ void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916- void (*emergency_restart)(void);
9917-};
9918+ void (* __noreturn emergency_restart)(void);
9919+} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925-void machine_real_restart(unsigned int type);
9926+void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931index 2dbe4a7..ce1db00 100644
9932--- a/arch/x86/include/asm/rwsem.h
9933+++ b/arch/x86/include/asm/rwsem.h
9934@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938+
9939+#ifdef CONFIG_PAX_REFCOUNT
9940+ "jno 0f\n"
9941+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9942+ "int $4\n0:\n"
9943+ _ASM_EXTABLE(0b, 0b)
9944+#endif
9945+
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953+
9954+#ifdef CONFIG_PAX_REFCOUNT
9955+ "jno 0f\n"
9956+ "sub %3,%2\n"
9957+ "int $4\n0:\n"
9958+ _ASM_EXTABLE(0b, 0b)
9959+#endif
9960+
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968+
9969+#ifdef CONFIG_PAX_REFCOUNT
9970+ "jno 0f\n"
9971+ "mov %1,(%2)\n"
9972+ "int $4\n0:\n"
9973+ _ASM_EXTABLE(0b, 0b)
9974+#endif
9975+
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983+
9984+#ifdef CONFIG_PAX_REFCOUNT
9985+ "jno 0f\n"
9986+ "mov %1,(%2)\n"
9987+ "int $4\n0:\n"
9988+ _ASM_EXTABLE(0b, 0b)
9989+#endif
9990+
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998+
9999+#ifdef CONFIG_PAX_REFCOUNT
10000+ "jno 0f\n"
10001+ "mov %1,(%2)\n"
10002+ "int $4\n0:\n"
10003+ _ASM_EXTABLE(0b, 0b)
10004+#endif
10005+
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013+
10014+#ifdef CONFIG_PAX_REFCOUNT
10015+ "jno 0f\n"
10016+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017+ "int $4\n0:\n"
10018+ _ASM_EXTABLE(0b, 0b)
10019+#endif
10020+
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030+
10031+#ifdef CONFIG_PAX_REFCOUNT
10032+ "jno 0f\n"
10033+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034+ "int $4\n0:\n"
10035+ _ASM_EXTABLE(0b, 0b)
10036+#endif
10037+
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045- return delta + xadd(&sem->count, delta);
10046+ return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051index 5e64171..f58957e 100644
10052--- a/arch/x86/include/asm/segment.h
10053+++ b/arch/x86/include/asm/segment.h
10054@@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058- * 29 - unused
10059- * 30 - unused
10060+ * 29 - PCI BIOS CS
10061+ * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068+
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072@@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077+
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081@@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087+
10088+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090+
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094@@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103@@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108+
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112@@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121index 73b11bc..d4a3b63 100644
10122--- a/arch/x86/include/asm/smp.h
10123+++ b/arch/x86/include/asm/smp.h
10124@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128-DECLARE_PER_CPU(int, cpu_number);
10129+DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133@@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137-};
10138+} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146-#define raw_smp_processor_id() (percpu_read(cpu_number))
10147-
10148-#define stack_smp_processor_id() \
10149-({ \
10150- struct thread_info *ti; \
10151- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152- ti->cpu; \
10153-})
10154+#define raw_smp_processor_id() (percpu_read(cpu_number))
10155+#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160index 972c260..43ab1fd 100644
10161--- a/arch/x86/include/asm/spinlock.h
10162+++ b/arch/x86/include/asm/spinlock.h
10163@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167+
10168+#ifdef CONFIG_PAX_REFCOUNT
10169+ "jno 0f\n"
10170+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171+ "int $4\n0:\n"
10172+ _ASM_EXTABLE(0b, 0b)
10173+#endif
10174+
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182+
10183+#ifdef CONFIG_PAX_REFCOUNT
10184+ "jno 0f\n"
10185+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186+ "int $4\n0:\n"
10187+ _ASM_EXTABLE(0b, 0b)
10188+#endif
10189+
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199+
10200+#ifdef CONFIG_PAX_REFCOUNT
10201+ "jno 0f\n"
10202+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203+ "int $4\n0:\n"
10204+ _ASM_EXTABLE(0b, 0b)
10205+#endif
10206+
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214+
10215+#ifdef CONFIG_PAX_REFCOUNT
10216+ "jno 0f\n"
10217+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218+ "int $4\n0:\n"
10219+ _ASM_EXTABLE(0b, 0b)
10220+#endif
10221+
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226index 1575177..cb23f52 100644
10227--- a/arch/x86/include/asm/stackprotector.h
10228+++ b/arch/x86/include/asm/stackprotector.h
10229@@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242-#ifdef CONFIG_X86_32
10243+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248index 70bbe39..4ae2bd4 100644
10249--- a/arch/x86/include/asm/stacktrace.h
10250+++ b/arch/x86/include/asm/stacktrace.h
10251@@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255-struct thread_info;
10256+struct task_struct;
10257 struct stacktrace_ops;
10258
10259-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260- unsigned long *stack,
10261- unsigned long bp,
10262- const struct stacktrace_ops *ops,
10263- void *data,
10264- unsigned long *end,
10265- int *graph);
10266+typedef unsigned long walk_stack_t(struct task_struct *task,
10267+ void *stack_start,
10268+ unsigned long *stack,
10269+ unsigned long bp,
10270+ const struct stacktrace_ops *ops,
10271+ void *data,
10272+ unsigned long *end,
10273+ int *graph);
10274
10275-extern unsigned long
10276-print_context_stack(struct thread_info *tinfo,
10277- unsigned long *stack, unsigned long bp,
10278- const struct stacktrace_ops *ops, void *data,
10279- unsigned long *end, int *graph);
10280-
10281-extern unsigned long
10282-print_context_stack_bp(struct thread_info *tinfo,
10283- unsigned long *stack, unsigned long bp,
10284- const struct stacktrace_ops *ops, void *data,
10285- unsigned long *end, int *graph);
10286+extern walk_stack_t print_context_stack;
10287+extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291@@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295- walk_stack_t walk_stack;
10296+ walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301index cb23852..2dde194 100644
10302--- a/arch/x86/include/asm/sys_ia32.h
10303+++ b/arch/x86/include/asm/sys_ia32.h
10304@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314index 2d2f01c..f985723 100644
10315--- a/arch/x86/include/asm/system.h
10316+++ b/arch/x86/include/asm/system.h
10317@@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326@@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331+ [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339- return __limit + 1;
10340+ return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344@@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348-extern unsigned long arch_align_stack(unsigned long sp);
10349+#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355-void stop_this_cpu(void *dummy);
10356+void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361index a1fe5c1..ee326d8 100644
10362--- a/arch/x86/include/asm/thread_info.h
10363+++ b/arch/x86/include/asm/thread_info.h
10364@@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368+#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372@@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376- struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380@@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384-#ifdef CONFIG_X86_32
10385- unsigned long previous_esp; /* ESP of the previous stack in
10386- case of nested (IRQ) stacks
10387- */
10388- __u8 supervisor_stack[0];
10389-#endif
10390+ unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394-#define INIT_THREAD_INFO(tsk) \
10395+#define INIT_THREAD_INFO \
10396 { \
10397- .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401@@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405-#define init_thread_info (init_thread_union.thread_info)
10406+#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410@@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414-#ifdef CONFIG_X86_32
10415-
10416-#define STACK_WARN (THREAD_SIZE/8)
10417-/*
10418- * macros/functions for gaining access to the thread information structure
10419- *
10420- * preempt_count needs to be 1 initially, until the scheduler is functional.
10421- */
10422-#ifndef __ASSEMBLY__
10423-
10424-
10425-/* how to get the current stack pointer from C */
10426-register unsigned long current_stack_pointer asm("esp") __used;
10427-
10428-/* how to get the thread information struct from C */
10429-static inline struct thread_info *current_thread_info(void)
10430-{
10431- return (struct thread_info *)
10432- (current_stack_pointer & ~(THREAD_SIZE - 1));
10433-}
10434-
10435-#else /* !__ASSEMBLY__ */
10436-
10437+#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440- movl $-THREAD_SIZE, reg; \
10441- andl %esp, reg
10442+ mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445-#define GET_THREAD_INFO_WITH_ESP(reg) \
10446- andl $-THREAD_SIZE, reg
10447+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448+#else
10449+/* how to get the thread information struct from C */
10450+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451+
10452+static __always_inline struct thread_info *current_thread_info(void)
10453+{
10454+ return percpu_read_stable(current_tinfo);
10455+}
10456+#endif
10457+
10458+#ifdef CONFIG_X86_32
10459+
10460+#define STACK_WARN (THREAD_SIZE/8)
10461+/*
10462+ * macros/functions for gaining access to the thread information structure
10463+ *
10464+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10465+ */
10466+#ifndef __ASSEMBLY__
10467+
10468+/* how to get the current stack pointer from C */
10469+register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475-#include <asm/percpu.h>
10476-#define KERNEL_STACK_OFFSET (5*8)
10477-
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485-static inline struct thread_info *current_thread_info(void)
10486-{
10487- struct thread_info *ti;
10488- ti = (void *)(percpu_read_stable(kernel_stack) +
10489- KERNEL_STACK_OFFSET - THREAD_SIZE);
10490- return ti;
10491-}
10492-
10493-#else /* !__ASSEMBLY__ */
10494-
10495-/* how to get the thread information struct from ASM */
10496-#define GET_THREAD_INFO(reg) \
10497- movq PER_CPU_VAR(kernel_stack),reg ; \
10498- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499-
10500+/* how to get the current stack pointer from C */
10501+register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509+
10510+#define __HAVE_THREAD_FUNCTIONS
10511+#define task_thread_info(task) (&(task)->tinfo)
10512+#define task_stack_page(task) ((task)->stack)
10513+#define setup_thread_stack(p, org) do {} while (0)
10514+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515+
10516+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517+extern struct task_struct *alloc_task_struct_node(int node);
10518+extern void free_task_struct(struct task_struct *);
10519+
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523index 36361bf..324f262 100644
10524--- a/arch/x86/include/asm/uaccess.h
10525+++ b/arch/x86/include/asm/uaccess.h
10526@@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530+#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538+
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542@@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547+void __set_fs(mm_segment_t x);
10548+void set_fs(mm_segment_t x);
10549+#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551+#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555@@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561+#define access_ok(type, addr, size) \
10562+({ \
10563+ long __size = size; \
10564+ unsigned long __addr = (unsigned long)addr; \
10565+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10566+ unsigned long __end_ao = __addr + __size - 1; \
10567+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569+ while(__addr_ao <= __end_ao) { \
10570+ char __c_ao; \
10571+ __addr_ao += PAGE_SIZE; \
10572+ if (__size > PAGE_SIZE) \
10573+ cond_resched(); \
10574+ if (__get_user(__c_ao, (char __user *)__addr)) \
10575+ break; \
10576+ if (type != VERIFY_WRITE) { \
10577+ __addr = __addr_ao; \
10578+ continue; \
10579+ } \
10580+ if (__put_user(__c_ao, (char __user *)__addr)) \
10581+ break; \
10582+ __addr = __addr_ao; \
10583+ } \
10584+ } \
10585+ __ret_ao; \
10586+})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594-
10595+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596+#define __copyuser_seg "gs;"
10597+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599+#else
10600+#define __copyuser_seg
10601+#define __COPYUSER_SET_ES
10602+#define __COPYUSER_RESTORE_ES
10603+#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607- asm volatile("1: movl %%eax,0(%2)\n" \
10608- "2: movl %%edx,4(%2)\n" \
10609+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618- asm volatile("1: movl %%eax,0(%1)\n" \
10619- "2: movl %%edx,4(%1)\n" \
10620+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629- __pu_val = x; \
10630+ __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634@@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643@@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647- : "=r" (err), ltype(x) \
10648+ : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652@@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661@@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666+ (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672-#define __m(x) (*(struct __large_struct __user *)(x))
10673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674+#define ____m(x) \
10675+({ \
10676+ unsigned long ____x = (unsigned long)(x); \
10677+ if (____x < PAX_USER_SHADOW_BASE) \
10678+ ____x += PAX_USER_SHADOW_BASE; \
10679+ (void __user *)____x; \
10680+})
10681+#else
10682+#define ____m(x) (x)
10683+#endif
10684+#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715+#define __get_user(x, ptr) get_user((x), (ptr))
10716+#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719+#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728+#define __put_user(x, ptr) put_user((x), (ptr))
10729+#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732+#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741+ (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746index 566e803..b9521e9 100644
10747--- a/arch/x86/include/asm/uaccess_32.h
10748+++ b/arch/x86/include/asm/uaccess_32.h
10749@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753+ if ((long)n < 0)
10754+ return n;
10755+
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763+ if (!__builtin_constant_p(n))
10764+ check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772+
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779+ if ((long)n < 0)
10780+ return n;
10781+
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785@@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789+
10790+ if ((long)n < 0)
10791+ return n;
10792+
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800+ if (!__builtin_constant_p(n))
10801+ check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809+
10810+ if ((long)n < 0)
10811+ return n;
10812+
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816@@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820- return __copy_from_user_ll_nocache_nozero(to, from, n);
10821+ if ((long)n < 0)
10822+ return n;
10823+
10824+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827-unsigned long __must_check copy_to_user(void __user *to,
10828- const void *from, unsigned long n);
10829-unsigned long __must_check _copy_from_user(void *to,
10830- const void __user *from,
10831- unsigned long n);
10832-
10833+extern void copy_to_user_overflow(void)
10834+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10836+#else
10837+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838+#endif
10839+;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847-static inline unsigned long __must_check copy_from_user(void *to,
10848- const void __user *from,
10849- unsigned long n)
10850+/**
10851+ * copy_to_user: - Copy a block of data into user space.
10852+ * @to: Destination address, in user space.
10853+ * @from: Source address, in kernel space.
10854+ * @n: Number of bytes to copy.
10855+ *
10856+ * Context: User context only. This function may sleep.
10857+ *
10858+ * Copy data from kernel space to user space.
10859+ *
10860+ * Returns number of bytes that could not be copied.
10861+ * On success, this will be zero.
10862+ */
10863+static inline unsigned long __must_check
10864+copy_to_user(void __user *to, const void *from, unsigned long n)
10865+{
10866+ int sz = __compiletime_object_size(from);
10867+
10868+ if (unlikely(sz != -1 && sz < n))
10869+ copy_to_user_overflow();
10870+ else if (access_ok(VERIFY_WRITE, to, n))
10871+ n = __copy_to_user(to, from, n);
10872+ return n;
10873+}
10874+
10875+/**
10876+ * copy_from_user: - Copy a block of data from user space.
10877+ * @to: Destination address, in kernel space.
10878+ * @from: Source address, in user space.
10879+ * @n: Number of bytes to copy.
10880+ *
10881+ * Context: User context only. This function may sleep.
10882+ *
10883+ * Copy data from user space to kernel space.
10884+ *
10885+ * Returns number of bytes that could not be copied.
10886+ * On success, this will be zero.
10887+ *
10888+ * If some data could not be copied, this function will pad the copied
10889+ * data to the requested size using zero bytes.
10890+ */
10891+static inline unsigned long __must_check
10892+copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896- if (likely(sz == -1 || sz >= n))
10897- n = _copy_from_user(to, from, n);
10898- else
10899+ if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901-
10902+ else if (access_ok(VERIFY_READ, from, n))
10903+ n = __copy_from_user(to, from, n);
10904+ else if ((long)n > 0) {
10905+ if (!__builtin_constant_p(n))
10906+ check_object_size(to, n, false);
10907+ memset(to, 0, n);
10908+ }
10909 return n;
10910 }
10911
10912diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913index 1c66d30..23ab77d 100644
10914--- a/arch/x86/include/asm/uaccess_64.h
10915+++ b/arch/x86/include/asm/uaccess_64.h
10916@@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920+#include <asm/pgtable.h>
10921+
10922+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926@@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930-copy_user_generic_string(void *to, const void *from, unsigned len);
10931+copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937-copy_user_generic(void *to, const void *from, unsigned len)
10938+copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942@@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 return ret;
10944 }
10945
10946+static __always_inline __must_check unsigned long
10947+__copy_to_user(void __user *to, const void *from, unsigned long len);
10948+static __always_inline __must_check unsigned long
10949+__copy_from_user(void *to, const void __user *from, unsigned long len);
10950 __must_check unsigned long
10951-_copy_to_user(void __user *to, const void *from, unsigned len);
10952-__must_check unsigned long
10953-_copy_from_user(void *to, const void __user *from, unsigned len);
10954-__must_check unsigned long
10955-copy_in_user(void __user *to, const void __user *from, unsigned len);
10956+copy_in_user(void __user *to, const void __user *from, unsigned long len);
10957
10958 static inline unsigned long __must_check copy_from_user(void *to,
10959 const void __user *from,
10960 unsigned long n)
10961 {
10962- int sz = __compiletime_object_size(to);
10963-
10964 might_fault();
10965- if (likely(sz == -1 || sz >= n))
10966- n = _copy_from_user(to, from, n);
10967-#ifdef CONFIG_DEBUG_VM
10968- else
10969- WARN(1, "Buffer overflow detected!\n");
10970-#endif
10971+
10972+ if (access_ok(VERIFY_READ, from, n))
10973+ n = __copy_from_user(to, from, n);
10974+ else if (n < INT_MAX) {
10975+ if (!__builtin_constant_p(n))
10976+ check_object_size(to, n, false);
10977+ memset(to, 0, n);
10978+ }
10979 return n;
10980 }
10981
10982 static __always_inline __must_check
10983-int copy_to_user(void __user *dst, const void *src, unsigned size)
10984+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10985 {
10986 might_fault();
10987
10988- return _copy_to_user(dst, src, size);
10989+ if (access_ok(VERIFY_WRITE, dst, size))
10990+ size = __copy_to_user(dst, src, size);
10991+ return size;
10992 }
10993
10994 static __always_inline __must_check
10995-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10996+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10997 {
10998- int ret = 0;
10999+ int sz = __compiletime_object_size(dst);
11000+ unsigned ret = 0;
11001
11002 might_fault();
11003- if (!__builtin_constant_p(size))
11004- return copy_user_generic(dst, (__force void *)src, size);
11005+
11006+ if (size > INT_MAX)
11007+ return size;
11008+
11009+#ifdef CONFIG_PAX_MEMORY_UDEREF
11010+ if (!__access_ok(VERIFY_READ, src, size))
11011+ return size;
11012+#endif
11013+
11014+ if (unlikely(sz != -1 && sz < size)) {
11015+#ifdef CONFIG_DEBUG_VM
11016+ WARN(1, "Buffer overflow detected!\n");
11017+#endif
11018+ return size;
11019+ }
11020+
11021+ if (!__builtin_constant_p(size)) {
11022+ check_object_size(dst, size, false);
11023+
11024+#ifdef CONFIG_PAX_MEMORY_UDEREF
11025+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11026+ src += PAX_USER_SHADOW_BASE;
11027+#endif
11028+
11029+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11030+ }
11031 switch (size) {
11032- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11033+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11034 ret, "b", "b", "=q", 1);
11035 return ret;
11036- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11037+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11038 ret, "w", "w", "=r", 2);
11039 return ret;
11040- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11041+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11042 ret, "l", "k", "=r", 4);
11043 return ret;
11044- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11045+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11046 ret, "q", "", "=r", 8);
11047 return ret;
11048 case 10:
11049- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 10);
11052 if (unlikely(ret))
11053 return ret;
11054 __get_user_asm(*(u16 *)(8 + (char *)dst),
11055- (u16 __user *)(8 + (char __user *)src),
11056+ (const u16 __user *)(8 + (const char __user *)src),
11057 ret, "w", "w", "=r", 2);
11058 return ret;
11059 case 16:
11060- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11061+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11062 ret, "q", "", "=r", 16);
11063 if (unlikely(ret))
11064 return ret;
11065 __get_user_asm(*(u64 *)(8 + (char *)dst),
11066- (u64 __user *)(8 + (char __user *)src),
11067+ (const u64 __user *)(8 + (const char __user *)src),
11068 ret, "q", "", "=r", 8);
11069 return ret;
11070 default:
11071- return copy_user_generic(dst, (__force void *)src, size);
11072+
11073+#ifdef CONFIG_PAX_MEMORY_UDEREF
11074+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11075+ src += PAX_USER_SHADOW_BASE;
11076+#endif
11077+
11078+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11079 }
11080 }
11081
11082 static __always_inline __must_check
11083-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11084+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11085 {
11086- int ret = 0;
11087+ int sz = __compiletime_object_size(src);
11088+ unsigned ret = 0;
11089
11090 might_fault();
11091- if (!__builtin_constant_p(size))
11092- return copy_user_generic((__force void *)dst, src, size);
11093+
11094+ if (size > INT_MAX)
11095+ return size;
11096+
11097+#ifdef CONFIG_PAX_MEMORY_UDEREF
11098+ if (!__access_ok(VERIFY_WRITE, dst, size))
11099+ return size;
11100+#endif
11101+
11102+ if (unlikely(sz != -1 && sz < size)) {
11103+#ifdef CONFIG_DEBUG_VM
11104+ WARN(1, "Buffer overflow detected!\n");
11105+#endif
11106+ return size;
11107+ }
11108+
11109+ if (!__builtin_constant_p(size)) {
11110+ check_object_size(src, size, true);
11111+
11112+#ifdef CONFIG_PAX_MEMORY_UDEREF
11113+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114+ dst += PAX_USER_SHADOW_BASE;
11115+#endif
11116+
11117+ return copy_user_generic((__force_kernel void *)dst, src, size);
11118+ }
11119 switch (size) {
11120- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11121+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11122 ret, "b", "b", "iq", 1);
11123 return ret;
11124- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11125+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11126 ret, "w", "w", "ir", 2);
11127 return ret;
11128- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11129+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11130 ret, "l", "k", "ir", 4);
11131 return ret;
11132- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11133+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11134 ret, "q", "", "er", 8);
11135 return ret;
11136 case 10:
11137- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 10);
11140 if (unlikely(ret))
11141 return ret;
11142 asm("":::"memory");
11143- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11144+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11145 ret, "w", "w", "ir", 2);
11146 return ret;
11147 case 16:
11148- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11149+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11150 ret, "q", "", "er", 16);
11151 if (unlikely(ret))
11152 return ret;
11153 asm("":::"memory");
11154- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11155+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11156 ret, "q", "", "er", 8);
11157 return ret;
11158 default:
11159- return copy_user_generic((__force void *)dst, src, size);
11160+
11161+#ifdef CONFIG_PAX_MEMORY_UDEREF
11162+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11163+ dst += PAX_USER_SHADOW_BASE;
11164+#endif
11165+
11166+ return copy_user_generic((__force_kernel void *)dst, src, size);
11167 }
11168 }
11169
11170 static __always_inline __must_check
11171-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11172+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11173 {
11174- int ret = 0;
11175+ unsigned ret = 0;
11176
11177 might_fault();
11178- if (!__builtin_constant_p(size))
11179- return copy_user_generic((__force void *)dst,
11180- (__force void *)src, size);
11181+
11182+ if (size > INT_MAX)
11183+ return size;
11184+
11185+#ifdef CONFIG_PAX_MEMORY_UDEREF
11186+ if (!__access_ok(VERIFY_READ, src, size))
11187+ return size;
11188+ if (!__access_ok(VERIFY_WRITE, dst, size))
11189+ return size;
11190+#endif
11191+
11192+ if (!__builtin_constant_p(size)) {
11193+
11194+#ifdef CONFIG_PAX_MEMORY_UDEREF
11195+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11196+ src += PAX_USER_SHADOW_BASE;
11197+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11198+ dst += PAX_USER_SHADOW_BASE;
11199+#endif
11200+
11201+ return copy_user_generic((__force_kernel void *)dst,
11202+ (__force_kernel const void *)src, size);
11203+ }
11204 switch (size) {
11205 case 1: {
11206 u8 tmp;
11207- __get_user_asm(tmp, (u8 __user *)src,
11208+ __get_user_asm(tmp, (const u8 __user *)src,
11209 ret, "b", "b", "=q", 1);
11210 if (likely(!ret))
11211 __put_user_asm(tmp, (u8 __user *)dst,
11212@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11213 }
11214 case 2: {
11215 u16 tmp;
11216- __get_user_asm(tmp, (u16 __user *)src,
11217+ __get_user_asm(tmp, (const u16 __user *)src,
11218 ret, "w", "w", "=r", 2);
11219 if (likely(!ret))
11220 __put_user_asm(tmp, (u16 __user *)dst,
11221@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11222
11223 case 4: {
11224 u32 tmp;
11225- __get_user_asm(tmp, (u32 __user *)src,
11226+ __get_user_asm(tmp, (const u32 __user *)src,
11227 ret, "l", "k", "=r", 4);
11228 if (likely(!ret))
11229 __put_user_asm(tmp, (u32 __user *)dst,
11230@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11231 }
11232 case 8: {
11233 u64 tmp;
11234- __get_user_asm(tmp, (u64 __user *)src,
11235+ __get_user_asm(tmp, (const u64 __user *)src,
11236 ret, "q", "", "=r", 8);
11237 if (likely(!ret))
11238 __put_user_asm(tmp, (u64 __user *)dst,
11239@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11240 return ret;
11241 }
11242 default:
11243- return copy_user_generic((__force void *)dst,
11244- (__force void *)src, size);
11245+
11246+#ifdef CONFIG_PAX_MEMORY_UDEREF
11247+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11248+ src += PAX_USER_SHADOW_BASE;
11249+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11250+ dst += PAX_USER_SHADOW_BASE;
11251+#endif
11252+
11253+ return copy_user_generic((__force_kernel void *)dst,
11254+ (__force_kernel const void *)src, size);
11255 }
11256 }
11257
11258@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11259 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11260
11261 static __must_check __always_inline int
11262-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11263+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11264 {
11265- return copy_user_generic(dst, (__force const void *)src, size);
11266+ if (size > INT_MAX)
11267+ return size;
11268+
11269+#ifdef CONFIG_PAX_MEMORY_UDEREF
11270+ if (!__access_ok(VERIFY_READ, src, size))
11271+ return size;
11272+
11273+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274+ src += PAX_USER_SHADOW_BASE;
11275+#endif
11276+
11277+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 }
11279
11280-static __must_check __always_inline int
11281-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11282+static __must_check __always_inline unsigned long
11283+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11284 {
11285- return copy_user_generic((__force void *)dst, src, size);
11286+ if (size > INT_MAX)
11287+ return size;
11288+
11289+#ifdef CONFIG_PAX_MEMORY_UDEREF
11290+ if (!__access_ok(VERIFY_WRITE, dst, size))
11291+ return size;
11292+
11293+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11294+ dst += PAX_USER_SHADOW_BASE;
11295+#endif
11296+
11297+ return copy_user_generic((__force_kernel void *)dst, src, size);
11298 }
11299
11300-extern long __copy_user_nocache(void *dst, const void __user *src,
11301- unsigned size, int zerorest);
11302+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11303+ unsigned long size, int zerorest);
11304
11305-static inline int
11306-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11307+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11308 {
11309 might_sleep();
11310+
11311+ if (size > INT_MAX)
11312+ return size;
11313+
11314+#ifdef CONFIG_PAX_MEMORY_UDEREF
11315+ if (!__access_ok(VERIFY_READ, src, size))
11316+ return size;
11317+#endif
11318+
11319 return __copy_user_nocache(dst, src, size, 1);
11320 }
11321
11322-static inline int
11323-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11324- unsigned size)
11325+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11326+ unsigned long size)
11327 {
11328+ if (size > INT_MAX)
11329+ return size;
11330+
11331+#ifdef CONFIG_PAX_MEMORY_UDEREF
11332+ if (!__access_ok(VERIFY_READ, src, size))
11333+ return size;
11334+#endif
11335+
11336 return __copy_user_nocache(dst, src, size, 0);
11337 }
11338
11339-unsigned long
11340-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11341+extern unsigned long
11342+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11343
11344 #endif /* _ASM_X86_UACCESS_64_H */
11345diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11346index bb05228..d763d5b 100644
11347--- a/arch/x86/include/asm/vdso.h
11348+++ b/arch/x86/include/asm/vdso.h
11349@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11350 #define VDSO32_SYMBOL(base, name) \
11351 ({ \
11352 extern const char VDSO32_##name[]; \
11353- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11354+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11355 })
11356 #endif
11357
11358diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11359index 1971e65..1e3559b 100644
11360--- a/arch/x86/include/asm/x86_init.h
11361+++ b/arch/x86/include/asm/x86_init.h
11362@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11363 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11364 void (*find_smp_config)(void);
11365 void (*get_smp_config)(unsigned int early);
11366-};
11367+} __no_const;
11368
11369 /**
11370 * struct x86_init_resources - platform specific resource related ops
11371@@ -42,7 +42,7 @@ struct x86_init_resources {
11372 void (*probe_roms)(void);
11373 void (*reserve_resources)(void);
11374 char *(*memory_setup)(void);
11375-};
11376+} __no_const;
11377
11378 /**
11379 * struct x86_init_irqs - platform specific interrupt setup
11380@@ -55,7 +55,7 @@ struct x86_init_irqs {
11381 void (*pre_vector_init)(void);
11382 void (*intr_init)(void);
11383 void (*trap_init)(void);
11384-};
11385+} __no_const;
11386
11387 /**
11388 * struct x86_init_oem - oem platform specific customizing functions
11389@@ -65,7 +65,7 @@ struct x86_init_irqs {
11390 struct x86_init_oem {
11391 void (*arch_setup)(void);
11392 void (*banner)(void);
11393-};
11394+} __no_const;
11395
11396 /**
11397 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11398@@ -76,7 +76,7 @@ struct x86_init_oem {
11399 */
11400 struct x86_init_mapping {
11401 void (*pagetable_reserve)(u64 start, u64 end);
11402-};
11403+} __no_const;
11404
11405 /**
11406 * struct x86_init_paging - platform specific paging functions
11407@@ -86,7 +86,7 @@ struct x86_init_mapping {
11408 struct x86_init_paging {
11409 void (*pagetable_setup_start)(pgd_t *base);
11410 void (*pagetable_setup_done)(pgd_t *base);
11411-};
11412+} __no_const;
11413
11414 /**
11415 * struct x86_init_timers - platform specific timer setup
11416@@ -101,7 +101,7 @@ struct x86_init_timers {
11417 void (*tsc_pre_init)(void);
11418 void (*timer_init)(void);
11419 void (*wallclock_init)(void);
11420-};
11421+} __no_const;
11422
11423 /**
11424 * struct x86_init_iommu - platform specific iommu setup
11425@@ -109,7 +109,7 @@ struct x86_init_timers {
11426 */
11427 struct x86_init_iommu {
11428 int (*iommu_init)(void);
11429-};
11430+} __no_const;
11431
11432 /**
11433 * struct x86_init_pci - platform specific pci init functions
11434@@ -123,7 +123,7 @@ struct x86_init_pci {
11435 int (*init)(void);
11436 void (*init_irq)(void);
11437 void (*fixup_irqs)(void);
11438-};
11439+} __no_const;
11440
11441 /**
11442 * struct x86_init_ops - functions for platform specific setup
11443@@ -139,7 +139,7 @@ struct x86_init_ops {
11444 struct x86_init_timers timers;
11445 struct x86_init_iommu iommu;
11446 struct x86_init_pci pci;
11447-};
11448+} __no_const;
11449
11450 /**
11451 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11452@@ -147,7 +147,7 @@ struct x86_init_ops {
11453 */
11454 struct x86_cpuinit_ops {
11455 void (*setup_percpu_clockev)(void);
11456-};
11457+} __no_const;
11458
11459 /**
11460 * struct x86_platform_ops - platform specific runtime functions
11461@@ -169,7 +169,7 @@ struct x86_platform_ops {
11462 void (*nmi_init)(void);
11463 unsigned char (*get_nmi_reason)(void);
11464 int (*i8042_detect)(void);
11465-};
11466+} __no_const;
11467
11468 struct pci_dev;
11469
11470@@ -177,7 +177,7 @@ struct x86_msi_ops {
11471 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11472 void (*teardown_msi_irq)(unsigned int irq);
11473 void (*teardown_msi_irqs)(struct pci_dev *dev);
11474-};
11475+} __no_const;
11476
11477 extern struct x86_init_ops x86_init;
11478 extern struct x86_cpuinit_ops x86_cpuinit;
11479diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11480index c6ce245..ffbdab7 100644
11481--- a/arch/x86/include/asm/xsave.h
11482+++ b/arch/x86/include/asm/xsave.h
11483@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11484 {
11485 int err;
11486
11487+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11489+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11490+#endif
11491+
11492 /*
11493 * Clear the xsave header first, so that reserved fields are
11494 * initialized to zero.
11495@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11496 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11497 {
11498 int err;
11499- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11500+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11501 u32 lmask = mask;
11502 u32 hmask = mask >> 32;
11503
11504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11505+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11506+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11507+#endif
11508+
11509 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11510 "2:\n"
11511 ".section .fixup,\"ax\"\n"
11512diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11513index 6a564ac..9b1340c 100644
11514--- a/arch/x86/kernel/acpi/realmode/Makefile
11515+++ b/arch/x86/kernel/acpi/realmode/Makefile
11516@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11517 $(call cc-option, -fno-stack-protector) \
11518 $(call cc-option, -mpreferred-stack-boundary=2)
11519 KBUILD_CFLAGS += $(call cc-option, -m32)
11520+ifdef CONSTIFY_PLUGIN
11521+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11522+endif
11523 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11524 GCOV_PROFILE := n
11525
11526diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11527index b4fd836..4358fe3 100644
11528--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11529+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11530@@ -108,6 +108,9 @@ wakeup_code:
11531 /* Do any other stuff... */
11532
11533 #ifndef CONFIG_64BIT
11534+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11535+ call verify_cpu
11536+
11537 /* This could also be done in C code... */
11538 movl pmode_cr3, %eax
11539 movl %eax, %cr3
11540@@ -131,6 +134,7 @@ wakeup_code:
11541 movl pmode_cr0, %eax
11542 movl %eax, %cr0
11543 jmp pmode_return
11544+# include "../../verify_cpu.S"
11545 #else
11546 pushw $0
11547 pushw trampoline_segment
11548diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11549index 103b6ab..2004d0a 100644
11550--- a/arch/x86/kernel/acpi/sleep.c
11551+++ b/arch/x86/kernel/acpi/sleep.c
11552@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11553 header->trampoline_segment = trampoline_address() >> 4;
11554 #ifdef CONFIG_SMP
11555 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11556+
11557+ pax_open_kernel();
11558 early_gdt_descr.address =
11559 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11560+ pax_close_kernel();
11561+
11562 initial_gs = per_cpu_offset(smp_processor_id());
11563 #endif
11564 initial_code = (unsigned long)wakeup_long64;
11565diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11566index 13ab720..95d5442 100644
11567--- a/arch/x86/kernel/acpi/wakeup_32.S
11568+++ b/arch/x86/kernel/acpi/wakeup_32.S
11569@@ -30,13 +30,11 @@ wakeup_pmode_return:
11570 # and restore the stack ... but you need gdt for this to work
11571 movl saved_context_esp, %esp
11572
11573- movl %cs:saved_magic, %eax
11574- cmpl $0x12345678, %eax
11575+ cmpl $0x12345678, saved_magic
11576 jne bogus_magic
11577
11578 # jump to place where we left off
11579- movl saved_eip, %eax
11580- jmp *%eax
11581+ jmp *(saved_eip)
11582
11583 bogus_magic:
11584 jmp bogus_magic
11585diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11586index 1f84794..e23f862 100644
11587--- a/arch/x86/kernel/alternative.c
11588+++ b/arch/x86/kernel/alternative.c
11589@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11590 */
11591 for (a = start; a < end; a++) {
11592 instr = (u8 *)&a->instr_offset + a->instr_offset;
11593+
11594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11595+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11596+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11597+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11598+#endif
11599+
11600 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11601 BUG_ON(a->replacementlen > a->instrlen);
11602 BUG_ON(a->instrlen > sizeof(insnbuf));
11603@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11604 for (poff = start; poff < end; poff++) {
11605 u8 *ptr = (u8 *)poff + *poff;
11606
11607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11608+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11609+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11610+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11611+#endif
11612+
11613 if (!*poff || ptr < text || ptr >= text_end)
11614 continue;
11615 /* turn DS segment override prefix into lock prefix */
11616- if (*ptr == 0x3e)
11617+ if (*ktla_ktva(ptr) == 0x3e)
11618 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11619 };
11620 mutex_unlock(&text_mutex);
11621@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11622 for (poff = start; poff < end; poff++) {
11623 u8 *ptr = (u8 *)poff + *poff;
11624
11625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11626+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11627+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11628+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11629+#endif
11630+
11631 if (!*poff || ptr < text || ptr >= text_end)
11632 continue;
11633 /* turn lock prefix into DS segment override prefix */
11634- if (*ptr == 0xf0)
11635+ if (*ktla_ktva(ptr) == 0xf0)
11636 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11637 };
11638 mutex_unlock(&text_mutex);
11639@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11640
11641 BUG_ON(p->len > MAX_PATCH_LEN);
11642 /* prep the buffer with the original instructions */
11643- memcpy(insnbuf, p->instr, p->len);
11644+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11645 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11646 (unsigned long)p->instr, p->len);
11647
11648@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11649 if (smp_alt_once)
11650 free_init_pages("SMP alternatives",
11651 (unsigned long)__smp_locks,
11652- (unsigned long)__smp_locks_end);
11653+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11654
11655 restart_nmi();
11656 }
11657@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11658 * instructions. And on the local CPU you need to be protected again NMI or MCE
11659 * handlers seeing an inconsistent instruction while you patch.
11660 */
11661-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11662+void *__kprobes text_poke_early(void *addr, const void *opcode,
11663 size_t len)
11664 {
11665 unsigned long flags;
11666 local_irq_save(flags);
11667- memcpy(addr, opcode, len);
11668+
11669+ pax_open_kernel();
11670+ memcpy(ktla_ktva(addr), opcode, len);
11671 sync_core();
11672+ pax_close_kernel();
11673+
11674 local_irq_restore(flags);
11675 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11676 that causes hangs on some VIA CPUs. */
11677@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11678 */
11679 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11680 {
11681- unsigned long flags;
11682- char *vaddr;
11683+ unsigned char *vaddr = ktla_ktva(addr);
11684 struct page *pages[2];
11685- int i;
11686+ size_t i;
11687
11688 if (!core_kernel_text((unsigned long)addr)) {
11689- pages[0] = vmalloc_to_page(addr);
11690- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11691+ pages[0] = vmalloc_to_page(vaddr);
11692+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11693 } else {
11694- pages[0] = virt_to_page(addr);
11695+ pages[0] = virt_to_page(vaddr);
11696 WARN_ON(!PageReserved(pages[0]));
11697- pages[1] = virt_to_page(addr + PAGE_SIZE);
11698+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11699 }
11700 BUG_ON(!pages[0]);
11701- local_irq_save(flags);
11702- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11703- if (pages[1])
11704- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11705- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11706- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11707- clear_fixmap(FIX_TEXT_POKE0);
11708- if (pages[1])
11709- clear_fixmap(FIX_TEXT_POKE1);
11710- local_flush_tlb();
11711- sync_core();
11712- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11713- that causes hangs on some VIA CPUs. */
11714+ text_poke_early(addr, opcode, len);
11715 for (i = 0; i < len; i++)
11716- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11717- local_irq_restore(flags);
11718+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11719 return addr;
11720 }
11721
11722diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11723index f98d84c..e402a69 100644
11724--- a/arch/x86/kernel/apic/apic.c
11725+++ b/arch/x86/kernel/apic/apic.c
11726@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11727 /*
11728 * Debug level, exported for io_apic.c
11729 */
11730-unsigned int apic_verbosity;
11731+int apic_verbosity;
11732
11733 int pic_mode;
11734
11735@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11736 apic_write(APIC_ESR, 0);
11737 v1 = apic_read(APIC_ESR);
11738 ack_APIC_irq();
11739- atomic_inc(&irq_err_count);
11740+ atomic_inc_unchecked(&irq_err_count);
11741
11742 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11743 smp_processor_id(), v0 , v1);
11744diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11745index 6d939d7..0697fcc 100644
11746--- a/arch/x86/kernel/apic/io_apic.c
11747+++ b/arch/x86/kernel/apic/io_apic.c
11748@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11749 }
11750 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11751
11752-void lock_vector_lock(void)
11753+void lock_vector_lock(void) __acquires(vector_lock)
11754 {
11755 /* Used to the online set of cpus does not change
11756 * during assign_irq_vector.
11757@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11758 raw_spin_lock(&vector_lock);
11759 }
11760
11761-void unlock_vector_lock(void)
11762+void unlock_vector_lock(void) __releases(vector_lock)
11763 {
11764 raw_spin_unlock(&vector_lock);
11765 }
11766@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11767 ack_APIC_irq();
11768 }
11769
11770-atomic_t irq_mis_count;
11771+atomic_unchecked_t irq_mis_count;
11772
11773 static void ack_apic_level(struct irq_data *data)
11774 {
11775@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11776 * at the cpu.
11777 */
11778 if (!(v & (1 << (i & 0x1f)))) {
11779- atomic_inc(&irq_mis_count);
11780+ atomic_inc_unchecked(&irq_mis_count);
11781
11782 eoi_ioapic_irq(irq, cfg);
11783 }
11784diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11785index a46bd38..6b906d7 100644
11786--- a/arch/x86/kernel/apm_32.c
11787+++ b/arch/x86/kernel/apm_32.c
11788@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11789 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11790 * even though they are called in protected mode.
11791 */
11792-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11793+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11794 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11795
11796 static const char driver_version[] = "1.16ac"; /* no spaces */
11797@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11798 BUG_ON(cpu != 0);
11799 gdt = get_cpu_gdt_table(cpu);
11800 save_desc_40 = gdt[0x40 / 8];
11801+
11802+ pax_open_kernel();
11803 gdt[0x40 / 8] = bad_bios_desc;
11804+ pax_close_kernel();
11805
11806 apm_irq_save(flags);
11807 APM_DO_SAVE_SEGS;
11808@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11809 &call->esi);
11810 APM_DO_RESTORE_SEGS;
11811 apm_irq_restore(flags);
11812+
11813+ pax_open_kernel();
11814 gdt[0x40 / 8] = save_desc_40;
11815+ pax_close_kernel();
11816+
11817 put_cpu();
11818
11819 return call->eax & 0xff;
11820@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11821 BUG_ON(cpu != 0);
11822 gdt = get_cpu_gdt_table(cpu);
11823 save_desc_40 = gdt[0x40 / 8];
11824+
11825+ pax_open_kernel();
11826 gdt[0x40 / 8] = bad_bios_desc;
11827+ pax_close_kernel();
11828
11829 apm_irq_save(flags);
11830 APM_DO_SAVE_SEGS;
11831@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11832 &call->eax);
11833 APM_DO_RESTORE_SEGS;
11834 apm_irq_restore(flags);
11835+
11836+ pax_open_kernel();
11837 gdt[0x40 / 8] = save_desc_40;
11838+ pax_close_kernel();
11839+
11840 put_cpu();
11841 return error;
11842 }
11843@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11844 * code to that CPU.
11845 */
11846 gdt = get_cpu_gdt_table(0);
11847+
11848+ pax_open_kernel();
11849 set_desc_base(&gdt[APM_CS >> 3],
11850 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11851 set_desc_base(&gdt[APM_CS_16 >> 3],
11852 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11853 set_desc_base(&gdt[APM_DS >> 3],
11854 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11855+ pax_close_kernel();
11856
11857 proc_create("apm", 0, NULL, &apm_file_ops);
11858
11859diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11860index 4f13faf..87db5d2 100644
11861--- a/arch/x86/kernel/asm-offsets.c
11862+++ b/arch/x86/kernel/asm-offsets.c
11863@@ -33,6 +33,8 @@ void common(void) {
11864 OFFSET(TI_status, thread_info, status);
11865 OFFSET(TI_addr_limit, thread_info, addr_limit);
11866 OFFSET(TI_preempt_count, thread_info, preempt_count);
11867+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11868+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11869
11870 BLANK();
11871 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11872@@ -53,8 +55,26 @@ void common(void) {
11873 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11874 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11875 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11876+
11877+#ifdef CONFIG_PAX_KERNEXEC
11878+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11879 #endif
11880
11881+#ifdef CONFIG_PAX_MEMORY_UDEREF
11882+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11883+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11884+#ifdef CONFIG_X86_64
11885+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11886+#endif
11887+#endif
11888+
11889+#endif
11890+
11891+ BLANK();
11892+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11893+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11894+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11895+
11896 #ifdef CONFIG_XEN
11897 BLANK();
11898 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11899diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11900index e72a119..6e2955d 100644
11901--- a/arch/x86/kernel/asm-offsets_64.c
11902+++ b/arch/x86/kernel/asm-offsets_64.c
11903@@ -69,6 +69,7 @@ int main(void)
11904 BLANK();
11905 #undef ENTRY
11906
11907+ DEFINE(TSS_size, sizeof(struct tss_struct));
11908 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11909 BLANK();
11910
11911diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11912index 25f24dc..4094a7f 100644
11913--- a/arch/x86/kernel/cpu/Makefile
11914+++ b/arch/x86/kernel/cpu/Makefile
11915@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11916 CFLAGS_REMOVE_perf_event.o = -pg
11917 endif
11918
11919-# Make sure load_percpu_segment has no stackprotector
11920-nostackp := $(call cc-option, -fno-stack-protector)
11921-CFLAGS_common.o := $(nostackp)
11922-
11923 obj-y := intel_cacheinfo.o scattered.o topology.o
11924 obj-y += proc.o capflags.o powerflags.o common.o
11925 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11926diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11927index 0bab2b1..d0a1bf8 100644
11928--- a/arch/x86/kernel/cpu/amd.c
11929+++ b/arch/x86/kernel/cpu/amd.c
11930@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11931 unsigned int size)
11932 {
11933 /* AMD errata T13 (order #21922) */
11934- if ((c->x86 == 6)) {
11935+ if (c->x86 == 6) {
11936 /* Duron Rev A0 */
11937 if (c->x86_model == 3 && c->x86_mask == 0)
11938 size = 64;
11939diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11940index aa003b1..47ea638 100644
11941--- a/arch/x86/kernel/cpu/common.c
11942+++ b/arch/x86/kernel/cpu/common.c
11943@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11944
11945 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11946
11947-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11948-#ifdef CONFIG_X86_64
11949- /*
11950- * We need valid kernel segments for data and code in long mode too
11951- * IRET will check the segment types kkeil 2000/10/28
11952- * Also sysret mandates a special GDT layout
11953- *
11954- * TLS descriptors are currently at a different place compared to i386.
11955- * Hopefully nobody expects them at a fixed place (Wine?)
11956- */
11957- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11958- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11959- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11960- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11961- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11962- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11963-#else
11964- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11965- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11966- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11967- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11968- /*
11969- * Segments used for calling PnP BIOS have byte granularity.
11970- * They code segments and data segments have fixed 64k limits,
11971- * the transfer segment sizes are set at run time.
11972- */
11973- /* 32-bit code */
11974- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11975- /* 16-bit code */
11976- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11977- /* 16-bit data */
11978- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11979- /* 16-bit data */
11980- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11981- /* 16-bit data */
11982- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11983- /*
11984- * The APM segments have byte granularity and their bases
11985- * are set at run time. All have 64k limits.
11986- */
11987- /* 32-bit code */
11988- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11989- /* 16-bit code */
11990- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11991- /* data */
11992- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11993-
11994- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11995- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11996- GDT_STACK_CANARY_INIT
11997-#endif
11998-} };
11999-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12000-
12001 static int __init x86_xsave_setup(char *s)
12002 {
12003 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12004@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12005 {
12006 struct desc_ptr gdt_descr;
12007
12008- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12009+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12010 gdt_descr.size = GDT_SIZE - 1;
12011 load_gdt(&gdt_descr);
12012 /* Reload the per-cpu base */
12013@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12014 /* Filter out anything that depends on CPUID levels we don't have */
12015 filter_cpuid_features(c, true);
12016
12017+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12018+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12019+#endif
12020+
12021 /* If the model name is still unset, do table lookup. */
12022 if (!c->x86_model_id[0]) {
12023 const char *p;
12024@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12025 }
12026 __setup("clearcpuid=", setup_disablecpuid);
12027
12028+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12029+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12030+
12031 #ifdef CONFIG_X86_64
12032 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12033
12034@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12035 EXPORT_PER_CPU_SYMBOL(current_task);
12036
12037 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12038- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12039+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12040 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12041
12042 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12043@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12044 {
12045 memset(regs, 0, sizeof(struct pt_regs));
12046 regs->fs = __KERNEL_PERCPU;
12047- regs->gs = __KERNEL_STACK_CANARY;
12048+ savesegment(gs, regs->gs);
12049
12050 return regs;
12051 }
12052@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12053 int i;
12054
12055 cpu = stack_smp_processor_id();
12056- t = &per_cpu(init_tss, cpu);
12057+ t = init_tss + cpu;
12058 oist = &per_cpu(orig_ist, cpu);
12059
12060 #ifdef CONFIG_NUMA
12061@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12062 switch_to_new_gdt(cpu);
12063 loadsegment(fs, 0);
12064
12065- load_idt((const struct desc_ptr *)&idt_descr);
12066+ load_idt(&idt_descr);
12067
12068 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12069 syscall_init();
12070@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12071 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12072 barrier();
12073
12074- x86_configure_nx();
12075 if (cpu != 0)
12076 enable_x2apic();
12077
12078@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12079 {
12080 int cpu = smp_processor_id();
12081 struct task_struct *curr = current;
12082- struct tss_struct *t = &per_cpu(init_tss, cpu);
12083+ struct tss_struct *t = init_tss + cpu;
12084 struct thread_struct *thread = &curr->thread;
12085
12086 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12087diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12088index 5231312..a78a987 100644
12089--- a/arch/x86/kernel/cpu/intel.c
12090+++ b/arch/x86/kernel/cpu/intel.c
12091@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12092 * Update the IDT descriptor and reload the IDT so that
12093 * it uses the read-only mapped virtual address.
12094 */
12095- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12096+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12097 load_idt(&idt_descr);
12098 }
12099 #endif
12100diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12101index 2af127d..8ff7ac0 100644
12102--- a/arch/x86/kernel/cpu/mcheck/mce.c
12103+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12104@@ -42,6 +42,7 @@
12105 #include <asm/processor.h>
12106 #include <asm/mce.h>
12107 #include <asm/msr.h>
12108+#include <asm/local.h>
12109
12110 #include "mce-internal.h"
12111
12112@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12113 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12114 m->cs, m->ip);
12115
12116- if (m->cs == __KERNEL_CS)
12117+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12118 print_symbol("{%s}", m->ip);
12119 pr_cont("\n");
12120 }
12121@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12122
12123 #define PANIC_TIMEOUT 5 /* 5 seconds */
12124
12125-static atomic_t mce_paniced;
12126+static atomic_unchecked_t mce_paniced;
12127
12128 static int fake_panic;
12129-static atomic_t mce_fake_paniced;
12130+static atomic_unchecked_t mce_fake_paniced;
12131
12132 /* Panic in progress. Enable interrupts and wait for final IPI */
12133 static void wait_for_panic(void)
12134@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12135 /*
12136 * Make sure only one CPU runs in machine check panic
12137 */
12138- if (atomic_inc_return(&mce_paniced) > 1)
12139+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12140 wait_for_panic();
12141 barrier();
12142
12143@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12144 console_verbose();
12145 } else {
12146 /* Don't log too much for fake panic */
12147- if (atomic_inc_return(&mce_fake_paniced) > 1)
12148+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12149 return;
12150 }
12151 /* First print corrected ones that are still unlogged */
12152@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12153 * might have been modified by someone else.
12154 */
12155 rmb();
12156- if (atomic_read(&mce_paniced))
12157+ if (atomic_read_unchecked(&mce_paniced))
12158 wait_for_panic();
12159 if (!monarch_timeout)
12160 goto out;
12161@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12162 }
12163
12164 /* Call the installed machine check handler for this CPU setup. */
12165-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12166+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12167 unexpected_machine_check;
12168
12169 /*
12170@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12171 return;
12172 }
12173
12174+ pax_open_kernel();
12175 machine_check_vector = do_machine_check;
12176+ pax_close_kernel();
12177
12178 __mcheck_cpu_init_generic();
12179 __mcheck_cpu_init_vendor(c);
12180@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12181 */
12182
12183 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12184-static int mce_chrdev_open_count; /* #times opened */
12185+static local_t mce_chrdev_open_count; /* #times opened */
12186 static int mce_chrdev_open_exclu; /* already open exclusive? */
12187
12188 static int mce_chrdev_open(struct inode *inode, struct file *file)
12189@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12190 spin_lock(&mce_chrdev_state_lock);
12191
12192 if (mce_chrdev_open_exclu ||
12193- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12194+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12195 spin_unlock(&mce_chrdev_state_lock);
12196
12197 return -EBUSY;
12198@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12199
12200 if (file->f_flags & O_EXCL)
12201 mce_chrdev_open_exclu = 1;
12202- mce_chrdev_open_count++;
12203+ local_inc(&mce_chrdev_open_count);
12204
12205 spin_unlock(&mce_chrdev_state_lock);
12206
12207@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12208 {
12209 spin_lock(&mce_chrdev_state_lock);
12210
12211- mce_chrdev_open_count--;
12212+ local_dec(&mce_chrdev_open_count);
12213 mce_chrdev_open_exclu = 0;
12214
12215 spin_unlock(&mce_chrdev_state_lock);
12216@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12217 static void mce_reset(void)
12218 {
12219 cpu_missing = 0;
12220- atomic_set(&mce_fake_paniced, 0);
12221+ atomic_set_unchecked(&mce_fake_paniced, 0);
12222 atomic_set(&mce_executing, 0);
12223 atomic_set(&mce_callin, 0);
12224 atomic_set(&global_nwo, 0);
12225diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12226index 5c0e653..0882b0a 100644
12227--- a/arch/x86/kernel/cpu/mcheck/p5.c
12228+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12229@@ -12,6 +12,7 @@
12230 #include <asm/system.h>
12231 #include <asm/mce.h>
12232 #include <asm/msr.h>
12233+#include <asm/pgtable.h>
12234
12235 /* By default disabled */
12236 int mce_p5_enabled __read_mostly;
12237@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12238 if (!cpu_has(c, X86_FEATURE_MCE))
12239 return;
12240
12241+ pax_open_kernel();
12242 machine_check_vector = pentium_machine_check;
12243+ pax_close_kernel();
12244 /* Make sure the vector pointer is visible before we enable MCEs: */
12245 wmb();
12246
12247diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12248index 54060f5..c1a7577 100644
12249--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12250+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12251@@ -11,6 +11,7 @@
12252 #include <asm/system.h>
12253 #include <asm/mce.h>
12254 #include <asm/msr.h>
12255+#include <asm/pgtable.h>
12256
12257 /* Machine check handler for WinChip C6: */
12258 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12259@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12260 {
12261 u32 lo, hi;
12262
12263+ pax_open_kernel();
12264 machine_check_vector = winchip_machine_check;
12265+ pax_close_kernel();
12266 /* Make sure the vector pointer is visible before we enable MCEs: */
12267 wmb();
12268
12269diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12270index 6b96110..0da73eb 100644
12271--- a/arch/x86/kernel/cpu/mtrr/main.c
12272+++ b/arch/x86/kernel/cpu/mtrr/main.c
12273@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12274 u64 size_or_mask, size_and_mask;
12275 static bool mtrr_aps_delayed_init;
12276
12277-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12278+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12279
12280 const struct mtrr_ops *mtrr_if;
12281
12282diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12283index df5e41f..816c719 100644
12284--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12285+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12286@@ -25,7 +25,7 @@ struct mtrr_ops {
12287 int (*validate_add_page)(unsigned long base, unsigned long size,
12288 unsigned int type);
12289 int (*have_wrcomb)(void);
12290-};
12291+} __do_const;
12292
12293 extern int generic_get_free_region(unsigned long base, unsigned long size,
12294 int replace_reg);
12295diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12296index 2bda212..78cc605 100644
12297--- a/arch/x86/kernel/cpu/perf_event.c
12298+++ b/arch/x86/kernel/cpu/perf_event.c
12299@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12300 break;
12301
12302 perf_callchain_store(entry, frame.return_address);
12303- fp = frame.next_frame;
12304+ fp = (const void __force_user *)frame.next_frame;
12305 }
12306 }
12307
12308diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12309index 13ad899..f642b9a 100644
12310--- a/arch/x86/kernel/crash.c
12311+++ b/arch/x86/kernel/crash.c
12312@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12313 {
12314 #ifdef CONFIG_X86_32
12315 struct pt_regs fixed_regs;
12316-#endif
12317
12318-#ifdef CONFIG_X86_32
12319- if (!user_mode_vm(regs)) {
12320+ if (!user_mode(regs)) {
12321 crash_fixup_ss_esp(&fixed_regs, regs);
12322 regs = &fixed_regs;
12323 }
12324diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12325index 37250fe..bf2ec74 100644
12326--- a/arch/x86/kernel/doublefault_32.c
12327+++ b/arch/x86/kernel/doublefault_32.c
12328@@ -11,7 +11,7 @@
12329
12330 #define DOUBLEFAULT_STACKSIZE (1024)
12331 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12332-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12333+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12334
12335 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12336
12337@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12338 unsigned long gdt, tss;
12339
12340 store_gdt(&gdt_desc);
12341- gdt = gdt_desc.address;
12342+ gdt = (unsigned long)gdt_desc.address;
12343
12344 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12345
12346@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12347 /* 0x2 bit is always set */
12348 .flags = X86_EFLAGS_SF | 0x2,
12349 .sp = STACK_START,
12350- .es = __USER_DS,
12351+ .es = __KERNEL_DS,
12352 .cs = __KERNEL_CS,
12353 .ss = __KERNEL_DS,
12354- .ds = __USER_DS,
12355+ .ds = __KERNEL_DS,
12356 .fs = __KERNEL_PERCPU,
12357
12358 .__cr3 = __pa_nodebug(swapper_pg_dir),
12359diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12360index 1aae78f..aab3a3d 100644
12361--- a/arch/x86/kernel/dumpstack.c
12362+++ b/arch/x86/kernel/dumpstack.c
12363@@ -2,6 +2,9 @@
12364 * Copyright (C) 1991, 1992 Linus Torvalds
12365 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12366 */
12367+#ifdef CONFIG_GRKERNSEC_HIDESYM
12368+#define __INCLUDED_BY_HIDESYM 1
12369+#endif
12370 #include <linux/kallsyms.h>
12371 #include <linux/kprobes.h>
12372 #include <linux/uaccess.h>
12373@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12374 static void
12375 print_ftrace_graph_addr(unsigned long addr, void *data,
12376 const struct stacktrace_ops *ops,
12377- struct thread_info *tinfo, int *graph)
12378+ struct task_struct *task, int *graph)
12379 {
12380- struct task_struct *task = tinfo->task;
12381 unsigned long ret_addr;
12382 int index = task->curr_ret_stack;
12383
12384@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12385 static inline void
12386 print_ftrace_graph_addr(unsigned long addr, void *data,
12387 const struct stacktrace_ops *ops,
12388- struct thread_info *tinfo, int *graph)
12389+ struct task_struct *task, int *graph)
12390 { }
12391 #endif
12392
12393@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12394 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12395 */
12396
12397-static inline int valid_stack_ptr(struct thread_info *tinfo,
12398- void *p, unsigned int size, void *end)
12399+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12400 {
12401- void *t = tinfo;
12402 if (end) {
12403 if (p < end && p >= (end-THREAD_SIZE))
12404 return 1;
12405@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12406 }
12407
12408 unsigned long
12409-print_context_stack(struct thread_info *tinfo,
12410+print_context_stack(struct task_struct *task, void *stack_start,
12411 unsigned long *stack, unsigned long bp,
12412 const struct stacktrace_ops *ops, void *data,
12413 unsigned long *end, int *graph)
12414 {
12415 struct stack_frame *frame = (struct stack_frame *)bp;
12416
12417- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12418+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12419 unsigned long addr;
12420
12421 addr = *stack;
12422@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12423 } else {
12424 ops->address(data, addr, 0);
12425 }
12426- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12427+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12428 }
12429 stack++;
12430 }
12431@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12432 EXPORT_SYMBOL_GPL(print_context_stack);
12433
12434 unsigned long
12435-print_context_stack_bp(struct thread_info *tinfo,
12436+print_context_stack_bp(struct task_struct *task, void *stack_start,
12437 unsigned long *stack, unsigned long bp,
12438 const struct stacktrace_ops *ops, void *data,
12439 unsigned long *end, int *graph)
12440@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12441 struct stack_frame *frame = (struct stack_frame *)bp;
12442 unsigned long *ret_addr = &frame->return_address;
12443
12444- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12445+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12446 unsigned long addr = *ret_addr;
12447
12448 if (!__kernel_text_address(addr))
12449@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12450 ops->address(data, addr, 1);
12451 frame = frame->next_frame;
12452 ret_addr = &frame->return_address;
12453- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12454+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12455 }
12456
12457 return (unsigned long)frame;
12458@@ -186,7 +186,7 @@ void dump_stack(void)
12459
12460 bp = stack_frame(current, NULL);
12461 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12462- current->pid, current->comm, print_tainted(),
12463+ task_pid_nr(current), current->comm, print_tainted(),
12464 init_utsname()->release,
12465 (int)strcspn(init_utsname()->version, " "),
12466 init_utsname()->version);
12467@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12468 }
12469 EXPORT_SYMBOL_GPL(oops_begin);
12470
12471+extern void gr_handle_kernel_exploit(void);
12472+
12473 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12474 {
12475 if (regs && kexec_should_crash(current))
12476@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12477 panic("Fatal exception in interrupt");
12478 if (panic_on_oops)
12479 panic("Fatal exception");
12480- do_exit(signr);
12481+
12482+ gr_handle_kernel_exploit();
12483+
12484+ do_group_exit(signr);
12485 }
12486
12487 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12488@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12489
12490 show_registers(regs);
12491 #ifdef CONFIG_X86_32
12492- if (user_mode_vm(regs)) {
12493+ if (user_mode(regs)) {
12494 sp = regs->sp;
12495 ss = regs->ss & 0xffff;
12496 } else {
12497@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12498 unsigned long flags = oops_begin();
12499 int sig = SIGSEGV;
12500
12501- if (!user_mode_vm(regs))
12502+ if (!user_mode(regs))
12503 report_bug(regs->ip, regs);
12504
12505 if (__die(str, regs, err))
12506diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12507index c99f9ed..2a15d80 100644
12508--- a/arch/x86/kernel/dumpstack_32.c
12509+++ b/arch/x86/kernel/dumpstack_32.c
12510@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12511 bp = stack_frame(task, regs);
12512
12513 for (;;) {
12514- struct thread_info *context;
12515+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12516
12517- context = (struct thread_info *)
12518- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12519- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12520+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12521
12522- stack = (unsigned long *)context->previous_esp;
12523- if (!stack)
12524+ if (stack_start == task_stack_page(task))
12525 break;
12526+ stack = *(unsigned long **)stack_start;
12527 if (ops->stack(data, "IRQ") < 0)
12528 break;
12529 touch_nmi_watchdog();
12530@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12531 * When in-kernel, we also print out the stack and code at the
12532 * time of the fault..
12533 */
12534- if (!user_mode_vm(regs)) {
12535+ if (!user_mode(regs)) {
12536 unsigned int code_prologue = code_bytes * 43 / 64;
12537 unsigned int code_len = code_bytes;
12538 unsigned char c;
12539 u8 *ip;
12540+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12541
12542 printk(KERN_EMERG "Stack:\n");
12543 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12544
12545 printk(KERN_EMERG "Code: ");
12546
12547- ip = (u8 *)regs->ip - code_prologue;
12548+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12549 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12550 /* try starting at IP */
12551- ip = (u8 *)regs->ip;
12552+ ip = (u8 *)regs->ip + cs_base;
12553 code_len = code_len - code_prologue + 1;
12554 }
12555 for (i = 0; i < code_len; i++, ip++) {
12556@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12557 printk(KERN_CONT " Bad EIP value.");
12558 break;
12559 }
12560- if (ip == (u8 *)regs->ip)
12561+ if (ip == (u8 *)regs->ip + cs_base)
12562 printk(KERN_CONT "<%02x> ", c);
12563 else
12564 printk(KERN_CONT "%02x ", c);
12565@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12566 {
12567 unsigned short ud2;
12568
12569+ ip = ktla_ktva(ip);
12570 if (ip < PAGE_OFFSET)
12571 return 0;
12572 if (probe_kernel_address((unsigned short *)ip, ud2))
12573@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12574
12575 return ud2 == 0x0b0f;
12576 }
12577+
12578+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12579+void pax_check_alloca(unsigned long size)
12580+{
12581+ unsigned long sp = (unsigned long)&sp, stack_left;
12582+
12583+ /* all kernel stacks are of the same size */
12584+ stack_left = sp & (THREAD_SIZE - 1);
12585+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12586+}
12587+EXPORT_SYMBOL(pax_check_alloca);
12588+#endif
12589diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12590index 6d728d9..279514e 100644
12591--- a/arch/x86/kernel/dumpstack_64.c
12592+++ b/arch/x86/kernel/dumpstack_64.c
12593@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12594 unsigned long *irq_stack_end =
12595 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12596 unsigned used = 0;
12597- struct thread_info *tinfo;
12598 int graph = 0;
12599 unsigned long dummy;
12600+ void *stack_start;
12601
12602 if (!task)
12603 task = current;
12604@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12605 * current stack address. If the stacks consist of nested
12606 * exceptions
12607 */
12608- tinfo = task_thread_info(task);
12609 for (;;) {
12610 char *id;
12611 unsigned long *estack_end;
12612+
12613 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12614 &used, &id);
12615
12616@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12617 if (ops->stack(data, id) < 0)
12618 break;
12619
12620- bp = ops->walk_stack(tinfo, stack, bp, ops,
12621+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12622 data, estack_end, &graph);
12623 ops->stack(data, "<EOE>");
12624 /*
12625@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12626 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12627 if (ops->stack(data, "IRQ") < 0)
12628 break;
12629- bp = ops->walk_stack(tinfo, stack, bp,
12630+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12631 ops, data, irq_stack_end, &graph);
12632 /*
12633 * We link to the next stack (which would be
12634@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12635 /*
12636 * This handles the process stack:
12637 */
12638- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12639+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12640+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12641 put_cpu();
12642 }
12643 EXPORT_SYMBOL(dump_trace);
12644@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12645
12646 return ud2 == 0x0b0f;
12647 }
12648+
12649+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12650+void pax_check_alloca(unsigned long size)
12651+{
12652+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12653+ unsigned cpu, used;
12654+ char *id;
12655+
12656+ /* check the process stack first */
12657+ stack_start = (unsigned long)task_stack_page(current);
12658+ stack_end = stack_start + THREAD_SIZE;
12659+ if (likely(stack_start <= sp && sp < stack_end)) {
12660+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12661+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12662+ return;
12663+ }
12664+
12665+ cpu = get_cpu();
12666+
12667+ /* check the irq stacks */
12668+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12669+ stack_start = stack_end - IRQ_STACK_SIZE;
12670+ if (stack_start <= sp && sp < stack_end) {
12671+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12672+ put_cpu();
12673+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12674+ return;
12675+ }
12676+
12677+ /* check the exception stacks */
12678+ used = 0;
12679+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12680+ stack_start = stack_end - EXCEPTION_STKSZ;
12681+ if (stack_end && stack_start <= sp && sp < stack_end) {
12682+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12683+ put_cpu();
12684+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12685+ return;
12686+ }
12687+
12688+ put_cpu();
12689+
12690+ /* unknown stack */
12691+ BUG();
12692+}
12693+EXPORT_SYMBOL(pax_check_alloca);
12694+#endif
12695diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12696index cd28a35..c72ed9a 100644
12697--- a/arch/x86/kernel/early_printk.c
12698+++ b/arch/x86/kernel/early_printk.c
12699@@ -7,6 +7,7 @@
12700 #include <linux/pci_regs.h>
12701 #include <linux/pci_ids.h>
12702 #include <linux/errno.h>
12703+#include <linux/sched.h>
12704 #include <asm/io.h>
12705 #include <asm/processor.h>
12706 #include <asm/fcntl.h>
12707diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12708index f3f6f53..0841b66 100644
12709--- a/arch/x86/kernel/entry_32.S
12710+++ b/arch/x86/kernel/entry_32.S
12711@@ -186,13 +186,146 @@
12712 /*CFI_REL_OFFSET gs, PT_GS*/
12713 .endm
12714 .macro SET_KERNEL_GS reg
12715+
12716+#ifdef CONFIG_CC_STACKPROTECTOR
12717 movl $(__KERNEL_STACK_CANARY), \reg
12718+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12719+ movl $(__USER_DS), \reg
12720+#else
12721+ xorl \reg, \reg
12722+#endif
12723+
12724 movl \reg, %gs
12725 .endm
12726
12727 #endif /* CONFIG_X86_32_LAZY_GS */
12728
12729-.macro SAVE_ALL
12730+.macro pax_enter_kernel
12731+#ifdef CONFIG_PAX_KERNEXEC
12732+ call pax_enter_kernel
12733+#endif
12734+.endm
12735+
12736+.macro pax_exit_kernel
12737+#ifdef CONFIG_PAX_KERNEXEC
12738+ call pax_exit_kernel
12739+#endif
12740+.endm
12741+
12742+#ifdef CONFIG_PAX_KERNEXEC
12743+ENTRY(pax_enter_kernel)
12744+#ifdef CONFIG_PARAVIRT
12745+ pushl %eax
12746+ pushl %ecx
12747+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12748+ mov %eax, %esi
12749+#else
12750+ mov %cr0, %esi
12751+#endif
12752+ bts $16, %esi
12753+ jnc 1f
12754+ mov %cs, %esi
12755+ cmp $__KERNEL_CS, %esi
12756+ jz 3f
12757+ ljmp $__KERNEL_CS, $3f
12758+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12759+2:
12760+#ifdef CONFIG_PARAVIRT
12761+ mov %esi, %eax
12762+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12763+#else
12764+ mov %esi, %cr0
12765+#endif
12766+3:
12767+#ifdef CONFIG_PARAVIRT
12768+ popl %ecx
12769+ popl %eax
12770+#endif
12771+ ret
12772+ENDPROC(pax_enter_kernel)
12773+
12774+ENTRY(pax_exit_kernel)
12775+#ifdef CONFIG_PARAVIRT
12776+ pushl %eax
12777+ pushl %ecx
12778+#endif
12779+ mov %cs, %esi
12780+ cmp $__KERNEXEC_KERNEL_CS, %esi
12781+ jnz 2f
12782+#ifdef CONFIG_PARAVIRT
12783+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12784+ mov %eax, %esi
12785+#else
12786+ mov %cr0, %esi
12787+#endif
12788+ btr $16, %esi
12789+ ljmp $__KERNEL_CS, $1f
12790+1:
12791+#ifdef CONFIG_PARAVIRT
12792+ mov %esi, %eax
12793+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12794+#else
12795+ mov %esi, %cr0
12796+#endif
12797+2:
12798+#ifdef CONFIG_PARAVIRT
12799+ popl %ecx
12800+ popl %eax
12801+#endif
12802+ ret
12803+ENDPROC(pax_exit_kernel)
12804+#endif
12805+
12806+.macro pax_erase_kstack
12807+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12808+ call pax_erase_kstack
12809+#endif
12810+.endm
12811+
12812+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12813+/*
12814+ * ebp: thread_info
12815+ * ecx, edx: can be clobbered
12816+ */
12817+ENTRY(pax_erase_kstack)
12818+ pushl %edi
12819+ pushl %eax
12820+
12821+ mov TI_lowest_stack(%ebp), %edi
12822+ mov $-0xBEEF, %eax
12823+ std
12824+
12825+1: mov %edi, %ecx
12826+ and $THREAD_SIZE_asm - 1, %ecx
12827+ shr $2, %ecx
12828+ repne scasl
12829+ jecxz 2f
12830+
12831+ cmp $2*16, %ecx
12832+ jc 2f
12833+
12834+ mov $2*16, %ecx
12835+ repe scasl
12836+ jecxz 2f
12837+ jne 1b
12838+
12839+2: cld
12840+ mov %esp, %ecx
12841+ sub %edi, %ecx
12842+ shr $2, %ecx
12843+ rep stosl
12844+
12845+ mov TI_task_thread_sp0(%ebp), %edi
12846+ sub $128, %edi
12847+ mov %edi, TI_lowest_stack(%ebp)
12848+
12849+ popl %eax
12850+ popl %edi
12851+ ret
12852+ENDPROC(pax_erase_kstack)
12853+#endif
12854+
12855+.macro __SAVE_ALL _DS
12856 cld
12857 PUSH_GS
12858 pushl_cfi %fs
12859@@ -215,7 +348,7 @@
12860 CFI_REL_OFFSET ecx, 0
12861 pushl_cfi %ebx
12862 CFI_REL_OFFSET ebx, 0
12863- movl $(__USER_DS), %edx
12864+ movl $\_DS, %edx
12865 movl %edx, %ds
12866 movl %edx, %es
12867 movl $(__KERNEL_PERCPU), %edx
12868@@ -223,6 +356,15 @@
12869 SET_KERNEL_GS %edx
12870 .endm
12871
12872+.macro SAVE_ALL
12873+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12874+ __SAVE_ALL __KERNEL_DS
12875+ pax_enter_kernel
12876+#else
12877+ __SAVE_ALL __USER_DS
12878+#endif
12879+.endm
12880+
12881 .macro RESTORE_INT_REGS
12882 popl_cfi %ebx
12883 CFI_RESTORE ebx
12884@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12885 popfl_cfi
12886 jmp syscall_exit
12887 CFI_ENDPROC
12888-END(ret_from_fork)
12889+ENDPROC(ret_from_fork)
12890
12891 /*
12892 * Interrupt exit functions should be protected against kprobes
12893@@ -333,7 +475,15 @@ check_userspace:
12894 movb PT_CS(%esp), %al
12895 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12896 cmpl $USER_RPL, %eax
12897+
12898+#ifdef CONFIG_PAX_KERNEXEC
12899+ jae resume_userspace
12900+
12901+ PAX_EXIT_KERNEL
12902+ jmp resume_kernel
12903+#else
12904 jb resume_kernel # not returning to v8086 or userspace
12905+#endif
12906
12907 ENTRY(resume_userspace)
12908 LOCKDEP_SYS_EXIT
12909@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12910 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12911 # int/exception return?
12912 jne work_pending
12913- jmp restore_all
12914-END(ret_from_exception)
12915+ jmp restore_all_pax
12916+ENDPROC(ret_from_exception)
12917
12918 #ifdef CONFIG_PREEMPT
12919 ENTRY(resume_kernel)
12920@@ -361,7 +511,7 @@ need_resched:
12921 jz restore_all
12922 call preempt_schedule_irq
12923 jmp need_resched
12924-END(resume_kernel)
12925+ENDPROC(resume_kernel)
12926 #endif
12927 CFI_ENDPROC
12928 /*
12929@@ -395,23 +545,34 @@ sysenter_past_esp:
12930 /*CFI_REL_OFFSET cs, 0*/
12931 /*
12932 * Push current_thread_info()->sysenter_return to the stack.
12933- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12934- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12935 */
12936- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12937+ pushl_cfi $0
12938 CFI_REL_OFFSET eip, 0
12939
12940 pushl_cfi %eax
12941 SAVE_ALL
12942+ GET_THREAD_INFO(%ebp)
12943+ movl TI_sysenter_return(%ebp),%ebp
12944+ movl %ebp,PT_EIP(%esp)
12945 ENABLE_INTERRUPTS(CLBR_NONE)
12946
12947 /*
12948 * Load the potential sixth argument from user stack.
12949 * Careful about security.
12950 */
12951+ movl PT_OLDESP(%esp),%ebp
12952+
12953+#ifdef CONFIG_PAX_MEMORY_UDEREF
12954+ mov PT_OLDSS(%esp),%ds
12955+1: movl %ds:(%ebp),%ebp
12956+ push %ss
12957+ pop %ds
12958+#else
12959 cmpl $__PAGE_OFFSET-3,%ebp
12960 jae syscall_fault
12961 1: movl (%ebp),%ebp
12962+#endif
12963+
12964 movl %ebp,PT_EBP(%esp)
12965 .section __ex_table,"a"
12966 .align 4
12967@@ -434,12 +595,24 @@ sysenter_do_call:
12968 testl $_TIF_ALLWORK_MASK, %ecx
12969 jne sysexit_audit
12970 sysenter_exit:
12971+
12972+#ifdef CONFIG_PAX_RANDKSTACK
12973+ pushl_cfi %eax
12974+ movl %esp, %eax
12975+ call pax_randomize_kstack
12976+ popl_cfi %eax
12977+#endif
12978+
12979+ pax_erase_kstack
12980+
12981 /* if something modifies registers it must also disable sysexit */
12982 movl PT_EIP(%esp), %edx
12983 movl PT_OLDESP(%esp), %ecx
12984 xorl %ebp,%ebp
12985 TRACE_IRQS_ON
12986 1: mov PT_FS(%esp), %fs
12987+2: mov PT_DS(%esp), %ds
12988+3: mov PT_ES(%esp), %es
12989 PTGS_TO_GS
12990 ENABLE_INTERRUPTS_SYSEXIT
12991
12992@@ -456,6 +629,9 @@ sysenter_audit:
12993 movl %eax,%edx /* 2nd arg: syscall number */
12994 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12995 call audit_syscall_entry
12996+
12997+ pax_erase_kstack
12998+
12999 pushl_cfi %ebx
13000 movl PT_EAX(%esp),%eax /* reload syscall number */
13001 jmp sysenter_do_call
13002@@ -482,11 +658,17 @@ sysexit_audit:
13003
13004 CFI_ENDPROC
13005 .pushsection .fixup,"ax"
13006-2: movl $0,PT_FS(%esp)
13007+4: movl $0,PT_FS(%esp)
13008+ jmp 1b
13009+5: movl $0,PT_DS(%esp)
13010+ jmp 1b
13011+6: movl $0,PT_ES(%esp)
13012 jmp 1b
13013 .section __ex_table,"a"
13014 .align 4
13015- .long 1b,2b
13016+ .long 1b,4b
13017+ .long 2b,5b
13018+ .long 3b,6b
13019 .popsection
13020 PTGS_TO_GS_EX
13021 ENDPROC(ia32_sysenter_target)
13022@@ -519,6 +701,15 @@ syscall_exit:
13023 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13024 jne syscall_exit_work
13025
13026+restore_all_pax:
13027+
13028+#ifdef CONFIG_PAX_RANDKSTACK
13029+ movl %esp, %eax
13030+ call pax_randomize_kstack
13031+#endif
13032+
13033+ pax_erase_kstack
13034+
13035 restore_all:
13036 TRACE_IRQS_IRET
13037 restore_all_notrace:
13038@@ -578,14 +769,34 @@ ldt_ss:
13039 * compensating for the offset by changing to the ESPFIX segment with
13040 * a base address that matches for the difference.
13041 */
13042-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13043+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13044 mov %esp, %edx /* load kernel esp */
13045 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13046 mov %dx, %ax /* eax: new kernel esp */
13047 sub %eax, %edx /* offset (low word is 0) */
13048+#ifdef CONFIG_SMP
13049+ movl PER_CPU_VAR(cpu_number), %ebx
13050+ shll $PAGE_SHIFT_asm, %ebx
13051+ addl $cpu_gdt_table, %ebx
13052+#else
13053+ movl $cpu_gdt_table, %ebx
13054+#endif
13055 shr $16, %edx
13056- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13057- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13058+
13059+#ifdef CONFIG_PAX_KERNEXEC
13060+ mov %cr0, %esi
13061+ btr $16, %esi
13062+ mov %esi, %cr0
13063+#endif
13064+
13065+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13066+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13067+
13068+#ifdef CONFIG_PAX_KERNEXEC
13069+ bts $16, %esi
13070+ mov %esi, %cr0
13071+#endif
13072+
13073 pushl_cfi $__ESPFIX_SS
13074 pushl_cfi %eax /* new kernel esp */
13075 /* Disable interrupts, but do not irqtrace this section: we
13076@@ -614,34 +825,28 @@ work_resched:
13077 movl TI_flags(%ebp), %ecx
13078 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13079 # than syscall tracing?
13080- jz restore_all
13081+ jz restore_all_pax
13082 testb $_TIF_NEED_RESCHED, %cl
13083 jnz work_resched
13084
13085 work_notifysig: # deal with pending signals and
13086 # notify-resume requests
13087+ movl %esp, %eax
13088 #ifdef CONFIG_VM86
13089 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13090- movl %esp, %eax
13091- jne work_notifysig_v86 # returning to kernel-space or
13092+ jz 1f # returning to kernel-space or
13093 # vm86-space
13094- xorl %edx, %edx
13095- call do_notify_resume
13096- jmp resume_userspace_sig
13097
13098- ALIGN
13099-work_notifysig_v86:
13100 pushl_cfi %ecx # save ti_flags for do_notify_resume
13101 call save_v86_state # %eax contains pt_regs pointer
13102 popl_cfi %ecx
13103 movl %eax, %esp
13104-#else
13105- movl %esp, %eax
13106+1:
13107 #endif
13108 xorl %edx, %edx
13109 call do_notify_resume
13110 jmp resume_userspace_sig
13111-END(work_pending)
13112+ENDPROC(work_pending)
13113
13114 # perform syscall exit tracing
13115 ALIGN
13116@@ -649,11 +854,14 @@ syscall_trace_entry:
13117 movl $-ENOSYS,PT_EAX(%esp)
13118 movl %esp, %eax
13119 call syscall_trace_enter
13120+
13121+ pax_erase_kstack
13122+
13123 /* What it returned is what we'll actually use. */
13124 cmpl $(nr_syscalls), %eax
13125 jnae syscall_call
13126 jmp syscall_exit
13127-END(syscall_trace_entry)
13128+ENDPROC(syscall_trace_entry)
13129
13130 # perform syscall exit tracing
13131 ALIGN
13132@@ -666,20 +874,24 @@ syscall_exit_work:
13133 movl %esp, %eax
13134 call syscall_trace_leave
13135 jmp resume_userspace
13136-END(syscall_exit_work)
13137+ENDPROC(syscall_exit_work)
13138 CFI_ENDPROC
13139
13140 RING0_INT_FRAME # can't unwind into user space anyway
13141 syscall_fault:
13142+#ifdef CONFIG_PAX_MEMORY_UDEREF
13143+ push %ss
13144+ pop %ds
13145+#endif
13146 GET_THREAD_INFO(%ebp)
13147 movl $-EFAULT,PT_EAX(%esp)
13148 jmp resume_userspace
13149-END(syscall_fault)
13150+ENDPROC(syscall_fault)
13151
13152 syscall_badsys:
13153 movl $-ENOSYS,PT_EAX(%esp)
13154 jmp resume_userspace
13155-END(syscall_badsys)
13156+ENDPROC(syscall_badsys)
13157 CFI_ENDPROC
13158 /*
13159 * End of kprobes section
13160@@ -753,6 +965,36 @@ ptregs_clone:
13161 CFI_ENDPROC
13162 ENDPROC(ptregs_clone)
13163
13164+ ALIGN;
13165+ENTRY(kernel_execve)
13166+ CFI_STARTPROC
13167+ pushl_cfi %ebp
13168+ sub $PT_OLDSS+4,%esp
13169+ pushl_cfi %edi
13170+ pushl_cfi %ecx
13171+ pushl_cfi %eax
13172+ lea 3*4(%esp),%edi
13173+ mov $PT_OLDSS/4+1,%ecx
13174+ xorl %eax,%eax
13175+ rep stosl
13176+ popl_cfi %eax
13177+ popl_cfi %ecx
13178+ popl_cfi %edi
13179+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13180+ pushl_cfi %esp
13181+ call sys_execve
13182+ add $4,%esp
13183+ CFI_ADJUST_CFA_OFFSET -4
13184+ GET_THREAD_INFO(%ebp)
13185+ test %eax,%eax
13186+ jz syscall_exit
13187+ add $PT_OLDSS+4,%esp
13188+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13189+ popl_cfi %ebp
13190+ ret
13191+ CFI_ENDPROC
13192+ENDPROC(kernel_execve)
13193+
13194 .macro FIXUP_ESPFIX_STACK
13195 /*
13196 * Switch back for ESPFIX stack to the normal zerobased stack
13197@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13198 * normal stack and adjusts ESP with the matching offset.
13199 */
13200 /* fixup the stack */
13201- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13202- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13203+#ifdef CONFIG_SMP
13204+ movl PER_CPU_VAR(cpu_number), %ebx
13205+ shll $PAGE_SHIFT_asm, %ebx
13206+ addl $cpu_gdt_table, %ebx
13207+#else
13208+ movl $cpu_gdt_table, %ebx
13209+#endif
13210+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13211+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13212 shl $16, %eax
13213 addl %esp, %eax /* the adjusted stack pointer */
13214 pushl_cfi $__KERNEL_DS
13215@@ -816,7 +1065,7 @@ vector=vector+1
13216 .endr
13217 2: jmp common_interrupt
13218 .endr
13219-END(irq_entries_start)
13220+ENDPROC(irq_entries_start)
13221
13222 .previous
13223 END(interrupt)
13224@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13225 pushl_cfi $do_coprocessor_error
13226 jmp error_code
13227 CFI_ENDPROC
13228-END(coprocessor_error)
13229+ENDPROC(coprocessor_error)
13230
13231 ENTRY(simd_coprocessor_error)
13232 RING0_INT_FRAME
13233@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13234 #endif
13235 jmp error_code
13236 CFI_ENDPROC
13237-END(simd_coprocessor_error)
13238+ENDPROC(simd_coprocessor_error)
13239
13240 ENTRY(device_not_available)
13241 RING0_INT_FRAME
13242@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13243 pushl_cfi $do_device_not_available
13244 jmp error_code
13245 CFI_ENDPROC
13246-END(device_not_available)
13247+ENDPROC(device_not_available)
13248
13249 #ifdef CONFIG_PARAVIRT
13250 ENTRY(native_iret)
13251@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13252 .align 4
13253 .long native_iret, iret_exc
13254 .previous
13255-END(native_iret)
13256+ENDPROC(native_iret)
13257
13258 ENTRY(native_irq_enable_sysexit)
13259 sti
13260 sysexit
13261-END(native_irq_enable_sysexit)
13262+ENDPROC(native_irq_enable_sysexit)
13263 #endif
13264
13265 ENTRY(overflow)
13266@@ -916,7 +1165,7 @@ ENTRY(overflow)
13267 pushl_cfi $do_overflow
13268 jmp error_code
13269 CFI_ENDPROC
13270-END(overflow)
13271+ENDPROC(overflow)
13272
13273 ENTRY(bounds)
13274 RING0_INT_FRAME
13275@@ -924,7 +1173,7 @@ ENTRY(bounds)
13276 pushl_cfi $do_bounds
13277 jmp error_code
13278 CFI_ENDPROC
13279-END(bounds)
13280+ENDPROC(bounds)
13281
13282 ENTRY(invalid_op)
13283 RING0_INT_FRAME
13284@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13285 pushl_cfi $do_invalid_op
13286 jmp error_code
13287 CFI_ENDPROC
13288-END(invalid_op)
13289+ENDPROC(invalid_op)
13290
13291 ENTRY(coprocessor_segment_overrun)
13292 RING0_INT_FRAME
13293@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13294 pushl_cfi $do_coprocessor_segment_overrun
13295 jmp error_code
13296 CFI_ENDPROC
13297-END(coprocessor_segment_overrun)
13298+ENDPROC(coprocessor_segment_overrun)
13299
13300 ENTRY(invalid_TSS)
13301 RING0_EC_FRAME
13302 pushl_cfi $do_invalid_TSS
13303 jmp error_code
13304 CFI_ENDPROC
13305-END(invalid_TSS)
13306+ENDPROC(invalid_TSS)
13307
13308 ENTRY(segment_not_present)
13309 RING0_EC_FRAME
13310 pushl_cfi $do_segment_not_present
13311 jmp error_code
13312 CFI_ENDPROC
13313-END(segment_not_present)
13314+ENDPROC(segment_not_present)
13315
13316 ENTRY(stack_segment)
13317 RING0_EC_FRAME
13318 pushl_cfi $do_stack_segment
13319 jmp error_code
13320 CFI_ENDPROC
13321-END(stack_segment)
13322+ENDPROC(stack_segment)
13323
13324 ENTRY(alignment_check)
13325 RING0_EC_FRAME
13326 pushl_cfi $do_alignment_check
13327 jmp error_code
13328 CFI_ENDPROC
13329-END(alignment_check)
13330+ENDPROC(alignment_check)
13331
13332 ENTRY(divide_error)
13333 RING0_INT_FRAME
13334@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13335 pushl_cfi $do_divide_error
13336 jmp error_code
13337 CFI_ENDPROC
13338-END(divide_error)
13339+ENDPROC(divide_error)
13340
13341 #ifdef CONFIG_X86_MCE
13342 ENTRY(machine_check)
13343@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13344 pushl_cfi machine_check_vector
13345 jmp error_code
13346 CFI_ENDPROC
13347-END(machine_check)
13348+ENDPROC(machine_check)
13349 #endif
13350
13351 ENTRY(spurious_interrupt_bug)
13352@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13353 pushl_cfi $do_spurious_interrupt_bug
13354 jmp error_code
13355 CFI_ENDPROC
13356-END(spurious_interrupt_bug)
13357+ENDPROC(spurious_interrupt_bug)
13358 /*
13359 * End of kprobes section
13360 */
13361@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13362
13363 ENTRY(mcount)
13364 ret
13365-END(mcount)
13366+ENDPROC(mcount)
13367
13368 ENTRY(ftrace_caller)
13369 cmpl $0, function_trace_stop
13370@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13371 .globl ftrace_stub
13372 ftrace_stub:
13373 ret
13374-END(ftrace_caller)
13375+ENDPROC(ftrace_caller)
13376
13377 #else /* ! CONFIG_DYNAMIC_FTRACE */
13378
13379@@ -1174,7 +1423,7 @@ trace:
13380 popl %ecx
13381 popl %eax
13382 jmp ftrace_stub
13383-END(mcount)
13384+ENDPROC(mcount)
13385 #endif /* CONFIG_DYNAMIC_FTRACE */
13386 #endif /* CONFIG_FUNCTION_TRACER */
13387
13388@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13389 popl %ecx
13390 popl %eax
13391 ret
13392-END(ftrace_graph_caller)
13393+ENDPROC(ftrace_graph_caller)
13394
13395 .globl return_to_handler
13396 return_to_handler:
13397@@ -1209,7 +1458,6 @@ return_to_handler:
13398 jmp *%ecx
13399 #endif
13400
13401-.section .rodata,"a"
13402 #include "syscall_table_32.S"
13403
13404 syscall_table_size=(.-sys_call_table)
13405@@ -1255,15 +1503,18 @@ error_code:
13406 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13407 REG_TO_PTGS %ecx
13408 SET_KERNEL_GS %ecx
13409- movl $(__USER_DS), %ecx
13410+ movl $(__KERNEL_DS), %ecx
13411 movl %ecx, %ds
13412 movl %ecx, %es
13413+
13414+ pax_enter_kernel
13415+
13416 TRACE_IRQS_OFF
13417 movl %esp,%eax # pt_regs pointer
13418 call *%edi
13419 jmp ret_from_exception
13420 CFI_ENDPROC
13421-END(page_fault)
13422+ENDPROC(page_fault)
13423
13424 /*
13425 * Debug traps and NMI can happen at the one SYSENTER instruction
13426@@ -1305,7 +1556,7 @@ debug_stack_correct:
13427 call do_debug
13428 jmp ret_from_exception
13429 CFI_ENDPROC
13430-END(debug)
13431+ENDPROC(debug)
13432
13433 /*
13434 * NMI is doubly nasty. It can happen _while_ we're handling
13435@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13436 xorl %edx,%edx # zero error code
13437 movl %esp,%eax # pt_regs pointer
13438 call do_nmi
13439+
13440+ pax_exit_kernel
13441+
13442 jmp restore_all_notrace
13443 CFI_ENDPROC
13444
13445@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13446 FIXUP_ESPFIX_STACK # %eax == %esp
13447 xorl %edx,%edx # zero error code
13448 call do_nmi
13449+
13450+ pax_exit_kernel
13451+
13452 RESTORE_REGS
13453 lss 12+4(%esp), %esp # back to espfix stack
13454 CFI_ADJUST_CFA_OFFSET -24
13455 jmp irq_return
13456 CFI_ENDPROC
13457-END(nmi)
13458+ENDPROC(nmi)
13459
13460 ENTRY(int3)
13461 RING0_INT_FRAME
13462@@ -1395,14 +1652,14 @@ ENTRY(int3)
13463 call do_int3
13464 jmp ret_from_exception
13465 CFI_ENDPROC
13466-END(int3)
13467+ENDPROC(int3)
13468
13469 ENTRY(general_protection)
13470 RING0_EC_FRAME
13471 pushl_cfi $do_general_protection
13472 jmp error_code
13473 CFI_ENDPROC
13474-END(general_protection)
13475+ENDPROC(general_protection)
13476
13477 #ifdef CONFIG_KVM_GUEST
13478 ENTRY(async_page_fault)
13479@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13480 pushl_cfi $do_async_page_fault
13481 jmp error_code
13482 CFI_ENDPROC
13483-END(async_page_fault)
13484+ENDPROC(async_page_fault)
13485 #endif
13486
13487 /*
13488diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13489index faf8d5e..f58c441 100644
13490--- a/arch/x86/kernel/entry_64.S
13491+++ b/arch/x86/kernel/entry_64.S
13492@@ -55,6 +55,8 @@
13493 #include <asm/paravirt.h>
13494 #include <asm/ftrace.h>
13495 #include <asm/percpu.h>
13496+#include <asm/pgtable.h>
13497+#include <asm/alternative-asm.h>
13498
13499 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13500 #include <linux/elf-em.h>
13501@@ -68,8 +70,9 @@
13502 #ifdef CONFIG_FUNCTION_TRACER
13503 #ifdef CONFIG_DYNAMIC_FTRACE
13504 ENTRY(mcount)
13505+ pax_force_retaddr
13506 retq
13507-END(mcount)
13508+ENDPROC(mcount)
13509
13510 ENTRY(ftrace_caller)
13511 cmpl $0, function_trace_stop
13512@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13513 #endif
13514
13515 GLOBAL(ftrace_stub)
13516+ pax_force_retaddr
13517 retq
13518-END(ftrace_caller)
13519+ENDPROC(ftrace_caller)
13520
13521 #else /* ! CONFIG_DYNAMIC_FTRACE */
13522 ENTRY(mcount)
13523@@ -112,6 +116,7 @@ ENTRY(mcount)
13524 #endif
13525
13526 GLOBAL(ftrace_stub)
13527+ pax_force_retaddr
13528 retq
13529
13530 trace:
13531@@ -121,12 +126,13 @@ trace:
13532 movq 8(%rbp), %rsi
13533 subq $MCOUNT_INSN_SIZE, %rdi
13534
13535+ pax_force_fptr ftrace_trace_function
13536 call *ftrace_trace_function
13537
13538 MCOUNT_RESTORE_FRAME
13539
13540 jmp ftrace_stub
13541-END(mcount)
13542+ENDPROC(mcount)
13543 #endif /* CONFIG_DYNAMIC_FTRACE */
13544 #endif /* CONFIG_FUNCTION_TRACER */
13545
13546@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13547
13548 MCOUNT_RESTORE_FRAME
13549
13550+ pax_force_retaddr
13551 retq
13552-END(ftrace_graph_caller)
13553+ENDPROC(ftrace_graph_caller)
13554
13555 GLOBAL(return_to_handler)
13556 subq $24, %rsp
13557@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13558 movq 8(%rsp), %rdx
13559 movq (%rsp), %rax
13560 addq $24, %rsp
13561+ pax_force_fptr %rdi
13562 jmp *%rdi
13563 #endif
13564
13565@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13566 ENDPROC(native_usergs_sysret64)
13567 #endif /* CONFIG_PARAVIRT */
13568
13569+ .macro ljmpq sel, off
13570+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13571+ .byte 0x48; ljmp *1234f(%rip)
13572+ .pushsection .rodata
13573+ .align 16
13574+ 1234: .quad \off; .word \sel
13575+ .popsection
13576+#else
13577+ pushq $\sel
13578+ pushq $\off
13579+ lretq
13580+#endif
13581+ .endm
13582+
13583+ .macro pax_enter_kernel
13584+ pax_set_fptr_mask
13585+#ifdef CONFIG_PAX_KERNEXEC
13586+ call pax_enter_kernel
13587+#endif
13588+ .endm
13589+
13590+ .macro pax_exit_kernel
13591+#ifdef CONFIG_PAX_KERNEXEC
13592+ call pax_exit_kernel
13593+#endif
13594+ .endm
13595+
13596+#ifdef CONFIG_PAX_KERNEXEC
13597+ENTRY(pax_enter_kernel)
13598+ pushq %rdi
13599+
13600+#ifdef CONFIG_PARAVIRT
13601+ PV_SAVE_REGS(CLBR_RDI)
13602+#endif
13603+
13604+ GET_CR0_INTO_RDI
13605+ bts $16,%rdi
13606+ jnc 3f
13607+ mov %cs,%edi
13608+ cmp $__KERNEL_CS,%edi
13609+ jnz 2f
13610+1:
13611+
13612+#ifdef CONFIG_PARAVIRT
13613+ PV_RESTORE_REGS(CLBR_RDI)
13614+#endif
13615+
13616+ popq %rdi
13617+ pax_force_retaddr
13618+ retq
13619+
13620+2: ljmpq __KERNEL_CS,1f
13621+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13622+4: SET_RDI_INTO_CR0
13623+ jmp 1b
13624+ENDPROC(pax_enter_kernel)
13625+
13626+ENTRY(pax_exit_kernel)
13627+ pushq %rdi
13628+
13629+#ifdef CONFIG_PARAVIRT
13630+ PV_SAVE_REGS(CLBR_RDI)
13631+#endif
13632+
13633+ mov %cs,%rdi
13634+ cmp $__KERNEXEC_KERNEL_CS,%edi
13635+ jz 2f
13636+1:
13637+
13638+#ifdef CONFIG_PARAVIRT
13639+ PV_RESTORE_REGS(CLBR_RDI);
13640+#endif
13641+
13642+ popq %rdi
13643+ pax_force_retaddr
13644+ retq
13645+
13646+2: GET_CR0_INTO_RDI
13647+ btr $16,%rdi
13648+ ljmpq __KERNEL_CS,3f
13649+3: SET_RDI_INTO_CR0
13650+ jmp 1b
13651+#ifdef CONFIG_PARAVIRT
13652+ PV_RESTORE_REGS(CLBR_RDI);
13653+#endif
13654+
13655+ popq %rdi
13656+ pax_force_retaddr
13657+ retq
13658+ENDPROC(pax_exit_kernel)
13659+#endif
13660+
13661+ .macro pax_enter_kernel_user
13662+ pax_set_fptr_mask
13663+#ifdef CONFIG_PAX_MEMORY_UDEREF
13664+ call pax_enter_kernel_user
13665+#endif
13666+ .endm
13667+
13668+ .macro pax_exit_kernel_user
13669+#ifdef CONFIG_PAX_MEMORY_UDEREF
13670+ call pax_exit_kernel_user
13671+#endif
13672+#ifdef CONFIG_PAX_RANDKSTACK
13673+ pushq %rax
13674+ call pax_randomize_kstack
13675+ popq %rax
13676+#endif
13677+ .endm
13678+
13679+#ifdef CONFIG_PAX_MEMORY_UDEREF
13680+ENTRY(pax_enter_kernel_user)
13681+ pushq %rdi
13682+ pushq %rbx
13683+
13684+#ifdef CONFIG_PARAVIRT
13685+ PV_SAVE_REGS(CLBR_RDI)
13686+#endif
13687+
13688+ GET_CR3_INTO_RDI
13689+ mov %rdi,%rbx
13690+ add $__START_KERNEL_map,%rbx
13691+ sub phys_base(%rip),%rbx
13692+
13693+#ifdef CONFIG_PARAVIRT
13694+ pushq %rdi
13695+ cmpl $0, pv_info+PARAVIRT_enabled
13696+ jz 1f
13697+ i = 0
13698+ .rept USER_PGD_PTRS
13699+ mov i*8(%rbx),%rsi
13700+ mov $0,%sil
13701+ lea i*8(%rbx),%rdi
13702+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13703+ i = i + 1
13704+ .endr
13705+ jmp 2f
13706+1:
13707+#endif
13708+
13709+ i = 0
13710+ .rept USER_PGD_PTRS
13711+ movb $0,i*8(%rbx)
13712+ i = i + 1
13713+ .endr
13714+
13715+#ifdef CONFIG_PARAVIRT
13716+2: popq %rdi
13717+#endif
13718+ SET_RDI_INTO_CR3
13719+
13720+#ifdef CONFIG_PAX_KERNEXEC
13721+ GET_CR0_INTO_RDI
13722+ bts $16,%rdi
13723+ SET_RDI_INTO_CR0
13724+#endif
13725+
13726+#ifdef CONFIG_PARAVIRT
13727+ PV_RESTORE_REGS(CLBR_RDI)
13728+#endif
13729+
13730+ popq %rbx
13731+ popq %rdi
13732+ pax_force_retaddr
13733+ retq
13734+ENDPROC(pax_enter_kernel_user)
13735+
13736+ENTRY(pax_exit_kernel_user)
13737+ push %rdi
13738+
13739+#ifdef CONFIG_PARAVIRT
13740+ pushq %rbx
13741+ PV_SAVE_REGS(CLBR_RDI)
13742+#endif
13743+
13744+#ifdef CONFIG_PAX_KERNEXEC
13745+ GET_CR0_INTO_RDI
13746+ btr $16,%rdi
13747+ SET_RDI_INTO_CR0
13748+#endif
13749+
13750+ GET_CR3_INTO_RDI
13751+ add $__START_KERNEL_map,%rdi
13752+ sub phys_base(%rip),%rdi
13753+
13754+#ifdef CONFIG_PARAVIRT
13755+ cmpl $0, pv_info+PARAVIRT_enabled
13756+ jz 1f
13757+ mov %rdi,%rbx
13758+ i = 0
13759+ .rept USER_PGD_PTRS
13760+ mov i*8(%rbx),%rsi
13761+ mov $0x67,%sil
13762+ lea i*8(%rbx),%rdi
13763+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13764+ i = i + 1
13765+ .endr
13766+ jmp 2f
13767+1:
13768+#endif
13769+
13770+ i = 0
13771+ .rept USER_PGD_PTRS
13772+ movb $0x67,i*8(%rdi)
13773+ i = i + 1
13774+ .endr
13775+
13776+#ifdef CONFIG_PARAVIRT
13777+2: PV_RESTORE_REGS(CLBR_RDI)
13778+ popq %rbx
13779+#endif
13780+
13781+ popq %rdi
13782+ pax_force_retaddr
13783+ retq
13784+ENDPROC(pax_exit_kernel_user)
13785+#endif
13786+
13787+.macro pax_erase_kstack
13788+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13789+ call pax_erase_kstack
13790+#endif
13791+.endm
13792+
13793+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13794+/*
13795+ * r11: thread_info
13796+ * rcx, rdx: can be clobbered
13797+ */
13798+ENTRY(pax_erase_kstack)
13799+ pushq %rdi
13800+ pushq %rax
13801+ pushq %r11
13802+
13803+ GET_THREAD_INFO(%r11)
13804+ mov TI_lowest_stack(%r11), %rdi
13805+ mov $-0xBEEF, %rax
13806+ std
13807+
13808+1: mov %edi, %ecx
13809+ and $THREAD_SIZE_asm - 1, %ecx
13810+ shr $3, %ecx
13811+ repne scasq
13812+ jecxz 2f
13813+
13814+ cmp $2*8, %ecx
13815+ jc 2f
13816+
13817+ mov $2*8, %ecx
13818+ repe scasq
13819+ jecxz 2f
13820+ jne 1b
13821+
13822+2: cld
13823+ mov %esp, %ecx
13824+ sub %edi, %ecx
13825+
13826+ cmp $THREAD_SIZE_asm, %rcx
13827+ jb 3f
13828+ ud2
13829+3:
13830+
13831+ shr $3, %ecx
13832+ rep stosq
13833+
13834+ mov TI_task_thread_sp0(%r11), %rdi
13835+ sub $256, %rdi
13836+ mov %rdi, TI_lowest_stack(%r11)
13837+
13838+ popq %r11
13839+ popq %rax
13840+ popq %rdi
13841+ pax_force_retaddr
13842+ ret
13843+ENDPROC(pax_erase_kstack)
13844+#endif
13845
13846 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13847 #ifdef CONFIG_TRACE_IRQFLAGS
13848@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13849 .endm
13850
13851 .macro UNFAKE_STACK_FRAME
13852- addq $8*6, %rsp
13853- CFI_ADJUST_CFA_OFFSET -(6*8)
13854+ addq $8*6 + ARG_SKIP, %rsp
13855+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13856 .endm
13857
13858 /*
13859@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13860 movq %rsp, %rsi
13861
13862 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13863- testl $3, CS(%rdi)
13864+ testb $3, CS(%rdi)
13865 je 1f
13866 SWAPGS
13867 /*
13868@@ -355,9 +639,10 @@ ENTRY(save_rest)
13869 movq_cfi r15, R15+16
13870 movq %r11, 8(%rsp) /* return address */
13871 FIXUP_TOP_OF_STACK %r11, 16
13872+ pax_force_retaddr
13873 ret
13874 CFI_ENDPROC
13875-END(save_rest)
13876+ENDPROC(save_rest)
13877
13878 /* save complete stack frame */
13879 .pushsection .kprobes.text, "ax"
13880@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13881 js 1f /* negative -> in kernel */
13882 SWAPGS
13883 xorl %ebx,%ebx
13884-1: ret
13885+1: pax_force_retaddr_bts
13886+ ret
13887 CFI_ENDPROC
13888-END(save_paranoid)
13889+ENDPROC(save_paranoid)
13890 .popsection
13891
13892 /*
13893@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13894
13895 RESTORE_REST
13896
13897- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13898+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13899 je int_ret_from_sys_call
13900
13901 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13902@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13903 jmp ret_from_sys_call # go to the SYSRET fastpath
13904
13905 CFI_ENDPROC
13906-END(ret_from_fork)
13907+ENDPROC(ret_from_fork)
13908
13909 /*
13910 * System call entry. Up to 6 arguments in registers are supported.
13911@@ -456,7 +742,7 @@ END(ret_from_fork)
13912 ENTRY(system_call)
13913 CFI_STARTPROC simple
13914 CFI_SIGNAL_FRAME
13915- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13916+ CFI_DEF_CFA rsp,0
13917 CFI_REGISTER rip,rcx
13918 /*CFI_REGISTER rflags,r11*/
13919 SWAPGS_UNSAFE_STACK
13920@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13921
13922 movq %rsp,PER_CPU_VAR(old_rsp)
13923 movq PER_CPU_VAR(kernel_stack),%rsp
13924+ SAVE_ARGS 8*6,0
13925+ pax_enter_kernel_user
13926 /*
13927 * No need to follow this irqs off/on section - it's straight
13928 * and short:
13929 */
13930 ENABLE_INTERRUPTS(CLBR_NONE)
13931- SAVE_ARGS 8,0
13932 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13933 movq %rcx,RIP-ARGOFFSET(%rsp)
13934 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13935@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13936 system_call_fastpath:
13937 cmpq $__NR_syscall_max,%rax
13938 ja badsys
13939- movq %r10,%rcx
13940+ movq R10-ARGOFFSET(%rsp),%rcx
13941 call *sys_call_table(,%rax,8) # XXX: rip relative
13942 movq %rax,RAX-ARGOFFSET(%rsp)
13943 /*
13944@@ -503,6 +790,8 @@ sysret_check:
13945 andl %edi,%edx
13946 jnz sysret_careful
13947 CFI_REMEMBER_STATE
13948+ pax_exit_kernel_user
13949+ pax_erase_kstack
13950 /*
13951 * sysretq will re-enable interrupts:
13952 */
13953@@ -554,14 +843,18 @@ badsys:
13954 * jump back to the normal fast path.
13955 */
13956 auditsys:
13957- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13958+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13959 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13960 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13961 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13962 movq %rax,%rsi /* 2nd arg: syscall number */
13963 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13964 call audit_syscall_entry
13965+
13966+ pax_erase_kstack
13967+
13968 LOAD_ARGS 0 /* reload call-clobbered registers */
13969+ pax_set_fptr_mask
13970 jmp system_call_fastpath
13971
13972 /*
13973@@ -591,16 +884,20 @@ tracesys:
13974 FIXUP_TOP_OF_STACK %rdi
13975 movq %rsp,%rdi
13976 call syscall_trace_enter
13977+
13978+ pax_erase_kstack
13979+
13980 /*
13981 * Reload arg registers from stack in case ptrace changed them.
13982 * We don't reload %rax because syscall_trace_enter() returned
13983 * the value it wants us to use in the table lookup.
13984 */
13985 LOAD_ARGS ARGOFFSET, 1
13986+ pax_set_fptr_mask
13987 RESTORE_REST
13988 cmpq $__NR_syscall_max,%rax
13989 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13990- movq %r10,%rcx /* fixup for C */
13991+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13992 call *sys_call_table(,%rax,8)
13993 movq %rax,RAX-ARGOFFSET(%rsp)
13994 /* Use IRET because user could have changed frame */
13995@@ -612,7 +909,7 @@ tracesys:
13996 GLOBAL(int_ret_from_sys_call)
13997 DISABLE_INTERRUPTS(CLBR_NONE)
13998 TRACE_IRQS_OFF
13999- testl $3,CS-ARGOFFSET(%rsp)
14000+ testb $3,CS-ARGOFFSET(%rsp)
14001 je retint_restore_args
14002 movl $_TIF_ALLWORK_MASK,%edi
14003 /* edi: mask to check */
14004@@ -669,7 +966,7 @@ int_restore_rest:
14005 TRACE_IRQS_OFF
14006 jmp int_with_check
14007 CFI_ENDPROC
14008-END(system_call)
14009+ENDPROC(system_call)
14010
14011 /*
14012 * Certain special system calls that need to save a complete full stack frame.
14013@@ -685,7 +982,7 @@ ENTRY(\label)
14014 call \func
14015 jmp ptregscall_common
14016 CFI_ENDPROC
14017-END(\label)
14018+ENDPROC(\label)
14019 .endm
14020
14021 PTREGSCALL stub_clone, sys_clone, %r8
14022@@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
14023 movq_cfi_restore R12+8, r12
14024 movq_cfi_restore RBP+8, rbp
14025 movq_cfi_restore RBX+8, rbx
14026+ pax_force_retaddr
14027 ret $REST_SKIP /* pop extended registers */
14028 CFI_ENDPROC
14029-END(ptregscall_common)
14030+ENDPROC(ptregscall_common)
14031
14032 ENTRY(stub_execve)
14033 CFI_STARTPROC
14034@@ -720,7 +1018,7 @@ ENTRY(stub_execve)
14035 RESTORE_REST
14036 jmp int_ret_from_sys_call
14037 CFI_ENDPROC
14038-END(stub_execve)
14039+ENDPROC(stub_execve)
14040
14041 /*
14042 * sigreturn is special because it needs to restore all registers on return.
14043@@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
14044 RESTORE_REST
14045 jmp int_ret_from_sys_call
14046 CFI_ENDPROC
14047-END(stub_rt_sigreturn)
14048+ENDPROC(stub_rt_sigreturn)
14049
14050 /*
14051 * Build the entry stubs and pointer table with some assembler magic.
14052@@ -773,7 +1071,7 @@ vector=vector+1
14053 2: jmp common_interrupt
14054 .endr
14055 CFI_ENDPROC
14056-END(irq_entries_start)
14057+ENDPROC(irq_entries_start)
14058
14059 .previous
14060 END(interrupt)
14061@@ -793,6 +1091,16 @@ END(interrupt)
14062 subq $ORIG_RAX-RBP, %rsp
14063 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14064 SAVE_ARGS_IRQ
14065+#ifdef CONFIG_PAX_MEMORY_UDEREF
14066+ testb $3, CS(%rdi)
14067+ jnz 1f
14068+ pax_enter_kernel
14069+ jmp 2f
14070+1: pax_enter_kernel_user
14071+2:
14072+#else
14073+ pax_enter_kernel
14074+#endif
14075 call \func
14076 .endm
14077
14078@@ -824,7 +1132,7 @@ ret_from_intr:
14079
14080 exit_intr:
14081 GET_THREAD_INFO(%rcx)
14082- testl $3,CS-ARGOFFSET(%rsp)
14083+ testb $3,CS-ARGOFFSET(%rsp)
14084 je retint_kernel
14085
14086 /* Interrupt came from user space */
14087@@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
14088 * The iretq could re-enable interrupts:
14089 */
14090 DISABLE_INTERRUPTS(CLBR_ANY)
14091+ pax_exit_kernel_user
14092+ pax_erase_kstack
14093 TRACE_IRQS_IRETQ
14094 SWAPGS
14095 jmp restore_args
14096
14097 retint_restore_args: /* return to kernel space */
14098 DISABLE_INTERRUPTS(CLBR_ANY)
14099+ pax_exit_kernel
14100+ pax_force_retaddr RIP-ARGOFFSET
14101 /*
14102 * The iretq could re-enable interrupts:
14103 */
14104@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14105 #endif
14106
14107 CFI_ENDPROC
14108-END(common_interrupt)
14109+ENDPROC(common_interrupt)
14110 /*
14111 * End of kprobes section
14112 */
14113@@ -956,7 +1268,7 @@ ENTRY(\sym)
14114 interrupt \do_sym
14115 jmp ret_from_intr
14116 CFI_ENDPROC
14117-END(\sym)
14118+ENDPROC(\sym)
14119 .endm
14120
14121 #ifdef CONFIG_SMP
14122@@ -1021,12 +1333,22 @@ ENTRY(\sym)
14123 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14124 call error_entry
14125 DEFAULT_FRAME 0
14126+#ifdef CONFIG_PAX_MEMORY_UDEREF
14127+ testb $3, CS(%rsp)
14128+ jnz 1f
14129+ pax_enter_kernel
14130+ jmp 2f
14131+1: pax_enter_kernel_user
14132+2:
14133+#else
14134+ pax_enter_kernel
14135+#endif
14136 movq %rsp,%rdi /* pt_regs pointer */
14137 xorl %esi,%esi /* no error code */
14138 call \do_sym
14139 jmp error_exit /* %ebx: no swapgs flag */
14140 CFI_ENDPROC
14141-END(\sym)
14142+ENDPROC(\sym)
14143 .endm
14144
14145 .macro paranoidzeroentry sym do_sym
14146@@ -1038,15 +1360,25 @@ ENTRY(\sym)
14147 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14148 call save_paranoid
14149 TRACE_IRQS_OFF
14150+#ifdef CONFIG_PAX_MEMORY_UDEREF
14151+ testb $3, CS(%rsp)
14152+ jnz 1f
14153+ pax_enter_kernel
14154+ jmp 2f
14155+1: pax_enter_kernel_user
14156+2:
14157+#else
14158+ pax_enter_kernel
14159+#endif
14160 movq %rsp,%rdi /* pt_regs pointer */
14161 xorl %esi,%esi /* no error code */
14162 call \do_sym
14163 jmp paranoid_exit /* %ebx: no swapgs flag */
14164 CFI_ENDPROC
14165-END(\sym)
14166+ENDPROC(\sym)
14167 .endm
14168
14169-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14170+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14171 .macro paranoidzeroentry_ist sym do_sym ist
14172 ENTRY(\sym)
14173 INTR_FRAME
14174@@ -1056,14 +1388,30 @@ ENTRY(\sym)
14175 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14176 call save_paranoid
14177 TRACE_IRQS_OFF
14178+#ifdef CONFIG_PAX_MEMORY_UDEREF
14179+ testb $3, CS(%rsp)
14180+ jnz 1f
14181+ pax_enter_kernel
14182+ jmp 2f
14183+1: pax_enter_kernel_user
14184+2:
14185+#else
14186+ pax_enter_kernel
14187+#endif
14188 movq %rsp,%rdi /* pt_regs pointer */
14189 xorl %esi,%esi /* no error code */
14190+#ifdef CONFIG_SMP
14191+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14192+ lea init_tss(%r12), %r12
14193+#else
14194+ lea init_tss(%rip), %r12
14195+#endif
14196 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14197 call \do_sym
14198 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14199 jmp paranoid_exit /* %ebx: no swapgs flag */
14200 CFI_ENDPROC
14201-END(\sym)
14202+ENDPROC(\sym)
14203 .endm
14204
14205 .macro errorentry sym do_sym
14206@@ -1074,13 +1422,23 @@ ENTRY(\sym)
14207 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14208 call error_entry
14209 DEFAULT_FRAME 0
14210+#ifdef CONFIG_PAX_MEMORY_UDEREF
14211+ testb $3, CS(%rsp)
14212+ jnz 1f
14213+ pax_enter_kernel
14214+ jmp 2f
14215+1: pax_enter_kernel_user
14216+2:
14217+#else
14218+ pax_enter_kernel
14219+#endif
14220 movq %rsp,%rdi /* pt_regs pointer */
14221 movq ORIG_RAX(%rsp),%rsi /* get error code */
14222 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14223 call \do_sym
14224 jmp error_exit /* %ebx: no swapgs flag */
14225 CFI_ENDPROC
14226-END(\sym)
14227+ENDPROC(\sym)
14228 .endm
14229
14230 /* error code is on the stack already */
14231@@ -1093,13 +1451,23 @@ ENTRY(\sym)
14232 call save_paranoid
14233 DEFAULT_FRAME 0
14234 TRACE_IRQS_OFF
14235+#ifdef CONFIG_PAX_MEMORY_UDEREF
14236+ testb $3, CS(%rsp)
14237+ jnz 1f
14238+ pax_enter_kernel
14239+ jmp 2f
14240+1: pax_enter_kernel_user
14241+2:
14242+#else
14243+ pax_enter_kernel
14244+#endif
14245 movq %rsp,%rdi /* pt_regs pointer */
14246 movq ORIG_RAX(%rsp),%rsi /* get error code */
14247 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14248 call \do_sym
14249 jmp paranoid_exit /* %ebx: no swapgs flag */
14250 CFI_ENDPROC
14251-END(\sym)
14252+ENDPROC(\sym)
14253 .endm
14254
14255 zeroentry divide_error do_divide_error
14256@@ -1129,9 +1497,10 @@ gs_change:
14257 2: mfence /* workaround */
14258 SWAPGS
14259 popfq_cfi
14260+ pax_force_retaddr
14261 ret
14262 CFI_ENDPROC
14263-END(native_load_gs_index)
14264+ENDPROC(native_load_gs_index)
14265
14266 .section __ex_table,"a"
14267 .align 8
14268@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14269 * Here we are in the child and the registers are set as they were
14270 * at kernel_thread() invocation in the parent.
14271 */
14272+ pax_force_fptr %rsi
14273 call *%rsi
14274 # exit
14275 mov %eax, %edi
14276 call do_exit
14277 ud2 # padding for call trace
14278 CFI_ENDPROC
14279-END(kernel_thread_helper)
14280+ENDPROC(kernel_thread_helper)
14281
14282 /*
14283 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14284@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14285 RESTORE_REST
14286 testq %rax,%rax
14287 je int_ret_from_sys_call
14288- RESTORE_ARGS
14289 UNFAKE_STACK_FRAME
14290+ pax_force_retaddr
14291 ret
14292 CFI_ENDPROC
14293-END(kernel_execve)
14294+ENDPROC(kernel_execve)
14295
14296 /* Call softirq on interrupt stack. Interrupts are off. */
14297 ENTRY(call_softirq)
14298@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14299 CFI_DEF_CFA_REGISTER rsp
14300 CFI_ADJUST_CFA_OFFSET -8
14301 decl PER_CPU_VAR(irq_count)
14302+ pax_force_retaddr
14303 ret
14304 CFI_ENDPROC
14305-END(call_softirq)
14306+ENDPROC(call_softirq)
14307
14308 #ifdef CONFIG_XEN
14309 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14310@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14311 decl PER_CPU_VAR(irq_count)
14312 jmp error_exit
14313 CFI_ENDPROC
14314-END(xen_do_hypervisor_callback)
14315+ENDPROC(xen_do_hypervisor_callback)
14316
14317 /*
14318 * Hypervisor uses this for application faults while it executes.
14319@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14320 SAVE_ALL
14321 jmp error_exit
14322 CFI_ENDPROC
14323-END(xen_failsafe_callback)
14324+ENDPROC(xen_failsafe_callback)
14325
14326 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14327 xen_hvm_callback_vector xen_evtchn_do_upcall
14328@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14329 TRACE_IRQS_OFF
14330 testl %ebx,%ebx /* swapgs needed? */
14331 jnz paranoid_restore
14332- testl $3,CS(%rsp)
14333+ testb $3,CS(%rsp)
14334 jnz paranoid_userspace
14335+#ifdef CONFIG_PAX_MEMORY_UDEREF
14336+ pax_exit_kernel
14337+ TRACE_IRQS_IRETQ 0
14338+ SWAPGS_UNSAFE_STACK
14339+ RESTORE_ALL 8
14340+ pax_force_retaddr_bts
14341+ jmp irq_return
14342+#endif
14343 paranoid_swapgs:
14344+#ifdef CONFIG_PAX_MEMORY_UDEREF
14345+ pax_exit_kernel_user
14346+#else
14347+ pax_exit_kernel
14348+#endif
14349 TRACE_IRQS_IRETQ 0
14350 SWAPGS_UNSAFE_STACK
14351 RESTORE_ALL 8
14352 jmp irq_return
14353 paranoid_restore:
14354+ pax_exit_kernel
14355 TRACE_IRQS_IRETQ 0
14356 RESTORE_ALL 8
14357+ pax_force_retaddr_bts
14358 jmp irq_return
14359 paranoid_userspace:
14360 GET_THREAD_INFO(%rcx)
14361@@ -1394,7 +1780,7 @@ paranoid_schedule:
14362 TRACE_IRQS_OFF
14363 jmp paranoid_userspace
14364 CFI_ENDPROC
14365-END(paranoid_exit)
14366+ENDPROC(paranoid_exit)
14367
14368 /*
14369 * Exception entry point. This expects an error code/orig_rax on the stack.
14370@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14371 movq_cfi r14, R14+8
14372 movq_cfi r15, R15+8
14373 xorl %ebx,%ebx
14374- testl $3,CS+8(%rsp)
14375+ testb $3,CS+8(%rsp)
14376 je error_kernelspace
14377 error_swapgs:
14378 SWAPGS
14379 error_sti:
14380 TRACE_IRQS_OFF
14381+ pax_force_retaddr_bts
14382 ret
14383
14384 /*
14385@@ -1453,7 +1840,7 @@ bstep_iret:
14386 movq %rcx,RIP+8(%rsp)
14387 jmp error_swapgs
14388 CFI_ENDPROC
14389-END(error_entry)
14390+ENDPROC(error_entry)
14391
14392
14393 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14394@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14395 jnz retint_careful
14396 jmp retint_swapgs
14397 CFI_ENDPROC
14398-END(error_exit)
14399+ENDPROC(error_exit)
14400
14401
14402 /* runs on exception stack */
14403@@ -1485,6 +1872,16 @@ ENTRY(nmi)
14404 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14405 call save_paranoid
14406 DEFAULT_FRAME 0
14407+#ifdef CONFIG_PAX_MEMORY_UDEREF
14408+ testb $3, CS(%rsp)
14409+ jnz 1f
14410+ pax_enter_kernel
14411+ jmp 2f
14412+1: pax_enter_kernel_user
14413+2:
14414+#else
14415+ pax_enter_kernel
14416+#endif
14417 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14418 movq %rsp,%rdi
14419 movq $-1,%rsi
14420@@ -1495,12 +1892,28 @@ ENTRY(nmi)
14421 DISABLE_INTERRUPTS(CLBR_NONE)
14422 testl %ebx,%ebx /* swapgs needed? */
14423 jnz nmi_restore
14424- testl $3,CS(%rsp)
14425+ testb $3,CS(%rsp)
14426 jnz nmi_userspace
14427+#ifdef CONFIG_PAX_MEMORY_UDEREF
14428+ pax_exit_kernel
14429+ SWAPGS_UNSAFE_STACK
14430+ RESTORE_ALL 8
14431+ pax_force_retaddr_bts
14432+ jmp irq_return
14433+#endif
14434 nmi_swapgs:
14435+#ifdef CONFIG_PAX_MEMORY_UDEREF
14436+ pax_exit_kernel_user
14437+#else
14438+ pax_exit_kernel
14439+#endif
14440 SWAPGS_UNSAFE_STACK
14441+ RESTORE_ALL 8
14442+ jmp irq_return
14443 nmi_restore:
14444+ pax_exit_kernel
14445 RESTORE_ALL 8
14446+ pax_force_retaddr_bts
14447 jmp irq_return
14448 nmi_userspace:
14449 GET_THREAD_INFO(%rcx)
14450@@ -1529,14 +1942,14 @@ nmi_schedule:
14451 jmp paranoid_exit
14452 CFI_ENDPROC
14453 #endif
14454-END(nmi)
14455+ENDPROC(nmi)
14456
14457 ENTRY(ignore_sysret)
14458 CFI_STARTPROC
14459 mov $-ENOSYS,%eax
14460 sysret
14461 CFI_ENDPROC
14462-END(ignore_sysret)
14463+ENDPROC(ignore_sysret)
14464
14465 /*
14466 * End of kprobes section
14467diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14468index c9a281f..ce2f317 100644
14469--- a/arch/x86/kernel/ftrace.c
14470+++ b/arch/x86/kernel/ftrace.c
14471@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14472 static const void *mod_code_newcode; /* holds the text to write to the IP */
14473
14474 static unsigned nmi_wait_count;
14475-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14476+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14477
14478 int ftrace_arch_read_dyn_info(char *buf, int size)
14479 {
14480@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14481
14482 r = snprintf(buf, size, "%u %u",
14483 nmi_wait_count,
14484- atomic_read(&nmi_update_count));
14485+ atomic_read_unchecked(&nmi_update_count));
14486 return r;
14487 }
14488
14489@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14490
14491 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14492 smp_rmb();
14493+ pax_open_kernel();
14494 ftrace_mod_code();
14495- atomic_inc(&nmi_update_count);
14496+ pax_close_kernel();
14497+ atomic_inc_unchecked(&nmi_update_count);
14498 }
14499 /* Must have previous changes seen before executions */
14500 smp_mb();
14501@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14502 {
14503 unsigned char replaced[MCOUNT_INSN_SIZE];
14504
14505+ ip = ktla_ktva(ip);
14506+
14507 /*
14508 * Note: Due to modules and __init, code can
14509 * disappear and change, we need to protect against faulting
14510@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14511 unsigned char old[MCOUNT_INSN_SIZE], *new;
14512 int ret;
14513
14514- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14515+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14516 new = ftrace_call_replace(ip, (unsigned long)func);
14517 ret = ftrace_modify_code(ip, old, new);
14518
14519@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14520 {
14521 unsigned char code[MCOUNT_INSN_SIZE];
14522
14523+ ip = ktla_ktva(ip);
14524+
14525 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14526 return -EFAULT;
14527
14528diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14529index 3bb0850..55a56f4 100644
14530--- a/arch/x86/kernel/head32.c
14531+++ b/arch/x86/kernel/head32.c
14532@@ -19,6 +19,7 @@
14533 #include <asm/io_apic.h>
14534 #include <asm/bios_ebda.h>
14535 #include <asm/tlbflush.h>
14536+#include <asm/boot.h>
14537
14538 static void __init i386_default_early_setup(void)
14539 {
14540@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14541 {
14542 memblock_init();
14543
14544- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14545+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14546
14547 #ifdef CONFIG_BLK_DEV_INITRD
14548 /* Reserve INITRD */
14549diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14550index ce0be7c..c41476e 100644
14551--- a/arch/x86/kernel/head_32.S
14552+++ b/arch/x86/kernel/head_32.S
14553@@ -25,6 +25,12 @@
14554 /* Physical address */
14555 #define pa(X) ((X) - __PAGE_OFFSET)
14556
14557+#ifdef CONFIG_PAX_KERNEXEC
14558+#define ta(X) (X)
14559+#else
14560+#define ta(X) ((X) - __PAGE_OFFSET)
14561+#endif
14562+
14563 /*
14564 * References to members of the new_cpu_data structure.
14565 */
14566@@ -54,11 +60,7 @@
14567 * and small than max_low_pfn, otherwise will waste some page table entries
14568 */
14569
14570-#if PTRS_PER_PMD > 1
14571-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14572-#else
14573-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14574-#endif
14575+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14576
14577 /* Number of possible pages in the lowmem region */
14578 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14579@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14580 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14581
14582 /*
14583+ * Real beginning of normal "text" segment
14584+ */
14585+ENTRY(stext)
14586+ENTRY(_stext)
14587+
14588+/*
14589 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14590 * %esi points to the real-mode code as a 32-bit pointer.
14591 * CS and DS must be 4 GB flat segments, but we don't depend on
14592@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14593 * can.
14594 */
14595 __HEAD
14596+
14597+#ifdef CONFIG_PAX_KERNEXEC
14598+ jmp startup_32
14599+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14600+.fill PAGE_SIZE-5,1,0xcc
14601+#endif
14602+
14603 ENTRY(startup_32)
14604 movl pa(stack_start),%ecx
14605
14606@@ -105,6 +120,57 @@ ENTRY(startup_32)
14607 2:
14608 leal -__PAGE_OFFSET(%ecx),%esp
14609
14610+#ifdef CONFIG_SMP
14611+ movl $pa(cpu_gdt_table),%edi
14612+ movl $__per_cpu_load,%eax
14613+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14614+ rorl $16,%eax
14615+ movb %al,__KERNEL_PERCPU + 4(%edi)
14616+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14617+ movl $__per_cpu_end - 1,%eax
14618+ subl $__per_cpu_start,%eax
14619+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14620+#endif
14621+
14622+#ifdef CONFIG_PAX_MEMORY_UDEREF
14623+ movl $NR_CPUS,%ecx
14624+ movl $pa(cpu_gdt_table),%edi
14625+1:
14626+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14627+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14628+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14629+ addl $PAGE_SIZE_asm,%edi
14630+ loop 1b
14631+#endif
14632+
14633+#ifdef CONFIG_PAX_KERNEXEC
14634+ movl $pa(boot_gdt),%edi
14635+ movl $__LOAD_PHYSICAL_ADDR,%eax
14636+ movw %ax,__BOOT_CS + 2(%edi)
14637+ rorl $16,%eax
14638+ movb %al,__BOOT_CS + 4(%edi)
14639+ movb %ah,__BOOT_CS + 7(%edi)
14640+ rorl $16,%eax
14641+
14642+ ljmp $(__BOOT_CS),$1f
14643+1:
14644+
14645+ movl $NR_CPUS,%ecx
14646+ movl $pa(cpu_gdt_table),%edi
14647+ addl $__PAGE_OFFSET,%eax
14648+1:
14649+ movw %ax,__KERNEL_CS + 2(%edi)
14650+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14651+ rorl $16,%eax
14652+ movb %al,__KERNEL_CS + 4(%edi)
14653+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14654+ movb %ah,__KERNEL_CS + 7(%edi)
14655+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14656+ rorl $16,%eax
14657+ addl $PAGE_SIZE_asm,%edi
14658+ loop 1b
14659+#endif
14660+
14661 /*
14662 * Clear BSS first so that there are no surprises...
14663 */
14664@@ -195,8 +261,11 @@ ENTRY(startup_32)
14665 movl %eax, pa(max_pfn_mapped)
14666
14667 /* Do early initialization of the fixmap area */
14668- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14669- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14670+#ifdef CONFIG_COMPAT_VDSO
14671+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14672+#else
14673+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14674+#endif
14675 #else /* Not PAE */
14676
14677 page_pde_offset = (__PAGE_OFFSET >> 20);
14678@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14679 movl %eax, pa(max_pfn_mapped)
14680
14681 /* Do early initialization of the fixmap area */
14682- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14683- movl %eax,pa(initial_page_table+0xffc)
14684+#ifdef CONFIG_COMPAT_VDSO
14685+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14686+#else
14687+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14688+#endif
14689 #endif
14690
14691 #ifdef CONFIG_PARAVIRT
14692@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14693 cmpl $num_subarch_entries, %eax
14694 jae bad_subarch
14695
14696- movl pa(subarch_entries)(,%eax,4), %eax
14697- subl $__PAGE_OFFSET, %eax
14698- jmp *%eax
14699+ jmp *pa(subarch_entries)(,%eax,4)
14700
14701 bad_subarch:
14702 WEAK(lguest_entry)
14703@@ -255,10 +325,10 @@ WEAK(xen_entry)
14704 __INITDATA
14705
14706 subarch_entries:
14707- .long default_entry /* normal x86/PC */
14708- .long lguest_entry /* lguest hypervisor */
14709- .long xen_entry /* Xen hypervisor */
14710- .long default_entry /* Moorestown MID */
14711+ .long ta(default_entry) /* normal x86/PC */
14712+ .long ta(lguest_entry) /* lguest hypervisor */
14713+ .long ta(xen_entry) /* Xen hypervisor */
14714+ .long ta(default_entry) /* Moorestown MID */
14715 num_subarch_entries = (. - subarch_entries) / 4
14716 .previous
14717 #else
14718@@ -312,6 +382,7 @@ default_entry:
14719 orl %edx,%eax
14720 movl %eax,%cr4
14721
14722+#ifdef CONFIG_X86_PAE
14723 testb $X86_CR4_PAE, %al # check if PAE is enabled
14724 jz 6f
14725
14726@@ -340,6 +411,9 @@ default_entry:
14727 /* Make changes effective */
14728 wrmsr
14729
14730+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14731+#endif
14732+
14733 6:
14734
14735 /*
14736@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14737 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14738 movl %eax,%ss # after changing gdt.
14739
14740- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14741+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14742 movl %eax,%ds
14743 movl %eax,%es
14744
14745@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14746 */
14747 cmpb $0,ready
14748 jne 1f
14749- movl $gdt_page,%eax
14750+ movl $cpu_gdt_table,%eax
14751 movl $stack_canary,%ecx
14752+#ifdef CONFIG_SMP
14753+ addl $__per_cpu_load,%ecx
14754+#endif
14755 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14756 shrl $16, %ecx
14757 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14758 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14759 1:
14760-#endif
14761 movl $(__KERNEL_STACK_CANARY),%eax
14762+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14763+ movl $(__USER_DS),%eax
14764+#else
14765+ xorl %eax,%eax
14766+#endif
14767 movl %eax,%gs
14768
14769 xorl %eax,%eax # Clear LDT
14770@@ -558,22 +639,22 @@ early_page_fault:
14771 jmp early_fault
14772
14773 early_fault:
14774- cld
14775 #ifdef CONFIG_PRINTK
14776+ cmpl $1,%ss:early_recursion_flag
14777+ je hlt_loop
14778+ incl %ss:early_recursion_flag
14779+ cld
14780 pusha
14781 movl $(__KERNEL_DS),%eax
14782 movl %eax,%ds
14783 movl %eax,%es
14784- cmpl $2,early_recursion_flag
14785- je hlt_loop
14786- incl early_recursion_flag
14787 movl %cr2,%eax
14788 pushl %eax
14789 pushl %edx /* trapno */
14790 pushl $fault_msg
14791 call printk
14792+; call dump_stack
14793 #endif
14794- call dump_stack
14795 hlt_loop:
14796 hlt
14797 jmp hlt_loop
14798@@ -581,8 +662,11 @@ hlt_loop:
14799 /* This is the default interrupt "handler" :-) */
14800 ALIGN
14801 ignore_int:
14802- cld
14803 #ifdef CONFIG_PRINTK
14804+ cmpl $2,%ss:early_recursion_flag
14805+ je hlt_loop
14806+ incl %ss:early_recursion_flag
14807+ cld
14808 pushl %eax
14809 pushl %ecx
14810 pushl %edx
14811@@ -591,9 +675,6 @@ ignore_int:
14812 movl $(__KERNEL_DS),%eax
14813 movl %eax,%ds
14814 movl %eax,%es
14815- cmpl $2,early_recursion_flag
14816- je hlt_loop
14817- incl early_recursion_flag
14818 pushl 16(%esp)
14819 pushl 24(%esp)
14820 pushl 32(%esp)
14821@@ -622,29 +703,43 @@ ENTRY(initial_code)
14822 /*
14823 * BSS section
14824 */
14825-__PAGE_ALIGNED_BSS
14826- .align PAGE_SIZE
14827 #ifdef CONFIG_X86_PAE
14828+.section .initial_pg_pmd,"a",@progbits
14829 initial_pg_pmd:
14830 .fill 1024*KPMDS,4,0
14831 #else
14832+.section .initial_page_table,"a",@progbits
14833 ENTRY(initial_page_table)
14834 .fill 1024,4,0
14835 #endif
14836+.section .initial_pg_fixmap,"a",@progbits
14837 initial_pg_fixmap:
14838 .fill 1024,4,0
14839+.section .empty_zero_page,"a",@progbits
14840 ENTRY(empty_zero_page)
14841 .fill 4096,1,0
14842+.section .swapper_pg_dir,"a",@progbits
14843 ENTRY(swapper_pg_dir)
14844+#ifdef CONFIG_X86_PAE
14845+ .fill 4,8,0
14846+#else
14847 .fill 1024,4,0
14848+#endif
14849+
14850+/*
14851+ * The IDT has to be page-aligned to simplify the Pentium
14852+ * F0 0F bug workaround.. We have a special link segment
14853+ * for this.
14854+ */
14855+.section .idt,"a",@progbits
14856+ENTRY(idt_table)
14857+ .fill 256,8,0
14858
14859 /*
14860 * This starts the data section.
14861 */
14862 #ifdef CONFIG_X86_PAE
14863-__PAGE_ALIGNED_DATA
14864- /* Page-aligned for the benefit of paravirt? */
14865- .align PAGE_SIZE
14866+.section .initial_page_table,"a",@progbits
14867 ENTRY(initial_page_table)
14868 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14869 # if KPMDS == 3
14870@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14871 # error "Kernel PMDs should be 1, 2 or 3"
14872 # endif
14873 .align PAGE_SIZE /* needs to be page-sized too */
14874+
14875+#ifdef CONFIG_PAX_PER_CPU_PGD
14876+ENTRY(cpu_pgd)
14877+ .rept NR_CPUS
14878+ .fill 4,8,0
14879+ .endr
14880+#endif
14881+
14882 #endif
14883
14884 .data
14885 .balign 4
14886 ENTRY(stack_start)
14887- .long init_thread_union+THREAD_SIZE
14888+ .long init_thread_union+THREAD_SIZE-8
14889
14890+ready: .byte 0
14891+
14892+.section .rodata,"a",@progbits
14893 early_recursion_flag:
14894 .long 0
14895
14896-ready: .byte 0
14897-
14898 int_msg:
14899 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14900
14901@@ -707,7 +811,7 @@ fault_msg:
14902 .word 0 # 32 bit align gdt_desc.address
14903 boot_gdt_descr:
14904 .word __BOOT_DS+7
14905- .long boot_gdt - __PAGE_OFFSET
14906+ .long pa(boot_gdt)
14907
14908 .word 0 # 32-bit align idt_desc.address
14909 idt_descr:
14910@@ -718,7 +822,7 @@ idt_descr:
14911 .word 0 # 32 bit align gdt_desc.address
14912 ENTRY(early_gdt_descr)
14913 .word GDT_ENTRIES*8-1
14914- .long gdt_page /* Overwritten for secondary CPUs */
14915+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14916
14917 /*
14918 * The boot_gdt must mirror the equivalent in setup.S and is
14919@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14920 .align L1_CACHE_BYTES
14921 ENTRY(boot_gdt)
14922 .fill GDT_ENTRY_BOOT_CS,8,0
14923- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14924- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14925+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14926+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14927+
14928+ .align PAGE_SIZE_asm
14929+ENTRY(cpu_gdt_table)
14930+ .rept NR_CPUS
14931+ .quad 0x0000000000000000 /* NULL descriptor */
14932+ .quad 0x0000000000000000 /* 0x0b reserved */
14933+ .quad 0x0000000000000000 /* 0x13 reserved */
14934+ .quad 0x0000000000000000 /* 0x1b reserved */
14935+
14936+#ifdef CONFIG_PAX_KERNEXEC
14937+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14938+#else
14939+ .quad 0x0000000000000000 /* 0x20 unused */
14940+#endif
14941+
14942+ .quad 0x0000000000000000 /* 0x28 unused */
14943+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14944+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14945+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14946+ .quad 0x0000000000000000 /* 0x4b reserved */
14947+ .quad 0x0000000000000000 /* 0x53 reserved */
14948+ .quad 0x0000000000000000 /* 0x5b reserved */
14949+
14950+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14951+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14952+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14953+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14954+
14955+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14956+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14957+
14958+ /*
14959+ * Segments used for calling PnP BIOS have byte granularity.
14960+ * The code segments and data segments have fixed 64k limits,
14961+ * the transfer segment sizes are set at run time.
14962+ */
14963+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14964+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14965+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14966+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14967+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14968+
14969+ /*
14970+ * The APM segments have byte granularity and their bases
14971+ * are set at run time. All have 64k limits.
14972+ */
14973+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14974+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14975+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14976+
14977+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14978+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14979+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14980+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14981+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14982+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14983+
14984+ /* Be sure this is zeroed to avoid false validations in Xen */
14985+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14986+ .endr
14987diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14988index e11e394..9aebc5d 100644
14989--- a/arch/x86/kernel/head_64.S
14990+++ b/arch/x86/kernel/head_64.S
14991@@ -19,6 +19,8 @@
14992 #include <asm/cache.h>
14993 #include <asm/processor-flags.h>
14994 #include <asm/percpu.h>
14995+#include <asm/cpufeature.h>
14996+#include <asm/alternative-asm.h>
14997
14998 #ifdef CONFIG_PARAVIRT
14999 #include <asm/asm-offsets.h>
15000@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15001 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15002 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15003 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15004+L4_VMALLOC_START = pgd_index(VMALLOC_START)
15005+L3_VMALLOC_START = pud_index(VMALLOC_START)
15006+L4_VMALLOC_END = pgd_index(VMALLOC_END)
15007+L3_VMALLOC_END = pud_index(VMALLOC_END)
15008+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15009+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15010
15011 .text
15012 __HEAD
15013@@ -85,35 +93,23 @@ startup_64:
15014 */
15015 addq %rbp, init_level4_pgt + 0(%rip)
15016 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15017+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15018+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15019+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15020 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15021
15022 addq %rbp, level3_ident_pgt + 0(%rip)
15023+#ifndef CONFIG_XEN
15024+ addq %rbp, level3_ident_pgt + 8(%rip)
15025+#endif
15026
15027- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15028- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15029+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15030+
15031+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15032+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15033
15034 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15035-
15036- /* Add an Identity mapping if I am above 1G */
15037- leaq _text(%rip), %rdi
15038- andq $PMD_PAGE_MASK, %rdi
15039-
15040- movq %rdi, %rax
15041- shrq $PUD_SHIFT, %rax
15042- andq $(PTRS_PER_PUD - 1), %rax
15043- jz ident_complete
15044-
15045- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15046- leaq level3_ident_pgt(%rip), %rbx
15047- movq %rdx, 0(%rbx, %rax, 8)
15048-
15049- movq %rdi, %rax
15050- shrq $PMD_SHIFT, %rax
15051- andq $(PTRS_PER_PMD - 1), %rax
15052- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15053- leaq level2_spare_pgt(%rip), %rbx
15054- movq %rdx, 0(%rbx, %rax, 8)
15055-ident_complete:
15056+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15057
15058 /*
15059 * Fixup the kernel text+data virtual addresses. Note that
15060@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15061 * after the boot processor executes this code.
15062 */
15063
15064- /* Enable PAE mode and PGE */
15065- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15066+ /* Enable PAE mode and PSE/PGE */
15067+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15068 movq %rax, %cr4
15069
15070 /* Setup early boot stage 4 level pagetables. */
15071@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15072 movl $MSR_EFER, %ecx
15073 rdmsr
15074 btsl $_EFER_SCE, %eax /* Enable System Call */
15075- btl $20,%edi /* No Execute supported? */
15076+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15077 jnc 1f
15078 btsl $_EFER_NX, %eax
15079+ leaq init_level4_pgt(%rip), %rdi
15080+#ifndef CONFIG_EFI
15081+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15082+#endif
15083+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15084+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15085+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15086+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15087 1: wrmsr /* Make changes effective */
15088
15089 /* Setup cr0 */
15090@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15091 * jump. In addition we need to ensure %cs is set so we make this
15092 * a far return.
15093 */
15094+ pax_set_fptr_mask
15095 movq initial_code(%rip),%rax
15096 pushq $0 # fake return address to stop unwinder
15097 pushq $__KERNEL_CS # set correct cs
15098@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15099 bad_address:
15100 jmp bad_address
15101
15102- .section ".init.text","ax"
15103+ __INIT
15104 #ifdef CONFIG_EARLY_PRINTK
15105 .globl early_idt_handlers
15106 early_idt_handlers:
15107@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15108 #endif /* EARLY_PRINTK */
15109 1: hlt
15110 jmp 1b
15111+ .previous
15112
15113 #ifdef CONFIG_EARLY_PRINTK
15114+ __INITDATA
15115 early_recursion_flag:
15116 .long 0
15117+ .previous
15118
15119+ .section .rodata,"a",@progbits
15120 early_idt_msg:
15121 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15122 early_idt_ripmsg:
15123 .asciz "RIP %s\n"
15124+ .previous
15125 #endif /* CONFIG_EARLY_PRINTK */
15126- .previous
15127
15128+ .section .rodata,"a",@progbits
15129 #define NEXT_PAGE(name) \
15130 .balign PAGE_SIZE; \
15131 ENTRY(name)
15132@@ -338,7 +348,6 @@ ENTRY(name)
15133 i = i + 1 ; \
15134 .endr
15135
15136- .data
15137 /*
15138 * This default setting generates an ident mapping at address 0x100000
15139 * and a mapping for the kernel that precisely maps virtual address
15140@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15141 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15142 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15143 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15144+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15145+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15146+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15147+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15148+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15149+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15150 .org init_level4_pgt + L4_START_KERNEL*8, 0
15151 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15152 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15153
15154+#ifdef CONFIG_PAX_PER_CPU_PGD
15155+NEXT_PAGE(cpu_pgd)
15156+ .rept NR_CPUS
15157+ .fill 512,8,0
15158+ .endr
15159+#endif
15160+
15161 NEXT_PAGE(level3_ident_pgt)
15162 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15163+#ifdef CONFIG_XEN
15164 .fill 511,8,0
15165+#else
15166+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15167+ .fill 510,8,0
15168+#endif
15169+
15170+NEXT_PAGE(level3_vmalloc_start_pgt)
15171+ .fill 512,8,0
15172+
15173+NEXT_PAGE(level3_vmalloc_end_pgt)
15174+ .fill 512,8,0
15175+
15176+NEXT_PAGE(level3_vmemmap_pgt)
15177+ .fill L3_VMEMMAP_START,8,0
15178+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15179
15180 NEXT_PAGE(level3_kernel_pgt)
15181 .fill L3_START_KERNEL,8,0
15182@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15183 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15184 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15185
15186+NEXT_PAGE(level2_vmemmap_pgt)
15187+ .fill 512,8,0
15188+
15189 NEXT_PAGE(level2_fixmap_pgt)
15190- .fill 506,8,0
15191- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15192- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15193- .fill 5,8,0
15194+ .fill 507,8,0
15195+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15196+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15197+ .fill 4,8,0
15198
15199-NEXT_PAGE(level1_fixmap_pgt)
15200+NEXT_PAGE(level1_vsyscall_pgt)
15201 .fill 512,8,0
15202
15203-NEXT_PAGE(level2_ident_pgt)
15204- /* Since I easily can, map the first 1G.
15205+ /* Since I easily can, map the first 2G.
15206 * Don't set NX because code runs from these pages.
15207 */
15208- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15209+NEXT_PAGE(level2_ident_pgt)
15210+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15211
15212 NEXT_PAGE(level2_kernel_pgt)
15213 /*
15214@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15215 * If you want to increase this then increase MODULES_VADDR
15216 * too.)
15217 */
15218- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15219- KERNEL_IMAGE_SIZE/PMD_SIZE)
15220-
15221-NEXT_PAGE(level2_spare_pgt)
15222- .fill 512, 8, 0
15223+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15224
15225 #undef PMDS
15226 #undef NEXT_PAGE
15227
15228- .data
15229+ .align PAGE_SIZE
15230+ENTRY(cpu_gdt_table)
15231+ .rept NR_CPUS
15232+ .quad 0x0000000000000000 /* NULL descriptor */
15233+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15234+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15235+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15236+ .quad 0x00cffb000000ffff /* __USER32_CS */
15237+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15238+ .quad 0x00affb000000ffff /* __USER_CS */
15239+
15240+#ifdef CONFIG_PAX_KERNEXEC
15241+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15242+#else
15243+ .quad 0x0 /* unused */
15244+#endif
15245+
15246+ .quad 0,0 /* TSS */
15247+ .quad 0,0 /* LDT */
15248+ .quad 0,0,0 /* three TLS descriptors */
15249+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15250+ /* asm/segment.h:GDT_ENTRIES must match this */
15251+
15252+ /* zero the remaining page */
15253+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15254+ .endr
15255+
15256 .align 16
15257 .globl early_gdt_descr
15258 early_gdt_descr:
15259 .word GDT_ENTRIES*8-1
15260 early_gdt_descr_base:
15261- .quad INIT_PER_CPU_VAR(gdt_page)
15262+ .quad cpu_gdt_table
15263
15264 ENTRY(phys_base)
15265 /* This must match the first entry in level2_kernel_pgt */
15266 .quad 0x0000000000000000
15267
15268 #include "../../x86/xen/xen-head.S"
15269-
15270- .section .bss, "aw", @nobits
15271+
15272+ .section .rodata,"a",@progbits
15273 .align L1_CACHE_BYTES
15274 ENTRY(idt_table)
15275- .skip IDT_ENTRIES * 16
15276+ .fill 512,8,0
15277
15278 __PAGE_ALIGNED_BSS
15279 .align PAGE_SIZE
15280diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15281index 9c3bd4a..e1d9b35 100644
15282--- a/arch/x86/kernel/i386_ksyms_32.c
15283+++ b/arch/x86/kernel/i386_ksyms_32.c
15284@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15285 EXPORT_SYMBOL(cmpxchg8b_emu);
15286 #endif
15287
15288+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15289+
15290 /* Networking helper routines. */
15291 EXPORT_SYMBOL(csum_partial_copy_generic);
15292+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15293+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15294
15295 EXPORT_SYMBOL(__get_user_1);
15296 EXPORT_SYMBOL(__get_user_2);
15297@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15298
15299 EXPORT_SYMBOL(csum_partial);
15300 EXPORT_SYMBOL(empty_zero_page);
15301+
15302+#ifdef CONFIG_PAX_KERNEXEC
15303+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15304+#endif
15305diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15306index 6104852..6114160 100644
15307--- a/arch/x86/kernel/i8259.c
15308+++ b/arch/x86/kernel/i8259.c
15309@@ -210,7 +210,7 @@ spurious_8259A_irq:
15310 "spurious 8259A interrupt: IRQ%d.\n", irq);
15311 spurious_irq_mask |= irqmask;
15312 }
15313- atomic_inc(&irq_err_count);
15314+ atomic_inc_unchecked(&irq_err_count);
15315 /*
15316 * Theoretically we do not have to handle this IRQ,
15317 * but in Linux this does not cause problems and is
15318diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15319index 43e9ccf..44ccf6f 100644
15320--- a/arch/x86/kernel/init_task.c
15321+++ b/arch/x86/kernel/init_task.c
15322@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15323 * way process stacks are handled. This is done by having a special
15324 * "init_task" linker map entry..
15325 */
15326-union thread_union init_thread_union __init_task_data =
15327- { INIT_THREAD_INFO(init_task) };
15328+union thread_union init_thread_union __init_task_data;
15329
15330 /*
15331 * Initial task structure.
15332@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15333 * section. Since TSS's are completely CPU-local, we want them
15334 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15335 */
15336-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15337-
15338+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15339+EXPORT_SYMBOL(init_tss);
15340diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15341index 8c96897..be66bfa 100644
15342--- a/arch/x86/kernel/ioport.c
15343+++ b/arch/x86/kernel/ioport.c
15344@@ -6,6 +6,7 @@
15345 #include <linux/sched.h>
15346 #include <linux/kernel.h>
15347 #include <linux/capability.h>
15348+#include <linux/security.h>
15349 #include <linux/errno.h>
15350 #include <linux/types.h>
15351 #include <linux/ioport.h>
15352@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15353
15354 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15355 return -EINVAL;
15356+#ifdef CONFIG_GRKERNSEC_IO
15357+ if (turn_on && grsec_disable_privio) {
15358+ gr_handle_ioperm();
15359+ return -EPERM;
15360+ }
15361+#endif
15362 if (turn_on && !capable(CAP_SYS_RAWIO))
15363 return -EPERM;
15364
15365@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15366 * because the ->io_bitmap_max value must match the bitmap
15367 * contents:
15368 */
15369- tss = &per_cpu(init_tss, get_cpu());
15370+ tss = init_tss + get_cpu();
15371
15372 if (turn_on)
15373 bitmap_clear(t->io_bitmap_ptr, from, num);
15374@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15375 return -EINVAL;
15376 /* Trying to gain more privileges? */
15377 if (level > old) {
15378+#ifdef CONFIG_GRKERNSEC_IO
15379+ if (grsec_disable_privio) {
15380+ gr_handle_iopl();
15381+ return -EPERM;
15382+ }
15383+#endif
15384 if (!capable(CAP_SYS_RAWIO))
15385 return -EPERM;
15386 }
15387diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15388index 429e0c9..17b3ece 100644
15389--- a/arch/x86/kernel/irq.c
15390+++ b/arch/x86/kernel/irq.c
15391@@ -18,7 +18,7 @@
15392 #include <asm/mce.h>
15393 #include <asm/hw_irq.h>
15394
15395-atomic_t irq_err_count;
15396+atomic_unchecked_t irq_err_count;
15397
15398 /* Function pointer for generic interrupt vector handling */
15399 void (*x86_platform_ipi_callback)(void) = NULL;
15400@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15401 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15402 seq_printf(p, " Machine check polls\n");
15403 #endif
15404- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15405+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15406 #if defined(CONFIG_X86_IO_APIC)
15407- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15408+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15409 #endif
15410 return 0;
15411 }
15412@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15413
15414 u64 arch_irq_stat(void)
15415 {
15416- u64 sum = atomic_read(&irq_err_count);
15417+ u64 sum = atomic_read_unchecked(&irq_err_count);
15418
15419 #ifdef CONFIG_X86_IO_APIC
15420- sum += atomic_read(&irq_mis_count);
15421+ sum += atomic_read_unchecked(&irq_mis_count);
15422 #endif
15423 return sum;
15424 }
15425diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15426index 7209070..cbcd71a 100644
15427--- a/arch/x86/kernel/irq_32.c
15428+++ b/arch/x86/kernel/irq_32.c
15429@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15430 __asm__ __volatile__("andl %%esp,%0" :
15431 "=r" (sp) : "0" (THREAD_SIZE - 1));
15432
15433- return sp < (sizeof(struct thread_info) + STACK_WARN);
15434+ return sp < STACK_WARN;
15435 }
15436
15437 static void print_stack_overflow(void)
15438@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15439 * per-CPU IRQ handling contexts (thread information and stack)
15440 */
15441 union irq_ctx {
15442- struct thread_info tinfo;
15443- u32 stack[THREAD_SIZE/sizeof(u32)];
15444+ unsigned long previous_esp;
15445+ u32 stack[THREAD_SIZE/sizeof(u32)];
15446 } __attribute__((aligned(THREAD_SIZE)));
15447
15448 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15449@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15450 static inline int
15451 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15452 {
15453- union irq_ctx *curctx, *irqctx;
15454+ union irq_ctx *irqctx;
15455 u32 *isp, arg1, arg2;
15456
15457- curctx = (union irq_ctx *) current_thread_info();
15458 irqctx = __this_cpu_read(hardirq_ctx);
15459
15460 /*
15461@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15462 * handler) we can't do that and just have to keep using the
15463 * current stack (which is the irq stack already after all)
15464 */
15465- if (unlikely(curctx == irqctx))
15466+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15467 return 0;
15468
15469 /* build the stack frame on the IRQ stack */
15470- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15471- irqctx->tinfo.task = curctx->tinfo.task;
15472- irqctx->tinfo.previous_esp = current_stack_pointer;
15473+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15474+ irqctx->previous_esp = current_stack_pointer;
15475
15476- /*
15477- * Copy the softirq bits in preempt_count so that the
15478- * softirq checks work in the hardirq context.
15479- */
15480- irqctx->tinfo.preempt_count =
15481- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15482- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15483+#ifdef CONFIG_PAX_MEMORY_UDEREF
15484+ __set_fs(MAKE_MM_SEG(0));
15485+#endif
15486
15487 if (unlikely(overflow))
15488 call_on_stack(print_stack_overflow, isp);
15489@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15490 : "0" (irq), "1" (desc), "2" (isp),
15491 "D" (desc->handle_irq)
15492 : "memory", "cc", "ecx");
15493+
15494+#ifdef CONFIG_PAX_MEMORY_UDEREF
15495+ __set_fs(current_thread_info()->addr_limit);
15496+#endif
15497+
15498 return 1;
15499 }
15500
15501@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15502 */
15503 void __cpuinit irq_ctx_init(int cpu)
15504 {
15505- union irq_ctx *irqctx;
15506-
15507 if (per_cpu(hardirq_ctx, cpu))
15508 return;
15509
15510- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15511- THREAD_FLAGS,
15512- THREAD_ORDER));
15513- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15514- irqctx->tinfo.cpu = cpu;
15515- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15516- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15517-
15518- per_cpu(hardirq_ctx, cpu) = irqctx;
15519-
15520- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15521- THREAD_FLAGS,
15522- THREAD_ORDER));
15523- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15524- irqctx->tinfo.cpu = cpu;
15525- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15526-
15527- per_cpu(softirq_ctx, cpu) = irqctx;
15528+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15529+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15530
15531 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15532 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15533@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15534 asmlinkage void do_softirq(void)
15535 {
15536 unsigned long flags;
15537- struct thread_info *curctx;
15538 union irq_ctx *irqctx;
15539 u32 *isp;
15540
15541@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15542 local_irq_save(flags);
15543
15544 if (local_softirq_pending()) {
15545- curctx = current_thread_info();
15546 irqctx = __this_cpu_read(softirq_ctx);
15547- irqctx->tinfo.task = curctx->task;
15548- irqctx->tinfo.previous_esp = current_stack_pointer;
15549+ irqctx->previous_esp = current_stack_pointer;
15550
15551 /* build the stack frame on the softirq stack */
15552- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15553+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15554+
15555+#ifdef CONFIG_PAX_MEMORY_UDEREF
15556+ __set_fs(MAKE_MM_SEG(0));
15557+#endif
15558
15559 call_on_stack(__do_softirq, isp);
15560+
15561+#ifdef CONFIG_PAX_MEMORY_UDEREF
15562+ __set_fs(current_thread_info()->addr_limit);
15563+#endif
15564+
15565 /*
15566 * Shouldn't happen, we returned above if in_interrupt():
15567 */
15568diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15569index 69bca46..0bac999 100644
15570--- a/arch/x86/kernel/irq_64.c
15571+++ b/arch/x86/kernel/irq_64.c
15572@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15573 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15574 u64 curbase = (u64)task_stack_page(current);
15575
15576- if (user_mode_vm(regs))
15577+ if (user_mode(regs))
15578 return;
15579
15580 WARN_ONCE(regs->sp >= curbase &&
15581diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15582index faba577..93b9e71 100644
15583--- a/arch/x86/kernel/kgdb.c
15584+++ b/arch/x86/kernel/kgdb.c
15585@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15586 #ifdef CONFIG_X86_32
15587 switch (regno) {
15588 case GDB_SS:
15589- if (!user_mode_vm(regs))
15590+ if (!user_mode(regs))
15591 *(unsigned long *)mem = __KERNEL_DS;
15592 break;
15593 case GDB_SP:
15594- if (!user_mode_vm(regs))
15595+ if (!user_mode(regs))
15596 *(unsigned long *)mem = kernel_stack_pointer(regs);
15597 break;
15598 case GDB_GS:
15599@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15600 case 'k':
15601 /* clear the trace bit */
15602 linux_regs->flags &= ~X86_EFLAGS_TF;
15603- atomic_set(&kgdb_cpu_doing_single_step, -1);
15604+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15605
15606 /* set the trace bit if we're stepping */
15607 if (remcomInBuffer[0] == 's') {
15608 linux_regs->flags |= X86_EFLAGS_TF;
15609- atomic_set(&kgdb_cpu_doing_single_step,
15610+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15611 raw_smp_processor_id());
15612 }
15613
15614@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15615
15616 switch (cmd) {
15617 case DIE_DEBUG:
15618- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15619+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15620 if (user_mode(regs))
15621 return single_step_cont(regs, args);
15622 break;
15623diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15624index 7da647d..5d3c4c1 100644
15625--- a/arch/x86/kernel/kprobes.c
15626+++ b/arch/x86/kernel/kprobes.c
15627@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15628 } __attribute__((packed)) *insn;
15629
15630 insn = (struct __arch_relative_insn *)from;
15631+
15632+ pax_open_kernel();
15633 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15634 insn->op = op;
15635+ pax_close_kernel();
15636 }
15637
15638 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15639@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15640 kprobe_opcode_t opcode;
15641 kprobe_opcode_t *orig_opcodes = opcodes;
15642
15643- if (search_exception_tables((unsigned long)opcodes))
15644+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15645 return 0; /* Page fault may occur on this address. */
15646
15647 retry:
15648@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15649 }
15650 }
15651 insn_get_length(&insn);
15652+ pax_open_kernel();
15653 memcpy(dest, insn.kaddr, insn.length);
15654+ pax_close_kernel();
15655
15656 #ifdef CONFIG_X86_64
15657 if (insn_rip_relative(&insn)) {
15658@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15659 (u8 *) dest;
15660 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15661 disp = (u8 *) dest + insn_offset_displacement(&insn);
15662+ pax_open_kernel();
15663 *(s32 *) disp = (s32) newdisp;
15664+ pax_close_kernel();
15665 }
15666 #endif
15667 return insn.length;
15668@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15669 */
15670 __copy_instruction(p->ainsn.insn, p->addr, 0);
15671
15672- if (can_boost(p->addr))
15673+ if (can_boost(ktla_ktva(p->addr)))
15674 p->ainsn.boostable = 0;
15675 else
15676 p->ainsn.boostable = -1;
15677
15678- p->opcode = *p->addr;
15679+ p->opcode = *(ktla_ktva(p->addr));
15680 }
15681
15682 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15683@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15684 * nor set current_kprobe, because it doesn't use single
15685 * stepping.
15686 */
15687- regs->ip = (unsigned long)p->ainsn.insn;
15688+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15689 preempt_enable_no_resched();
15690 return;
15691 }
15692@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15693 if (p->opcode == BREAKPOINT_INSTRUCTION)
15694 regs->ip = (unsigned long)p->addr;
15695 else
15696- regs->ip = (unsigned long)p->ainsn.insn;
15697+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15698 }
15699
15700 /*
15701@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15702 setup_singlestep(p, regs, kcb, 0);
15703 return 1;
15704 }
15705- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15706+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15707 /*
15708 * The breakpoint instruction was removed right
15709 * after we hit it. Another cpu has removed
15710@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15711 " movq %rax, 152(%rsp)\n"
15712 RESTORE_REGS_STRING
15713 " popfq\n"
15714+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15715+ " btsq $63,(%rsp)\n"
15716+#endif
15717 #else
15718 " pushf\n"
15719 SAVE_REGS_STRING
15720@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15721 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15722 {
15723 unsigned long *tos = stack_addr(regs);
15724- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15725+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15726 unsigned long orig_ip = (unsigned long)p->addr;
15727 kprobe_opcode_t *insn = p->ainsn.insn;
15728
15729@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15730 struct die_args *args = data;
15731 int ret = NOTIFY_DONE;
15732
15733- if (args->regs && user_mode_vm(args->regs))
15734+ if (args->regs && user_mode(args->regs))
15735 return ret;
15736
15737 switch (val) {
15738@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15739 * Verify if the address gap is in 2GB range, because this uses
15740 * a relative jump.
15741 */
15742- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15743+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15744 if (abs(rel) > 0x7fffffff)
15745 return -ERANGE;
15746
15747@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15748 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15749
15750 /* Set probe function call */
15751- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15752+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15753
15754 /* Set returning jmp instruction at the tail of out-of-line buffer */
15755 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15756- (u8 *)op->kp.addr + op->optinsn.size);
15757+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15758
15759 flush_icache_range((unsigned long) buf,
15760 (unsigned long) buf + TMPL_END_IDX +
15761@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15762 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15763
15764 /* Backup instructions which will be replaced by jump address */
15765- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15766+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15767 RELATIVE_ADDR_SIZE);
15768
15769 insn_buf[0] = RELATIVEJUMP_OPCODE;
15770diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15771index a9c2116..a52d4fc 100644
15772--- a/arch/x86/kernel/kvm.c
15773+++ b/arch/x86/kernel/kvm.c
15774@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15775 pv_mmu_ops.set_pud = kvm_set_pud;
15776 #if PAGETABLE_LEVELS == 4
15777 pv_mmu_ops.set_pgd = kvm_set_pgd;
15778+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15779 #endif
15780 #endif
15781 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15782diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15783index ea69726..604d066 100644
15784--- a/arch/x86/kernel/ldt.c
15785+++ b/arch/x86/kernel/ldt.c
15786@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15787 if (reload) {
15788 #ifdef CONFIG_SMP
15789 preempt_disable();
15790- load_LDT(pc);
15791+ load_LDT_nolock(pc);
15792 if (!cpumask_equal(mm_cpumask(current->mm),
15793 cpumask_of(smp_processor_id())))
15794 smp_call_function(flush_ldt, current->mm, 1);
15795 preempt_enable();
15796 #else
15797- load_LDT(pc);
15798+ load_LDT_nolock(pc);
15799 #endif
15800 }
15801 if (oldsize) {
15802@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15803 return err;
15804
15805 for (i = 0; i < old->size; i++)
15806- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15807+ write_ldt_entry(new->ldt, i, old->ldt + i);
15808 return 0;
15809 }
15810
15811@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15812 retval = copy_ldt(&mm->context, &old_mm->context);
15813 mutex_unlock(&old_mm->context.lock);
15814 }
15815+
15816+ if (tsk == current) {
15817+ mm->context.vdso = 0;
15818+
15819+#ifdef CONFIG_X86_32
15820+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15821+ mm->context.user_cs_base = 0UL;
15822+ mm->context.user_cs_limit = ~0UL;
15823+
15824+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15825+ cpus_clear(mm->context.cpu_user_cs_mask);
15826+#endif
15827+
15828+#endif
15829+#endif
15830+
15831+ }
15832+
15833 return retval;
15834 }
15835
15836@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15837 }
15838 }
15839
15840+#ifdef CONFIG_PAX_SEGMEXEC
15841+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15842+ error = -EINVAL;
15843+ goto out_unlock;
15844+ }
15845+#endif
15846+
15847 fill_ldt(&ldt, &ldt_info);
15848 if (oldmode)
15849 ldt.avl = 0;
15850diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15851index a3fa43b..8966f4c 100644
15852--- a/arch/x86/kernel/machine_kexec_32.c
15853+++ b/arch/x86/kernel/machine_kexec_32.c
15854@@ -27,7 +27,7 @@
15855 #include <asm/cacheflush.h>
15856 #include <asm/debugreg.h>
15857
15858-static void set_idt(void *newidt, __u16 limit)
15859+static void set_idt(struct desc_struct *newidt, __u16 limit)
15860 {
15861 struct desc_ptr curidt;
15862
15863@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15864 }
15865
15866
15867-static void set_gdt(void *newgdt, __u16 limit)
15868+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15869 {
15870 struct desc_ptr curgdt;
15871
15872@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15873 }
15874
15875 control_page = page_address(image->control_code_page);
15876- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15877+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15878
15879 relocate_kernel_ptr = control_page;
15880 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15881diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15882index 3ca42d0..7cff8cc 100644
15883--- a/arch/x86/kernel/microcode_intel.c
15884+++ b/arch/x86/kernel/microcode_intel.c
15885@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15886
15887 static int get_ucode_user(void *to, const void *from, size_t n)
15888 {
15889- return copy_from_user(to, from, n);
15890+ return copy_from_user(to, (const void __force_user *)from, n);
15891 }
15892
15893 static enum ucode_state
15894 request_microcode_user(int cpu, const void __user *buf, size_t size)
15895 {
15896- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15897+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15898 }
15899
15900 static void microcode_fini_cpu(int cpu)
15901diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15902index 925179f..267ac7a 100644
15903--- a/arch/x86/kernel/module.c
15904+++ b/arch/x86/kernel/module.c
15905@@ -36,15 +36,60 @@
15906 #define DEBUGP(fmt...)
15907 #endif
15908
15909-void *module_alloc(unsigned long size)
15910+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15911 {
15912- if (PAGE_ALIGN(size) > MODULES_LEN)
15913+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15914 return NULL;
15915 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15916- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15917+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15918 -1, __builtin_return_address(0));
15919 }
15920
15921+void *module_alloc(unsigned long size)
15922+{
15923+
15924+#ifdef CONFIG_PAX_KERNEXEC
15925+ return __module_alloc(size, PAGE_KERNEL);
15926+#else
15927+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15928+#endif
15929+
15930+}
15931+
15932+#ifdef CONFIG_PAX_KERNEXEC
15933+#ifdef CONFIG_X86_32
15934+void *module_alloc_exec(unsigned long size)
15935+{
15936+ struct vm_struct *area;
15937+
15938+ if (size == 0)
15939+ return NULL;
15940+
15941+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15942+ return area ? area->addr : NULL;
15943+}
15944+EXPORT_SYMBOL(module_alloc_exec);
15945+
15946+void module_free_exec(struct module *mod, void *module_region)
15947+{
15948+ vunmap(module_region);
15949+}
15950+EXPORT_SYMBOL(module_free_exec);
15951+#else
15952+void module_free_exec(struct module *mod, void *module_region)
15953+{
15954+ module_free(mod, module_region);
15955+}
15956+EXPORT_SYMBOL(module_free_exec);
15957+
15958+void *module_alloc_exec(unsigned long size)
15959+{
15960+ return __module_alloc(size, PAGE_KERNEL_RX);
15961+}
15962+EXPORT_SYMBOL(module_alloc_exec);
15963+#endif
15964+#endif
15965+
15966 #ifdef CONFIG_X86_32
15967 int apply_relocate(Elf32_Shdr *sechdrs,
15968 const char *strtab,
15969@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15970 unsigned int i;
15971 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15972 Elf32_Sym *sym;
15973- uint32_t *location;
15974+ uint32_t *plocation, location;
15975
15976 DEBUGP("Applying relocate section %u to %u\n", relsec,
15977 sechdrs[relsec].sh_info);
15978 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15979 /* This is where to make the change */
15980- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15981- + rel[i].r_offset;
15982+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15983+ location = (uint32_t)plocation;
15984+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15985+ plocation = ktla_ktva((void *)plocation);
15986 /* This is the symbol it is referring to. Note that all
15987 undefined symbols have been resolved. */
15988 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15989@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15990 switch (ELF32_R_TYPE(rel[i].r_info)) {
15991 case R_386_32:
15992 /* We add the value into the location given */
15993- *location += sym->st_value;
15994+ pax_open_kernel();
15995+ *plocation += sym->st_value;
15996+ pax_close_kernel();
15997 break;
15998 case R_386_PC32:
15999 /* Add the value, subtract its postition */
16000- *location += sym->st_value - (uint32_t)location;
16001+ pax_open_kernel();
16002+ *plocation += sym->st_value - location;
16003+ pax_close_kernel();
16004 break;
16005 default:
16006 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16007@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16008 case R_X86_64_NONE:
16009 break;
16010 case R_X86_64_64:
16011+ pax_open_kernel();
16012 *(u64 *)loc = val;
16013+ pax_close_kernel();
16014 break;
16015 case R_X86_64_32:
16016+ pax_open_kernel();
16017 *(u32 *)loc = val;
16018+ pax_close_kernel();
16019 if (val != *(u32 *)loc)
16020 goto overflow;
16021 break;
16022 case R_X86_64_32S:
16023+ pax_open_kernel();
16024 *(s32 *)loc = val;
16025+ pax_close_kernel();
16026 if ((s64)val != *(s32 *)loc)
16027 goto overflow;
16028 break;
16029 case R_X86_64_PC32:
16030 val -= (u64)loc;
16031+ pax_open_kernel();
16032 *(u32 *)loc = val;
16033+ pax_close_kernel();
16034+
16035 #if 0
16036 if ((s64)val != *(s32 *)loc)
16037 goto overflow;
16038diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16039index e88f37b..1353db6 100644
16040--- a/arch/x86/kernel/nmi.c
16041+++ b/arch/x86/kernel/nmi.c
16042@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16043 dotraplinkage notrace __kprobes void
16044 do_nmi(struct pt_regs *regs, long error_code)
16045 {
16046+
16047+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16048+ if (!user_mode(regs)) {
16049+ unsigned long cs = regs->cs & 0xFFFF;
16050+ unsigned long ip = ktva_ktla(regs->ip);
16051+
16052+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16053+ regs->ip = ip;
16054+ }
16055+#endif
16056+
16057 nmi_enter();
16058
16059 inc_irq_stat(__nmi_count);
16060diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16061index 676b8c7..870ba04 100644
16062--- a/arch/x86/kernel/paravirt-spinlocks.c
16063+++ b/arch/x86/kernel/paravirt-spinlocks.c
16064@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16065 arch_spin_lock(lock);
16066 }
16067
16068-struct pv_lock_ops pv_lock_ops = {
16069+struct pv_lock_ops pv_lock_ops __read_only = {
16070 #ifdef CONFIG_SMP
16071 .spin_is_locked = __ticket_spin_is_locked,
16072 .spin_is_contended = __ticket_spin_is_contended,
16073diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16074index d90272e..6bb013b 100644
16075--- a/arch/x86/kernel/paravirt.c
16076+++ b/arch/x86/kernel/paravirt.c
16077@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16078 {
16079 return x;
16080 }
16081+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16082+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16083+#endif
16084
16085 void __init default_banner(void)
16086 {
16087@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16088 if (opfunc == NULL)
16089 /* If there's no function, patch it with a ud2a (BUG) */
16090 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16091- else if (opfunc == _paravirt_nop)
16092+ else if (opfunc == (void *)_paravirt_nop)
16093 /* If the operation is a nop, then nop the callsite */
16094 ret = paravirt_patch_nop();
16095
16096 /* identity functions just return their single argument */
16097- else if (opfunc == _paravirt_ident_32)
16098+ else if (opfunc == (void *)_paravirt_ident_32)
16099 ret = paravirt_patch_ident_32(insnbuf, len);
16100- else if (opfunc == _paravirt_ident_64)
16101+ else if (opfunc == (void *)_paravirt_ident_64)
16102 ret = paravirt_patch_ident_64(insnbuf, len);
16103+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16104+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16105+ ret = paravirt_patch_ident_64(insnbuf, len);
16106+#endif
16107
16108 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16109 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16110@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16111 if (insn_len > len || start == NULL)
16112 insn_len = len;
16113 else
16114- memcpy(insnbuf, start, insn_len);
16115+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16116
16117 return insn_len;
16118 }
16119@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16120 preempt_enable();
16121 }
16122
16123-struct pv_info pv_info = {
16124+struct pv_info pv_info __read_only = {
16125 .name = "bare hardware",
16126 .paravirt_enabled = 0,
16127 .kernel_rpl = 0,
16128@@ -313,16 +320,16 @@ struct pv_info pv_info = {
16129 #endif
16130 };
16131
16132-struct pv_init_ops pv_init_ops = {
16133+struct pv_init_ops pv_init_ops __read_only = {
16134 .patch = native_patch,
16135 };
16136
16137-struct pv_time_ops pv_time_ops = {
16138+struct pv_time_ops pv_time_ops __read_only = {
16139 .sched_clock = native_sched_clock,
16140 .steal_clock = native_steal_clock,
16141 };
16142
16143-struct pv_irq_ops pv_irq_ops = {
16144+struct pv_irq_ops pv_irq_ops __read_only = {
16145 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16146 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16147 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16148@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16149 #endif
16150 };
16151
16152-struct pv_cpu_ops pv_cpu_ops = {
16153+struct pv_cpu_ops pv_cpu_ops __read_only = {
16154 .cpuid = native_cpuid,
16155 .get_debugreg = native_get_debugreg,
16156 .set_debugreg = native_set_debugreg,
16157@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16158 .end_context_switch = paravirt_nop,
16159 };
16160
16161-struct pv_apic_ops pv_apic_ops = {
16162+struct pv_apic_ops pv_apic_ops __read_only = {
16163 #ifdef CONFIG_X86_LOCAL_APIC
16164 .startup_ipi_hook = paravirt_nop,
16165 #endif
16166 };
16167
16168-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16169+#ifdef CONFIG_X86_32
16170+#ifdef CONFIG_X86_PAE
16171+/* 64-bit pagetable entries */
16172+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16173+#else
16174 /* 32-bit pagetable entries */
16175 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16176+#endif
16177 #else
16178 /* 64-bit pagetable entries */
16179 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16180 #endif
16181
16182-struct pv_mmu_ops pv_mmu_ops = {
16183+struct pv_mmu_ops pv_mmu_ops __read_only = {
16184
16185 .read_cr2 = native_read_cr2,
16186 .write_cr2 = native_write_cr2,
16187@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16188 .make_pud = PTE_IDENT,
16189
16190 .set_pgd = native_set_pgd,
16191+ .set_pgd_batched = native_set_pgd_batched,
16192 #endif
16193 #endif /* PAGETABLE_LEVELS >= 3 */
16194
16195@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16196 },
16197
16198 .set_fixmap = native_set_fixmap,
16199+
16200+#ifdef CONFIG_PAX_KERNEXEC
16201+ .pax_open_kernel = native_pax_open_kernel,
16202+ .pax_close_kernel = native_pax_close_kernel,
16203+#endif
16204+
16205 };
16206
16207 EXPORT_SYMBOL_GPL(pv_time_ops);
16208diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16209index 35ccf75..7a15747 100644
16210--- a/arch/x86/kernel/pci-iommu_table.c
16211+++ b/arch/x86/kernel/pci-iommu_table.c
16212@@ -2,7 +2,7 @@
16213 #include <asm/iommu_table.h>
16214 #include <linux/string.h>
16215 #include <linux/kallsyms.h>
16216-
16217+#include <linux/sched.h>
16218
16219 #define DEBUG 1
16220
16221diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16222index ee5d4fb..426649b 100644
16223--- a/arch/x86/kernel/process.c
16224+++ b/arch/x86/kernel/process.c
16225@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16226
16227 void free_thread_info(struct thread_info *ti)
16228 {
16229- free_thread_xstate(ti->task);
16230 free_pages((unsigned long)ti, THREAD_ORDER);
16231 }
16232
16233+static struct kmem_cache *task_struct_cachep;
16234+
16235 void arch_task_cache_init(void)
16236 {
16237- task_xstate_cachep =
16238- kmem_cache_create("task_xstate", xstate_size,
16239+ /* create a slab on which task_structs can be allocated */
16240+ task_struct_cachep =
16241+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16242+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16243+
16244+ task_xstate_cachep =
16245+ kmem_cache_create("task_xstate", xstate_size,
16246 __alignof__(union thread_xstate),
16247- SLAB_PANIC | SLAB_NOTRACK, NULL);
16248+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16249+}
16250+
16251+struct task_struct *alloc_task_struct_node(int node)
16252+{
16253+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16254+}
16255+
16256+void free_task_struct(struct task_struct *task)
16257+{
16258+ free_thread_xstate(task);
16259+ kmem_cache_free(task_struct_cachep, task);
16260 }
16261
16262 /*
16263@@ -70,7 +87,7 @@ void exit_thread(void)
16264 unsigned long *bp = t->io_bitmap_ptr;
16265
16266 if (bp) {
16267- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16268+ struct tss_struct *tss = init_tss + get_cpu();
16269
16270 t->io_bitmap_ptr = NULL;
16271 clear_thread_flag(TIF_IO_BITMAP);
16272@@ -106,7 +123,7 @@ void show_regs_common(void)
16273
16274 printk(KERN_CONT "\n");
16275 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16276- current->pid, current->comm, print_tainted(),
16277+ task_pid_nr(current), current->comm, print_tainted(),
16278 init_utsname()->release,
16279 (int)strcspn(init_utsname()->version, " "),
16280 init_utsname()->version);
16281@@ -120,6 +137,9 @@ void flush_thread(void)
16282 {
16283 struct task_struct *tsk = current;
16284
16285+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16286+ loadsegment(gs, 0);
16287+#endif
16288 flush_ptrace_hw_breakpoint(tsk);
16289 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16290 /*
16291@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16292 regs.di = (unsigned long) arg;
16293
16294 #ifdef CONFIG_X86_32
16295- regs.ds = __USER_DS;
16296- regs.es = __USER_DS;
16297+ regs.ds = __KERNEL_DS;
16298+ regs.es = __KERNEL_DS;
16299 regs.fs = __KERNEL_PERCPU;
16300- regs.gs = __KERNEL_STACK_CANARY;
16301+ savesegment(gs, regs.gs);
16302 #else
16303 regs.ss = __KERNEL_DS;
16304 #endif
16305@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16306
16307 return ret;
16308 }
16309-void stop_this_cpu(void *dummy)
16310+__noreturn void stop_this_cpu(void *dummy)
16311 {
16312 local_irq_disable();
16313 /*
16314@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16315 }
16316 early_param("idle", idle_setup);
16317
16318-unsigned long arch_align_stack(unsigned long sp)
16319+#ifdef CONFIG_PAX_RANDKSTACK
16320+void pax_randomize_kstack(struct pt_regs *regs)
16321 {
16322- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16323- sp -= get_random_int() % 8192;
16324- return sp & ~0xf;
16325-}
16326+ struct thread_struct *thread = &current->thread;
16327+ unsigned long time;
16328
16329-unsigned long arch_randomize_brk(struct mm_struct *mm)
16330-{
16331- unsigned long range_end = mm->brk + 0x02000000;
16332- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16333-}
16334+ if (!randomize_va_space)
16335+ return;
16336+
16337+ if (v8086_mode(regs))
16338+ return;
16339
16340+ rdtscl(time);
16341+
16342+ /* P4 seems to return a 0 LSB, ignore it */
16343+#ifdef CONFIG_MPENTIUM4
16344+ time &= 0x3EUL;
16345+ time <<= 2;
16346+#elif defined(CONFIG_X86_64)
16347+ time &= 0xFUL;
16348+ time <<= 4;
16349+#else
16350+ time &= 0x1FUL;
16351+ time <<= 3;
16352+#endif
16353+
16354+ thread->sp0 ^= time;
16355+ load_sp0(init_tss + smp_processor_id(), thread);
16356+
16357+#ifdef CONFIG_X86_64
16358+ percpu_write(kernel_stack, thread->sp0);
16359+#endif
16360+}
16361+#endif
16362diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16363index 795b79f..063767a 100644
16364--- a/arch/x86/kernel/process_32.c
16365+++ b/arch/x86/kernel/process_32.c
16366@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16367 unsigned long thread_saved_pc(struct task_struct *tsk)
16368 {
16369 return ((unsigned long *)tsk->thread.sp)[3];
16370+//XXX return tsk->thread.eip;
16371 }
16372
16373 #ifndef CONFIG_SMP
16374@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16375 unsigned long sp;
16376 unsigned short ss, gs;
16377
16378- if (user_mode_vm(regs)) {
16379+ if (user_mode(regs)) {
16380 sp = regs->sp;
16381 ss = regs->ss & 0xffff;
16382- gs = get_user_gs(regs);
16383 } else {
16384 sp = kernel_stack_pointer(regs);
16385 savesegment(ss, ss);
16386- savesegment(gs, gs);
16387 }
16388+ gs = get_user_gs(regs);
16389
16390 show_regs_common();
16391
16392@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16393 struct task_struct *tsk;
16394 int err;
16395
16396- childregs = task_pt_regs(p);
16397+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16398 *childregs = *regs;
16399 childregs->ax = 0;
16400 childregs->sp = sp;
16401
16402 p->thread.sp = (unsigned long) childregs;
16403 p->thread.sp0 = (unsigned long) (childregs+1);
16404+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16405
16406 p->thread.ip = (unsigned long) ret_from_fork;
16407
16408@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16409 struct thread_struct *prev = &prev_p->thread,
16410 *next = &next_p->thread;
16411 int cpu = smp_processor_id();
16412- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16413+ struct tss_struct *tss = init_tss + cpu;
16414 bool preload_fpu;
16415
16416 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16417@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16418 */
16419 lazy_save_gs(prev->gs);
16420
16421+#ifdef CONFIG_PAX_MEMORY_UDEREF
16422+ __set_fs(task_thread_info(next_p)->addr_limit);
16423+#endif
16424+
16425 /*
16426 * Load the per-thread Thread-Local Storage descriptor.
16427 */
16428@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16429 */
16430 arch_end_context_switch(next_p);
16431
16432+ percpu_write(current_task, next_p);
16433+ percpu_write(current_tinfo, &next_p->tinfo);
16434+
16435 if (preload_fpu)
16436 __math_state_restore();
16437
16438@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16439 if (prev->gs | next->gs)
16440 lazy_load_gs(next->gs);
16441
16442- percpu_write(current_task, next_p);
16443-
16444 return prev_p;
16445 }
16446
16447@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16448 } while (count++ < 16);
16449 return 0;
16450 }
16451-
16452diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16453index 3bd7e6e..90b2bcf 100644
16454--- a/arch/x86/kernel/process_64.c
16455+++ b/arch/x86/kernel/process_64.c
16456@@ -89,7 +89,7 @@ static void __exit_idle(void)
16457 void exit_idle(void)
16458 {
16459 /* idle loop has pid 0 */
16460- if (current->pid)
16461+ if (task_pid_nr(current))
16462 return;
16463 __exit_idle();
16464 }
16465@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16466 struct pt_regs *childregs;
16467 struct task_struct *me = current;
16468
16469- childregs = ((struct pt_regs *)
16470- (THREAD_SIZE + task_stack_page(p))) - 1;
16471+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16472 *childregs = *regs;
16473
16474 childregs->ax = 0;
16475@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16476 p->thread.sp = (unsigned long) childregs;
16477 p->thread.sp0 = (unsigned long) (childregs+1);
16478 p->thread.usersp = me->thread.usersp;
16479+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16480
16481 set_tsk_thread_flag(p, TIF_FORK);
16482
16483@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16484 struct thread_struct *prev = &prev_p->thread;
16485 struct thread_struct *next = &next_p->thread;
16486 int cpu = smp_processor_id();
16487- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16488+ struct tss_struct *tss = init_tss + cpu;
16489 unsigned fsindex, gsindex;
16490 bool preload_fpu;
16491
16492@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16493 prev->usersp = percpu_read(old_rsp);
16494 percpu_write(old_rsp, next->usersp);
16495 percpu_write(current_task, next_p);
16496+ percpu_write(current_tinfo, &next_p->tinfo);
16497
16498- percpu_write(kernel_stack,
16499- (unsigned long)task_stack_page(next_p) +
16500- THREAD_SIZE - KERNEL_STACK_OFFSET);
16501+ percpu_write(kernel_stack, next->sp0);
16502
16503 /*
16504 * Now maybe reload the debug registers and handle I/O bitmaps
16505@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16506 if (!p || p == current || p->state == TASK_RUNNING)
16507 return 0;
16508 stack = (unsigned long)task_stack_page(p);
16509- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16510+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16511 return 0;
16512 fp = *(u64 *)(p->thread.sp);
16513 do {
16514- if (fp < (unsigned long)stack ||
16515- fp >= (unsigned long)stack+THREAD_SIZE)
16516+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16517 return 0;
16518 ip = *(u64 *)(fp+8);
16519 if (!in_sched_functions(ip))
16520diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16521index 8252879..d3219e0 100644
16522--- a/arch/x86/kernel/ptrace.c
16523+++ b/arch/x86/kernel/ptrace.c
16524@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16525 unsigned long addr, unsigned long data)
16526 {
16527 int ret;
16528- unsigned long __user *datap = (unsigned long __user *)data;
16529+ unsigned long __user *datap = (__force unsigned long __user *)data;
16530
16531 switch (request) {
16532 /* read the word at location addr in the USER area. */
16533@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16534 if ((int) addr < 0)
16535 return -EIO;
16536 ret = do_get_thread_area(child, addr,
16537- (struct user_desc __user *)data);
16538+ (__force struct user_desc __user *) data);
16539 break;
16540
16541 case PTRACE_SET_THREAD_AREA:
16542 if ((int) addr < 0)
16543 return -EIO;
16544 ret = do_set_thread_area(child, addr,
16545- (struct user_desc __user *)data, 0);
16546+ (__force struct user_desc __user *) data, 0);
16547 break;
16548 #endif
16549
16550@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16551 memset(info, 0, sizeof(*info));
16552 info->si_signo = SIGTRAP;
16553 info->si_code = si_code;
16554- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16555+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16556 }
16557
16558 void user_single_step_siginfo(struct task_struct *tsk,
16559diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16560index 42eb330..139955c 100644
16561--- a/arch/x86/kernel/pvclock.c
16562+++ b/arch/x86/kernel/pvclock.c
16563@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16564 return pv_tsc_khz;
16565 }
16566
16567-static atomic64_t last_value = ATOMIC64_INIT(0);
16568+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16569
16570 void pvclock_resume(void)
16571 {
16572- atomic64_set(&last_value, 0);
16573+ atomic64_set_unchecked(&last_value, 0);
16574 }
16575
16576 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16577@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16578 * updating at the same time, and one of them could be slightly behind,
16579 * making the assumption that last_value always go forward fail to hold.
16580 */
16581- last = atomic64_read(&last_value);
16582+ last = atomic64_read_unchecked(&last_value);
16583 do {
16584 if (ret < last)
16585 return last;
16586- last = atomic64_cmpxchg(&last_value, last, ret);
16587+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16588 } while (unlikely(last != ret));
16589
16590 return ret;
16591diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16592index 37a458b..e63d183 100644
16593--- a/arch/x86/kernel/reboot.c
16594+++ b/arch/x86/kernel/reboot.c
16595@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16596 EXPORT_SYMBOL(pm_power_off);
16597
16598 static const struct desc_ptr no_idt = {};
16599-static int reboot_mode;
16600+static unsigned short reboot_mode;
16601 enum reboot_type reboot_type = BOOT_ACPI;
16602 int reboot_force;
16603
16604@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16605 extern const unsigned char machine_real_restart_asm[];
16606 extern const u64 machine_real_restart_gdt[3];
16607
16608-void machine_real_restart(unsigned int type)
16609+__noreturn void machine_real_restart(unsigned int type)
16610 {
16611 void *restart_va;
16612 unsigned long restart_pa;
16613- void (*restart_lowmem)(unsigned int);
16614+ void (* __noreturn restart_lowmem)(unsigned int);
16615 u64 *lowmem_gdt;
16616
16617+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16618+ struct desc_struct *gdt;
16619+#endif
16620+
16621 local_irq_disable();
16622
16623 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16624@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16625 boot)". This seems like a fairly standard thing that gets set by
16626 REBOOT.COM programs, and the previous reset routine did this
16627 too. */
16628- *((unsigned short *)0x472) = reboot_mode;
16629+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16630
16631 /* Patch the GDT in the low memory trampoline */
16632 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16633
16634 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16635 restart_pa = virt_to_phys(restart_va);
16636- restart_lowmem = (void (*)(unsigned int))restart_pa;
16637+ restart_lowmem = (void *)restart_pa;
16638
16639 /* GDT[0]: GDT self-pointer */
16640 lowmem_gdt[0] =
16641@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16642 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16643
16644 /* Jump to the identity-mapped low memory code */
16645+
16646+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16647+ gdt = get_cpu_gdt_table(smp_processor_id());
16648+ pax_open_kernel();
16649+#ifdef CONFIG_PAX_MEMORY_UDEREF
16650+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16651+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16652+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16653+#endif
16654+#ifdef CONFIG_PAX_KERNEXEC
16655+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16656+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16657+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16658+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16659+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16660+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16661+#endif
16662+ pax_close_kernel();
16663+#endif
16664+
16665+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16666+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16667+ unreachable();
16668+#else
16669 restart_lowmem(type);
16670+#endif
16671+
16672 }
16673 #ifdef CONFIG_APM_MODULE
16674 EXPORT_SYMBOL(machine_real_restart);
16675@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16676 * try to force a triple fault and then cycle between hitting the keyboard
16677 * controller and doing that
16678 */
16679-static void native_machine_emergency_restart(void)
16680+__noreturn static void native_machine_emergency_restart(void)
16681 {
16682 int i;
16683 int attempt = 0;
16684@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16685 #endif
16686 }
16687
16688-static void __machine_emergency_restart(int emergency)
16689+static __noreturn void __machine_emergency_restart(int emergency)
16690 {
16691 reboot_emergency = emergency;
16692 machine_ops.emergency_restart();
16693 }
16694
16695-static void native_machine_restart(char *__unused)
16696+static __noreturn void native_machine_restart(char *__unused)
16697 {
16698 printk("machine restart\n");
16699
16700@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16701 __machine_emergency_restart(0);
16702 }
16703
16704-static void native_machine_halt(void)
16705+static __noreturn void native_machine_halt(void)
16706 {
16707 /* stop other cpus and apics */
16708 machine_shutdown();
16709@@ -690,7 +720,7 @@ static void native_machine_halt(void)
16710 stop_this_cpu(NULL);
16711 }
16712
16713-static void native_machine_power_off(void)
16714+__noreturn static void native_machine_power_off(void)
16715 {
16716 if (pm_power_off) {
16717 if (!reboot_force)
16718@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16719 }
16720 /* a fallback in case there is no PM info available */
16721 tboot_shutdown(TB_SHUTDOWN_HALT);
16722+ unreachable();
16723 }
16724
16725 struct machine_ops machine_ops = {
16726diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16727index 7a6f3b3..bed145d7 100644
16728--- a/arch/x86/kernel/relocate_kernel_64.S
16729+++ b/arch/x86/kernel/relocate_kernel_64.S
16730@@ -11,6 +11,7 @@
16731 #include <asm/kexec.h>
16732 #include <asm/processor-flags.h>
16733 #include <asm/pgtable_types.h>
16734+#include <asm/alternative-asm.h>
16735
16736 /*
16737 * Must be relocatable PIC code callable as a C function
16738@@ -160,13 +161,14 @@ identity_mapped:
16739 xorq %rbp, %rbp
16740 xorq %r8, %r8
16741 xorq %r9, %r9
16742- xorq %r10, %r9
16743+ xorq %r10, %r10
16744 xorq %r11, %r11
16745 xorq %r12, %r12
16746 xorq %r13, %r13
16747 xorq %r14, %r14
16748 xorq %r15, %r15
16749
16750+ pax_force_retaddr 0, 1
16751 ret
16752
16753 1:
16754diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16755index cf0ef98..e3f780b 100644
16756--- a/arch/x86/kernel/setup.c
16757+++ b/arch/x86/kernel/setup.c
16758@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16759
16760 switch (data->type) {
16761 case SETUP_E820_EXT:
16762- parse_e820_ext(data);
16763+ parse_e820_ext((struct setup_data __force_kernel *)data);
16764 break;
16765 case SETUP_DTB:
16766 add_dtb(pa_data);
16767@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16768 * area (640->1Mb) as ram even though it is not.
16769 * take them out.
16770 */
16771- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16772+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16773 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16774 }
16775
16776@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16777
16778 if (!boot_params.hdr.root_flags)
16779 root_mountflags &= ~MS_RDONLY;
16780- init_mm.start_code = (unsigned long) _text;
16781- init_mm.end_code = (unsigned long) _etext;
16782+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16783+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16784 init_mm.end_data = (unsigned long) _edata;
16785 init_mm.brk = _brk_end;
16786
16787- code_resource.start = virt_to_phys(_text);
16788- code_resource.end = virt_to_phys(_etext)-1;
16789- data_resource.start = virt_to_phys(_etext);
16790+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16791+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16792+ data_resource.start = virt_to_phys(_sdata);
16793 data_resource.end = virt_to_phys(_edata)-1;
16794 bss_resource.start = virt_to_phys(&__bss_start);
16795 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16796diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16797index 71f4727..16dc9f7 100644
16798--- a/arch/x86/kernel/setup_percpu.c
16799+++ b/arch/x86/kernel/setup_percpu.c
16800@@ -21,19 +21,17 @@
16801 #include <asm/cpu.h>
16802 #include <asm/stackprotector.h>
16803
16804-DEFINE_PER_CPU(int, cpu_number);
16805+#ifdef CONFIG_SMP
16806+DEFINE_PER_CPU(unsigned int, cpu_number);
16807 EXPORT_PER_CPU_SYMBOL(cpu_number);
16808+#endif
16809
16810-#ifdef CONFIG_X86_64
16811 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16812-#else
16813-#define BOOT_PERCPU_OFFSET 0
16814-#endif
16815
16816 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16817 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16818
16819-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16820+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16821 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16822 };
16823 EXPORT_SYMBOL(__per_cpu_offset);
16824@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16825 {
16826 #ifdef CONFIG_X86_32
16827 struct desc_struct gdt;
16828+ unsigned long base = per_cpu_offset(cpu);
16829
16830- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16831- 0x2 | DESCTYPE_S, 0x8);
16832- gdt.s = 1;
16833+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16834+ 0x83 | DESCTYPE_S, 0xC);
16835 write_gdt_entry(get_cpu_gdt_table(cpu),
16836 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16837 #endif
16838@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16839 /* alrighty, percpu areas up and running */
16840 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16841 for_each_possible_cpu(cpu) {
16842+#ifdef CONFIG_CC_STACKPROTECTOR
16843+#ifdef CONFIG_X86_32
16844+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16845+#endif
16846+#endif
16847 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16848 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16849 per_cpu(cpu_number, cpu) = cpu;
16850@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16851 */
16852 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16853 #endif
16854+#ifdef CONFIG_CC_STACKPROTECTOR
16855+#ifdef CONFIG_X86_32
16856+ if (!cpu)
16857+ per_cpu(stack_canary.canary, cpu) = canary;
16858+#endif
16859+#endif
16860 /*
16861 * Up to this point, the boot CPU has been using .init.data
16862 * area. Reload any changed state for the boot CPU.
16863diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16864index 54ddaeb2..22c3bdc 100644
16865--- a/arch/x86/kernel/signal.c
16866+++ b/arch/x86/kernel/signal.c
16867@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16868 * Align the stack pointer according to the i386 ABI,
16869 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16870 */
16871- sp = ((sp + 4) & -16ul) - 4;
16872+ sp = ((sp - 12) & -16ul) - 4;
16873 #else /* !CONFIG_X86_32 */
16874 sp = round_down(sp, 16) - 8;
16875 #endif
16876@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16877 * Return an always-bogus address instead so we will die with SIGSEGV.
16878 */
16879 if (onsigstack && !likely(on_sig_stack(sp)))
16880- return (void __user *)-1L;
16881+ return (__force void __user *)-1L;
16882
16883 /* save i387 state */
16884 if (used_math() && save_i387_xstate(*fpstate) < 0)
16885- return (void __user *)-1L;
16886+ return (__force void __user *)-1L;
16887
16888 return (void __user *)sp;
16889 }
16890@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16891 }
16892
16893 if (current->mm->context.vdso)
16894- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16895+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16896 else
16897- restorer = &frame->retcode;
16898+ restorer = (void __user *)&frame->retcode;
16899 if (ka->sa.sa_flags & SA_RESTORER)
16900 restorer = ka->sa.sa_restorer;
16901
16902@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16903 * reasons and because gdb uses it as a signature to notice
16904 * signal handler stack frames.
16905 */
16906- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16907+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16908
16909 if (err)
16910 return -EFAULT;
16911@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16912 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16913
16914 /* Set up to return from userspace. */
16915- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16916+ if (current->mm->context.vdso)
16917+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16918+ else
16919+ restorer = (void __user *)&frame->retcode;
16920 if (ka->sa.sa_flags & SA_RESTORER)
16921 restorer = ka->sa.sa_restorer;
16922 put_user_ex(restorer, &frame->pretcode);
16923@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16924 * reasons and because gdb uses it as a signature to notice
16925 * signal handler stack frames.
16926 */
16927- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16928+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16929 } put_user_catch(err);
16930
16931 if (err)
16932@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16933 * X86_32: vm86 regs switched out by assembly code before reaching
16934 * here, so testing against kernel CS suffices.
16935 */
16936- if (!user_mode(regs))
16937+ if (!user_mode_novm(regs))
16938 return;
16939
16940 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16941diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16942index 9f548cb..caf76f7 100644
16943--- a/arch/x86/kernel/smpboot.c
16944+++ b/arch/x86/kernel/smpboot.c
16945@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16946 set_idle_for_cpu(cpu, c_idle.idle);
16947 do_rest:
16948 per_cpu(current_task, cpu) = c_idle.idle;
16949+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16950 #ifdef CONFIG_X86_32
16951 /* Stack for startup_32 can be just as for start_secondary onwards */
16952 irq_ctx_init(cpu);
16953 #else
16954 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16955 initial_gs = per_cpu_offset(cpu);
16956- per_cpu(kernel_stack, cpu) =
16957- (unsigned long)task_stack_page(c_idle.idle) -
16958- KERNEL_STACK_OFFSET + THREAD_SIZE;
16959+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16960 #endif
16961+
16962+ pax_open_kernel();
16963 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16964+ pax_close_kernel();
16965+
16966 initial_code = (unsigned long)start_secondary;
16967 stack_start = c_idle.idle->thread.sp;
16968
16969@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16970
16971 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16972
16973+#ifdef CONFIG_PAX_PER_CPU_PGD
16974+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16975+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16976+ KERNEL_PGD_PTRS);
16977+#endif
16978+
16979 err = do_boot_cpu(apicid, cpu);
16980 if (err) {
16981 pr_debug("do_boot_cpu failed %d\n", err);
16982diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16983index c346d11..d43b163 100644
16984--- a/arch/x86/kernel/step.c
16985+++ b/arch/x86/kernel/step.c
16986@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16987 struct desc_struct *desc;
16988 unsigned long base;
16989
16990- seg &= ~7UL;
16991+ seg >>= 3;
16992
16993 mutex_lock(&child->mm->context.lock);
16994- if (unlikely((seg >> 3) >= child->mm->context.size))
16995+ if (unlikely(seg >= child->mm->context.size))
16996 addr = -1L; /* bogus selector, access would fault */
16997 else {
16998 desc = child->mm->context.ldt + seg;
16999@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17000 addr += base;
17001 }
17002 mutex_unlock(&child->mm->context.lock);
17003- }
17004+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17005+ addr = ktla_ktva(addr);
17006
17007 return addr;
17008 }
17009@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17010 unsigned char opcode[15];
17011 unsigned long addr = convert_ip_to_linear(child, regs);
17012
17013+ if (addr == -EINVAL)
17014+ return 0;
17015+
17016 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17017 for (i = 0; i < copied; i++) {
17018 switch (opcode[i]) {
17019diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17020index 0b0cb5f..db6b9ed 100644
17021--- a/arch/x86/kernel/sys_i386_32.c
17022+++ b/arch/x86/kernel/sys_i386_32.c
17023@@ -24,17 +24,224 @@
17024
17025 #include <asm/syscalls.h>
17026
17027-/*
17028- * Do a system call from kernel instead of calling sys_execve so we
17029- * end up with proper pt_regs.
17030- */
17031-int kernel_execve(const char *filename,
17032- const char *const argv[],
17033- const char *const envp[])
17034+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17035 {
17036- long __res;
17037- asm volatile ("int $0x80"
17038- : "=a" (__res)
17039- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17040- return __res;
17041+ unsigned long pax_task_size = TASK_SIZE;
17042+
17043+#ifdef CONFIG_PAX_SEGMEXEC
17044+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17045+ pax_task_size = SEGMEXEC_TASK_SIZE;
17046+#endif
17047+
17048+ if (len > pax_task_size || addr > pax_task_size - len)
17049+ return -EINVAL;
17050+
17051+ return 0;
17052+}
17053+
17054+unsigned long
17055+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17056+ unsigned long len, unsigned long pgoff, unsigned long flags)
17057+{
17058+ struct mm_struct *mm = current->mm;
17059+ struct vm_area_struct *vma;
17060+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17061+
17062+#ifdef CONFIG_PAX_SEGMEXEC
17063+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17064+ pax_task_size = SEGMEXEC_TASK_SIZE;
17065+#endif
17066+
17067+ pax_task_size -= PAGE_SIZE;
17068+
17069+ if (len > pax_task_size)
17070+ return -ENOMEM;
17071+
17072+ if (flags & MAP_FIXED)
17073+ return addr;
17074+
17075+#ifdef CONFIG_PAX_RANDMMAP
17076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17077+#endif
17078+
17079+ if (addr) {
17080+ addr = PAGE_ALIGN(addr);
17081+ if (pax_task_size - len >= addr) {
17082+ vma = find_vma(mm, addr);
17083+ if (check_heap_stack_gap(vma, addr, len))
17084+ return addr;
17085+ }
17086+ }
17087+ if (len > mm->cached_hole_size) {
17088+ start_addr = addr = mm->free_area_cache;
17089+ } else {
17090+ start_addr = addr = mm->mmap_base;
17091+ mm->cached_hole_size = 0;
17092+ }
17093+
17094+#ifdef CONFIG_PAX_PAGEEXEC
17095+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17096+ start_addr = 0x00110000UL;
17097+
17098+#ifdef CONFIG_PAX_RANDMMAP
17099+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17100+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17101+#endif
17102+
17103+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17104+ start_addr = addr = mm->mmap_base;
17105+ else
17106+ addr = start_addr;
17107+ }
17108+#endif
17109+
17110+full_search:
17111+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17112+ /* At this point: (!vma || addr < vma->vm_end). */
17113+ if (pax_task_size - len < addr) {
17114+ /*
17115+ * Start a new search - just in case we missed
17116+ * some holes.
17117+ */
17118+ if (start_addr != mm->mmap_base) {
17119+ start_addr = addr = mm->mmap_base;
17120+ mm->cached_hole_size = 0;
17121+ goto full_search;
17122+ }
17123+ return -ENOMEM;
17124+ }
17125+ if (check_heap_stack_gap(vma, addr, len))
17126+ break;
17127+ if (addr + mm->cached_hole_size < vma->vm_start)
17128+ mm->cached_hole_size = vma->vm_start - addr;
17129+ addr = vma->vm_end;
17130+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17131+ start_addr = addr = mm->mmap_base;
17132+ mm->cached_hole_size = 0;
17133+ goto full_search;
17134+ }
17135+ }
17136+
17137+ /*
17138+ * Remember the place where we stopped the search:
17139+ */
17140+ mm->free_area_cache = addr + len;
17141+ return addr;
17142+}
17143+
17144+unsigned long
17145+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17146+ const unsigned long len, const unsigned long pgoff,
17147+ const unsigned long flags)
17148+{
17149+ struct vm_area_struct *vma;
17150+ struct mm_struct *mm = current->mm;
17151+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17152+
17153+#ifdef CONFIG_PAX_SEGMEXEC
17154+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17155+ pax_task_size = SEGMEXEC_TASK_SIZE;
17156+#endif
17157+
17158+ pax_task_size -= PAGE_SIZE;
17159+
17160+ /* requested length too big for entire address space */
17161+ if (len > pax_task_size)
17162+ return -ENOMEM;
17163+
17164+ if (flags & MAP_FIXED)
17165+ return addr;
17166+
17167+#ifdef CONFIG_PAX_PAGEEXEC
17168+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17169+ goto bottomup;
17170+#endif
17171+
17172+#ifdef CONFIG_PAX_RANDMMAP
17173+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17174+#endif
17175+
17176+ /* requesting a specific address */
17177+ if (addr) {
17178+ addr = PAGE_ALIGN(addr);
17179+ if (pax_task_size - len >= addr) {
17180+ vma = find_vma(mm, addr);
17181+ if (check_heap_stack_gap(vma, addr, len))
17182+ return addr;
17183+ }
17184+ }
17185+
17186+ /* check if free_area_cache is useful for us */
17187+ if (len <= mm->cached_hole_size) {
17188+ mm->cached_hole_size = 0;
17189+ mm->free_area_cache = mm->mmap_base;
17190+ }
17191+
17192+ /* either no address requested or can't fit in requested address hole */
17193+ addr = mm->free_area_cache;
17194+
17195+ /* make sure it can fit in the remaining address space */
17196+ if (addr > len) {
17197+ vma = find_vma(mm, addr-len);
17198+ if (check_heap_stack_gap(vma, addr - len, len))
17199+ /* remember the address as a hint for next time */
17200+ return (mm->free_area_cache = addr-len);
17201+ }
17202+
17203+ if (mm->mmap_base < len)
17204+ goto bottomup;
17205+
17206+ addr = mm->mmap_base-len;
17207+
17208+ do {
17209+ /*
17210+ * Lookup failure means no vma is above this address,
17211+ * else if new region fits below vma->vm_start,
17212+ * return with success:
17213+ */
17214+ vma = find_vma(mm, addr);
17215+ if (check_heap_stack_gap(vma, addr, len))
17216+ /* remember the address as a hint for next time */
17217+ return (mm->free_area_cache = addr);
17218+
17219+ /* remember the largest hole we saw so far */
17220+ if (addr + mm->cached_hole_size < vma->vm_start)
17221+ mm->cached_hole_size = vma->vm_start - addr;
17222+
17223+ /* try just below the current vma->vm_start */
17224+ addr = skip_heap_stack_gap(vma, len);
17225+ } while (!IS_ERR_VALUE(addr));
17226+
17227+bottomup:
17228+ /*
17229+ * A failed mmap() very likely causes application failure,
17230+ * so fall back to the bottom-up function here. This scenario
17231+ * can happen with large stack limits and large mmap()
17232+ * allocations.
17233+ */
17234+
17235+#ifdef CONFIG_PAX_SEGMEXEC
17236+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17237+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17238+ else
17239+#endif
17240+
17241+ mm->mmap_base = TASK_UNMAPPED_BASE;
17242+
17243+#ifdef CONFIG_PAX_RANDMMAP
17244+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17245+ mm->mmap_base += mm->delta_mmap;
17246+#endif
17247+
17248+ mm->free_area_cache = mm->mmap_base;
17249+ mm->cached_hole_size = ~0UL;
17250+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17251+ /*
17252+ * Restore the topdown base:
17253+ */
17254+ mm->mmap_base = base;
17255+ mm->free_area_cache = base;
17256+ mm->cached_hole_size = ~0UL;
17257+
17258+ return addr;
17259 }
17260diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17261index 0514890..3dbebce 100644
17262--- a/arch/x86/kernel/sys_x86_64.c
17263+++ b/arch/x86/kernel/sys_x86_64.c
17264@@ -95,8 +95,8 @@ out:
17265 return error;
17266 }
17267
17268-static void find_start_end(unsigned long flags, unsigned long *begin,
17269- unsigned long *end)
17270+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17271+ unsigned long *begin, unsigned long *end)
17272 {
17273 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17274 unsigned long new_begin;
17275@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17276 *begin = new_begin;
17277 }
17278 } else {
17279- *begin = TASK_UNMAPPED_BASE;
17280+ *begin = mm->mmap_base;
17281 *end = TASK_SIZE;
17282 }
17283 }
17284@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17285 if (flags & MAP_FIXED)
17286 return addr;
17287
17288- find_start_end(flags, &begin, &end);
17289+ find_start_end(mm, flags, &begin, &end);
17290
17291 if (len > end)
17292 return -ENOMEM;
17293
17294+#ifdef CONFIG_PAX_RANDMMAP
17295+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17296+#endif
17297+
17298 if (addr) {
17299 addr = PAGE_ALIGN(addr);
17300 vma = find_vma(mm, addr);
17301- if (end - len >= addr &&
17302- (!vma || addr + len <= vma->vm_start))
17303+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17304 return addr;
17305 }
17306 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17307@@ -172,7 +175,7 @@ full_search:
17308 }
17309 return -ENOMEM;
17310 }
17311- if (!vma || addr + len <= vma->vm_start) {
17312+ if (check_heap_stack_gap(vma, addr, len)) {
17313 /*
17314 * Remember the place where we stopped the search:
17315 */
17316@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17317 {
17318 struct vm_area_struct *vma;
17319 struct mm_struct *mm = current->mm;
17320- unsigned long addr = addr0;
17321+ unsigned long base = mm->mmap_base, addr = addr0;
17322
17323 /* requested length too big for entire address space */
17324 if (len > TASK_SIZE)
17325@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17326 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17327 goto bottomup;
17328
17329+#ifdef CONFIG_PAX_RANDMMAP
17330+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17331+#endif
17332+
17333 /* requesting a specific address */
17334 if (addr) {
17335 addr = PAGE_ALIGN(addr);
17336- vma = find_vma(mm, addr);
17337- if (TASK_SIZE - len >= addr &&
17338- (!vma || addr + len <= vma->vm_start))
17339- return addr;
17340+ if (TASK_SIZE - len >= addr) {
17341+ vma = find_vma(mm, addr);
17342+ if (check_heap_stack_gap(vma, addr, len))
17343+ return addr;
17344+ }
17345 }
17346
17347 /* check if free_area_cache is useful for us */
17348@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17349 ALIGN_TOPDOWN);
17350
17351 vma = find_vma(mm, tmp_addr);
17352- if (!vma || tmp_addr + len <= vma->vm_start)
17353+ if (check_heap_stack_gap(vma, tmp_addr, len))
17354 /* remember the address as a hint for next time */
17355 return mm->free_area_cache = tmp_addr;
17356 }
17357@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17358 * return with success:
17359 */
17360 vma = find_vma(mm, addr);
17361- if (!vma || addr+len <= vma->vm_start)
17362+ if (check_heap_stack_gap(vma, addr, len))
17363 /* remember the address as a hint for next time */
17364 return mm->free_area_cache = addr;
17365
17366@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17367 mm->cached_hole_size = vma->vm_start - addr;
17368
17369 /* try just below the current vma->vm_start */
17370- addr = vma->vm_start-len;
17371- } while (len < vma->vm_start);
17372+ addr = skip_heap_stack_gap(vma, len);
17373+ } while (!IS_ERR_VALUE(addr));
17374
17375 bottomup:
17376 /*
17377@@ -270,13 +278,21 @@ bottomup:
17378 * can happen with large stack limits and large mmap()
17379 * allocations.
17380 */
17381+ mm->mmap_base = TASK_UNMAPPED_BASE;
17382+
17383+#ifdef CONFIG_PAX_RANDMMAP
17384+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17385+ mm->mmap_base += mm->delta_mmap;
17386+#endif
17387+
17388+ mm->free_area_cache = mm->mmap_base;
17389 mm->cached_hole_size = ~0UL;
17390- mm->free_area_cache = TASK_UNMAPPED_BASE;
17391 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17392 /*
17393 * Restore the topdown base:
17394 */
17395- mm->free_area_cache = mm->mmap_base;
17396+ mm->mmap_base = base;
17397+ mm->free_area_cache = base;
17398 mm->cached_hole_size = ~0UL;
17399
17400 return addr;
17401diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17402index 9a0e312..e6f66f2 100644
17403--- a/arch/x86/kernel/syscall_table_32.S
17404+++ b/arch/x86/kernel/syscall_table_32.S
17405@@ -1,3 +1,4 @@
17406+.section .rodata,"a",@progbits
17407 ENTRY(sys_call_table)
17408 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17409 .long sys_exit
17410diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17411index e2410e2..4fe3fbc 100644
17412--- a/arch/x86/kernel/tboot.c
17413+++ b/arch/x86/kernel/tboot.c
17414@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17415
17416 void tboot_shutdown(u32 shutdown_type)
17417 {
17418- void (*shutdown)(void);
17419+ void (* __noreturn shutdown)(void);
17420
17421 if (!tboot_enabled())
17422 return;
17423@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17424
17425 switch_to_tboot_pt();
17426
17427- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17428+ shutdown = (void *)tboot->shutdown_entry;
17429 shutdown();
17430
17431 /* should not reach here */
17432@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17433 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17434 }
17435
17436-static atomic_t ap_wfs_count;
17437+static atomic_unchecked_t ap_wfs_count;
17438
17439 static int tboot_wait_for_aps(int num_aps)
17440 {
17441@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17442 {
17443 switch (action) {
17444 case CPU_DYING:
17445- atomic_inc(&ap_wfs_count);
17446+ atomic_inc_unchecked(&ap_wfs_count);
17447 if (num_online_cpus() == 1)
17448- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17449+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17450 return NOTIFY_BAD;
17451 break;
17452 }
17453@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17454
17455 tboot_create_trampoline();
17456
17457- atomic_set(&ap_wfs_count, 0);
17458+ atomic_set_unchecked(&ap_wfs_count, 0);
17459 register_hotcpu_notifier(&tboot_cpu_notifier);
17460 return 0;
17461 }
17462diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17463index dd5fbf4..b7f2232 100644
17464--- a/arch/x86/kernel/time.c
17465+++ b/arch/x86/kernel/time.c
17466@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17467 {
17468 unsigned long pc = instruction_pointer(regs);
17469
17470- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17471+ if (!user_mode(regs) && in_lock_functions(pc)) {
17472 #ifdef CONFIG_FRAME_POINTER
17473- return *(unsigned long *)(regs->bp + sizeof(long));
17474+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17475 #else
17476 unsigned long *sp =
17477 (unsigned long *)kernel_stack_pointer(regs);
17478@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17479 * or above a saved flags. Eflags has bits 22-31 zero,
17480 * kernel addresses don't.
17481 */
17482+
17483+#ifdef CONFIG_PAX_KERNEXEC
17484+ return ktla_ktva(sp[0]);
17485+#else
17486 if (sp[0] >> 22)
17487 return sp[0];
17488 if (sp[1] >> 22)
17489 return sp[1];
17490 #endif
17491+
17492+#endif
17493 }
17494 return pc;
17495 }
17496diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17497index 6bb7b85..dd853e1 100644
17498--- a/arch/x86/kernel/tls.c
17499+++ b/arch/x86/kernel/tls.c
17500@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17501 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17502 return -EINVAL;
17503
17504+#ifdef CONFIG_PAX_SEGMEXEC
17505+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17506+ return -EINVAL;
17507+#endif
17508+
17509 set_tls_desc(p, idx, &info, 1);
17510
17511 return 0;
17512diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17513index 451c0a7..e57f551 100644
17514--- a/arch/x86/kernel/trampoline_32.S
17515+++ b/arch/x86/kernel/trampoline_32.S
17516@@ -32,6 +32,12 @@
17517 #include <asm/segment.h>
17518 #include <asm/page_types.h>
17519
17520+#ifdef CONFIG_PAX_KERNEXEC
17521+#define ta(X) (X)
17522+#else
17523+#define ta(X) ((X) - __PAGE_OFFSET)
17524+#endif
17525+
17526 #ifdef CONFIG_SMP
17527
17528 .section ".x86_trampoline","a"
17529@@ -62,7 +68,7 @@ r_base = .
17530 inc %ax # protected mode (PE) bit
17531 lmsw %ax # into protected mode
17532 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17533- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17534+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17535
17536 # These need to be in the same 64K segment as the above;
17537 # hence we don't use the boot_gdt_descr defined in head.S
17538diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17539index 09ff517..df19fbff 100644
17540--- a/arch/x86/kernel/trampoline_64.S
17541+++ b/arch/x86/kernel/trampoline_64.S
17542@@ -90,7 +90,7 @@ startup_32:
17543 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17544 movl %eax, %ds
17545
17546- movl $X86_CR4_PAE, %eax
17547+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17548 movl %eax, %cr4 # Enable PAE mode
17549
17550 # Setup trampoline 4 level pagetables
17551@@ -138,7 +138,7 @@ tidt:
17552 # so the kernel can live anywhere
17553 .balign 4
17554 tgdt:
17555- .short tgdt_end - tgdt # gdt limit
17556+ .short tgdt_end - tgdt - 1 # gdt limit
17557 .long tgdt - r_base
17558 .short 0
17559 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17560diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17561index a8e3eb8..c9dbd7d 100644
17562--- a/arch/x86/kernel/traps.c
17563+++ b/arch/x86/kernel/traps.c
17564@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17565
17566 /* Do we ignore FPU interrupts ? */
17567 char ignore_fpu_irq;
17568-
17569-/*
17570- * The IDT has to be page-aligned to simplify the Pentium
17571- * F0 0F bug workaround.
17572- */
17573-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17574 #endif
17575
17576 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17577@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17578 }
17579
17580 static void __kprobes
17581-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17582+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17583 long error_code, siginfo_t *info)
17584 {
17585 struct task_struct *tsk = current;
17586
17587 #ifdef CONFIG_X86_32
17588- if (regs->flags & X86_VM_MASK) {
17589+ if (v8086_mode(regs)) {
17590 /*
17591 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17592 * On nmi (interrupt 2), do_trap should not be called.
17593@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17594 }
17595 #endif
17596
17597- if (!user_mode(regs))
17598+ if (!user_mode_novm(regs))
17599 goto kernel_trap;
17600
17601 #ifdef CONFIG_X86_32
17602@@ -148,7 +142,7 @@ trap_signal:
17603 printk_ratelimit()) {
17604 printk(KERN_INFO
17605 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17606- tsk->comm, tsk->pid, str,
17607+ tsk->comm, task_pid_nr(tsk), str,
17608 regs->ip, regs->sp, error_code);
17609 print_vma_addr(" in ", regs->ip);
17610 printk("\n");
17611@@ -165,8 +159,20 @@ kernel_trap:
17612 if (!fixup_exception(regs)) {
17613 tsk->thread.error_code = error_code;
17614 tsk->thread.trap_no = trapnr;
17615+
17616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17617+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17618+ str = "PAX: suspicious stack segment fault";
17619+#endif
17620+
17621 die(str, regs, error_code);
17622 }
17623+
17624+#ifdef CONFIG_PAX_REFCOUNT
17625+ if (trapnr == 4)
17626+ pax_report_refcount_overflow(regs);
17627+#endif
17628+
17629 return;
17630
17631 #ifdef CONFIG_X86_32
17632@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17633 conditional_sti(regs);
17634
17635 #ifdef CONFIG_X86_32
17636- if (regs->flags & X86_VM_MASK)
17637+ if (v8086_mode(regs))
17638 goto gp_in_vm86;
17639 #endif
17640
17641 tsk = current;
17642- if (!user_mode(regs))
17643+ if (!user_mode_novm(regs))
17644 goto gp_in_kernel;
17645
17646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17647+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17648+ struct mm_struct *mm = tsk->mm;
17649+ unsigned long limit;
17650+
17651+ down_write(&mm->mmap_sem);
17652+ limit = mm->context.user_cs_limit;
17653+ if (limit < TASK_SIZE) {
17654+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17655+ up_write(&mm->mmap_sem);
17656+ return;
17657+ }
17658+ up_write(&mm->mmap_sem);
17659+ }
17660+#endif
17661+
17662 tsk->thread.error_code = error_code;
17663 tsk->thread.trap_no = 13;
17664
17665@@ -295,6 +317,13 @@ gp_in_kernel:
17666 if (notify_die(DIE_GPF, "general protection fault", regs,
17667 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17668 return;
17669+
17670+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17671+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17672+ die("PAX: suspicious general protection fault", regs, error_code);
17673+ else
17674+#endif
17675+
17676 die("general protection fault", regs, error_code);
17677 }
17678
17679@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17680 /* It's safe to allow irq's after DR6 has been saved */
17681 preempt_conditional_sti(regs);
17682
17683- if (regs->flags & X86_VM_MASK) {
17684+ if (v8086_mode(regs)) {
17685 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17686 error_code, 1);
17687 preempt_conditional_cli(regs);
17688@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17689 * We already checked v86 mode above, so we can check for kernel mode
17690 * by just checking the CPL of CS.
17691 */
17692- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17693+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17694 tsk->thread.debugreg6 &= ~DR_STEP;
17695 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17696 regs->flags &= ~X86_EFLAGS_TF;
17697@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17698 return;
17699 conditional_sti(regs);
17700
17701- if (!user_mode_vm(regs))
17702+ if (!user_mode(regs))
17703 {
17704 if (!fixup_exception(regs)) {
17705 task->thread.error_code = error_code;
17706@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17707 void __math_state_restore(void)
17708 {
17709 struct thread_info *thread = current_thread_info();
17710- struct task_struct *tsk = thread->task;
17711+ struct task_struct *tsk = current;
17712
17713 /*
17714 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17715@@ -595,8 +624,7 @@ void __math_state_restore(void)
17716 */
17717 asmlinkage void math_state_restore(void)
17718 {
17719- struct thread_info *thread = current_thread_info();
17720- struct task_struct *tsk = thread->task;
17721+ struct task_struct *tsk = current;
17722
17723 if (!tsk_used_math(tsk)) {
17724 local_irq_enable();
17725diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17726index b9242ba..50c5edd 100644
17727--- a/arch/x86/kernel/verify_cpu.S
17728+++ b/arch/x86/kernel/verify_cpu.S
17729@@ -20,6 +20,7 @@
17730 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17731 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17732 * arch/x86/kernel/head_32.S: processor startup
17733+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17734 *
17735 * verify_cpu, returns the status of longmode and SSE in register %eax.
17736 * 0: Success 1: Failure
17737diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17738index 863f875..4307295 100644
17739--- a/arch/x86/kernel/vm86_32.c
17740+++ b/arch/x86/kernel/vm86_32.c
17741@@ -41,6 +41,7 @@
17742 #include <linux/ptrace.h>
17743 #include <linux/audit.h>
17744 #include <linux/stddef.h>
17745+#include <linux/grsecurity.h>
17746
17747 #include <asm/uaccess.h>
17748 #include <asm/io.h>
17749@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17750 do_exit(SIGSEGV);
17751 }
17752
17753- tss = &per_cpu(init_tss, get_cpu());
17754+ tss = init_tss + get_cpu();
17755 current->thread.sp0 = current->thread.saved_sp0;
17756 current->thread.sysenter_cs = __KERNEL_CS;
17757 load_sp0(tss, &current->thread);
17758@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17759 struct task_struct *tsk;
17760 int tmp, ret = -EPERM;
17761
17762+#ifdef CONFIG_GRKERNSEC_VM86
17763+ if (!capable(CAP_SYS_RAWIO)) {
17764+ gr_handle_vm86();
17765+ goto out;
17766+ }
17767+#endif
17768+
17769 tsk = current;
17770 if (tsk->thread.saved_sp0)
17771 goto out;
17772@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17773 int tmp, ret;
17774 struct vm86plus_struct __user *v86;
17775
17776+#ifdef CONFIG_GRKERNSEC_VM86
17777+ if (!capable(CAP_SYS_RAWIO)) {
17778+ gr_handle_vm86();
17779+ ret = -EPERM;
17780+ goto out;
17781+ }
17782+#endif
17783+
17784 tsk = current;
17785 switch (cmd) {
17786 case VM86_REQUEST_IRQ:
17787@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17788 tsk->thread.saved_fs = info->regs32->fs;
17789 tsk->thread.saved_gs = get_user_gs(info->regs32);
17790
17791- tss = &per_cpu(init_tss, get_cpu());
17792+ tss = init_tss + get_cpu();
17793 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17794 if (cpu_has_sep)
17795 tsk->thread.sysenter_cs = 0;
17796@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17797 goto cannot_handle;
17798 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17799 goto cannot_handle;
17800- intr_ptr = (unsigned long __user *) (i << 2);
17801+ intr_ptr = (__force unsigned long __user *) (i << 2);
17802 if (get_user(segoffs, intr_ptr))
17803 goto cannot_handle;
17804 if ((segoffs >> 16) == BIOSSEG)
17805diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17806index 0f703f1..9e15f64 100644
17807--- a/arch/x86/kernel/vmlinux.lds.S
17808+++ b/arch/x86/kernel/vmlinux.lds.S
17809@@ -26,6 +26,13 @@
17810 #include <asm/page_types.h>
17811 #include <asm/cache.h>
17812 #include <asm/boot.h>
17813+#include <asm/segment.h>
17814+
17815+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17816+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17817+#else
17818+#define __KERNEL_TEXT_OFFSET 0
17819+#endif
17820
17821 #undef i386 /* in case the preprocessor is a 32bit one */
17822
17823@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17824
17825 PHDRS {
17826 text PT_LOAD FLAGS(5); /* R_E */
17827+#ifdef CONFIG_X86_32
17828+ module PT_LOAD FLAGS(5); /* R_E */
17829+#endif
17830+#ifdef CONFIG_XEN
17831+ rodata PT_LOAD FLAGS(5); /* R_E */
17832+#else
17833+ rodata PT_LOAD FLAGS(4); /* R__ */
17834+#endif
17835 data PT_LOAD FLAGS(6); /* RW_ */
17836-#ifdef CONFIG_X86_64
17837+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17838 #ifdef CONFIG_SMP
17839 percpu PT_LOAD FLAGS(6); /* RW_ */
17840 #endif
17841+ text.init PT_LOAD FLAGS(5); /* R_E */
17842+ text.exit PT_LOAD FLAGS(5); /* R_E */
17843 init PT_LOAD FLAGS(7); /* RWE */
17844-#endif
17845 note PT_NOTE FLAGS(0); /* ___ */
17846 }
17847
17848 SECTIONS
17849 {
17850 #ifdef CONFIG_X86_32
17851- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17852- phys_startup_32 = startup_32 - LOAD_OFFSET;
17853+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17854 #else
17855- . = __START_KERNEL;
17856- phys_startup_64 = startup_64 - LOAD_OFFSET;
17857+ . = __START_KERNEL;
17858 #endif
17859
17860 /* Text and read-only data */
17861- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17862- _text = .;
17863+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17864 /* bootstrapping code */
17865+#ifdef CONFIG_X86_32
17866+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17867+#else
17868+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17869+#endif
17870+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17871+ _text = .;
17872 HEAD_TEXT
17873 #ifdef CONFIG_X86_32
17874 . = ALIGN(PAGE_SIZE);
17875@@ -108,13 +128,47 @@ SECTIONS
17876 IRQENTRY_TEXT
17877 *(.fixup)
17878 *(.gnu.warning)
17879- /* End of text section */
17880- _etext = .;
17881 } :text = 0x9090
17882
17883- NOTES :text :note
17884+ . += __KERNEL_TEXT_OFFSET;
17885
17886- EXCEPTION_TABLE(16) :text = 0x9090
17887+#ifdef CONFIG_X86_32
17888+ . = ALIGN(PAGE_SIZE);
17889+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17890+
17891+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17892+ MODULES_EXEC_VADDR = .;
17893+ BYTE(0)
17894+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17895+ . = ALIGN(HPAGE_SIZE);
17896+ MODULES_EXEC_END = . - 1;
17897+#endif
17898+
17899+ } :module
17900+#endif
17901+
17902+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17903+ /* End of text section */
17904+ _etext = . - __KERNEL_TEXT_OFFSET;
17905+ }
17906+
17907+#ifdef CONFIG_X86_32
17908+ . = ALIGN(PAGE_SIZE);
17909+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17910+ *(.idt)
17911+ . = ALIGN(PAGE_SIZE);
17912+ *(.empty_zero_page)
17913+ *(.initial_pg_fixmap)
17914+ *(.initial_pg_pmd)
17915+ *(.initial_page_table)
17916+ *(.swapper_pg_dir)
17917+ } :rodata
17918+#endif
17919+
17920+ . = ALIGN(PAGE_SIZE);
17921+ NOTES :rodata :note
17922+
17923+ EXCEPTION_TABLE(16) :rodata
17924
17925 #if defined(CONFIG_DEBUG_RODATA)
17926 /* .text should occupy whole number of pages */
17927@@ -126,16 +180,20 @@ SECTIONS
17928
17929 /* Data */
17930 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17931+
17932+#ifdef CONFIG_PAX_KERNEXEC
17933+ . = ALIGN(HPAGE_SIZE);
17934+#else
17935+ . = ALIGN(PAGE_SIZE);
17936+#endif
17937+
17938 /* Start of data section */
17939 _sdata = .;
17940
17941 /* init_task */
17942 INIT_TASK_DATA(THREAD_SIZE)
17943
17944-#ifdef CONFIG_X86_32
17945- /* 32 bit has nosave before _edata */
17946 NOSAVE_DATA
17947-#endif
17948
17949 PAGE_ALIGNED_DATA(PAGE_SIZE)
17950
17951@@ -176,12 +234,19 @@ SECTIONS
17952 #endif /* CONFIG_X86_64 */
17953
17954 /* Init code and data - will be freed after init */
17955- . = ALIGN(PAGE_SIZE);
17956 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17957+ BYTE(0)
17958+
17959+#ifdef CONFIG_PAX_KERNEXEC
17960+ . = ALIGN(HPAGE_SIZE);
17961+#else
17962+ . = ALIGN(PAGE_SIZE);
17963+#endif
17964+
17965 __init_begin = .; /* paired with __init_end */
17966- }
17967+ } :init.begin
17968
17969-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17970+#ifdef CONFIG_SMP
17971 /*
17972 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17973 * output PHDR, so the next output section - .init.text - should
17974@@ -190,12 +255,27 @@ SECTIONS
17975 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17976 #endif
17977
17978- INIT_TEXT_SECTION(PAGE_SIZE)
17979-#ifdef CONFIG_X86_64
17980- :init
17981-#endif
17982+ . = ALIGN(PAGE_SIZE);
17983+ init_begin = .;
17984+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17985+ VMLINUX_SYMBOL(_sinittext) = .;
17986+ INIT_TEXT
17987+ VMLINUX_SYMBOL(_einittext) = .;
17988+ . = ALIGN(PAGE_SIZE);
17989+ } :text.init
17990
17991- INIT_DATA_SECTION(16)
17992+ /*
17993+ * .exit.text is discard at runtime, not link time, to deal with
17994+ * references from .altinstructions and .eh_frame
17995+ */
17996+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17997+ EXIT_TEXT
17998+ . = ALIGN(16);
17999+ } :text.exit
18000+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18001+
18002+ . = ALIGN(PAGE_SIZE);
18003+ INIT_DATA_SECTION(16) :init
18004
18005 /*
18006 * Code and data for a variety of lowlevel trampolines, to be
18007@@ -269,19 +349,12 @@ SECTIONS
18008 }
18009
18010 . = ALIGN(8);
18011- /*
18012- * .exit.text is discard at runtime, not link time, to deal with
18013- * references from .altinstructions and .eh_frame
18014- */
18015- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18016- EXIT_TEXT
18017- }
18018
18019 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18020 EXIT_DATA
18021 }
18022
18023-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18024+#ifndef CONFIG_SMP
18025 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18026 #endif
18027
18028@@ -300,16 +373,10 @@ SECTIONS
18029 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18030 __smp_locks = .;
18031 *(.smp_locks)
18032- . = ALIGN(PAGE_SIZE);
18033 __smp_locks_end = .;
18034+ . = ALIGN(PAGE_SIZE);
18035 }
18036
18037-#ifdef CONFIG_X86_64
18038- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18039- NOSAVE_DATA
18040- }
18041-#endif
18042-
18043 /* BSS */
18044 . = ALIGN(PAGE_SIZE);
18045 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18046@@ -325,6 +392,7 @@ SECTIONS
18047 __brk_base = .;
18048 . += 64 * 1024; /* 64k alignment slop space */
18049 *(.brk_reservation) /* areas brk users have reserved */
18050+ . = ALIGN(HPAGE_SIZE);
18051 __brk_limit = .;
18052 }
18053
18054@@ -351,13 +419,12 @@ SECTIONS
18055 * for the boot processor.
18056 */
18057 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18058-INIT_PER_CPU(gdt_page);
18059 INIT_PER_CPU(irq_stack_union);
18060
18061 /*
18062 * Build-time check on the image size:
18063 */
18064-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18065+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18066 "kernel image bigger than KERNEL_IMAGE_SIZE");
18067
18068 #ifdef CONFIG_SMP
18069diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18070index e4d4a22..47ee71f 100644
18071--- a/arch/x86/kernel/vsyscall_64.c
18072+++ b/arch/x86/kernel/vsyscall_64.c
18073@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18074 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18075 };
18076
18077-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18078+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18079
18080 static int __init vsyscall_setup(char *str)
18081 {
18082 if (str) {
18083 if (!strcmp("emulate", str))
18084 vsyscall_mode = EMULATE;
18085- else if (!strcmp("native", str))
18086- vsyscall_mode = NATIVE;
18087 else if (!strcmp("none", str))
18088 vsyscall_mode = NONE;
18089 else
18090@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18091
18092 tsk = current;
18093 if (seccomp_mode(&tsk->seccomp))
18094- do_exit(SIGKILL);
18095+ do_group_exit(SIGKILL);
18096
18097 switch (vsyscall_nr) {
18098 case 0:
18099@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18100 return true;
18101
18102 sigsegv:
18103- force_sig(SIGSEGV, current);
18104- return true;
18105+ do_group_exit(SIGKILL);
18106 }
18107
18108 /*
18109@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18110 extern char __vvar_page;
18111 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18112
18113- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18114- vsyscall_mode == NATIVE
18115- ? PAGE_KERNEL_VSYSCALL
18116- : PAGE_KERNEL_VVAR);
18117+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18118 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18119 (unsigned long)VSYSCALL_START);
18120
18121diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18122index 9796c2f..f686fbf 100644
18123--- a/arch/x86/kernel/x8664_ksyms_64.c
18124+++ b/arch/x86/kernel/x8664_ksyms_64.c
18125@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18126 EXPORT_SYMBOL(copy_user_generic_string);
18127 EXPORT_SYMBOL(copy_user_generic_unrolled);
18128 EXPORT_SYMBOL(__copy_user_nocache);
18129-EXPORT_SYMBOL(_copy_from_user);
18130-EXPORT_SYMBOL(_copy_to_user);
18131
18132 EXPORT_SYMBOL(copy_page);
18133 EXPORT_SYMBOL(clear_page);
18134diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18135index a391134..d0b63b6e 100644
18136--- a/arch/x86/kernel/xsave.c
18137+++ b/arch/x86/kernel/xsave.c
18138@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18139 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18140 return -EINVAL;
18141
18142- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18143+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18144 fx_sw_user->extended_size -
18145 FP_XSTATE_MAGIC2_SIZE));
18146 if (err)
18147@@ -267,7 +267,7 @@ fx_only:
18148 * the other extended state.
18149 */
18150 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18151- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18152+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18153 }
18154
18155 /*
18156@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18157 if (use_xsave())
18158 err = restore_user_xstate(buf);
18159 else
18160- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18161+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18162 buf);
18163 if (unlikely(err)) {
18164 /*
18165diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18166index f1e3be1..588efc8 100644
18167--- a/arch/x86/kvm/emulate.c
18168+++ b/arch/x86/kvm/emulate.c
18169@@ -249,6 +249,7 @@ struct gprefix {
18170
18171 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18172 do { \
18173+ unsigned long _tmp; \
18174 __asm__ __volatile__ ( \
18175 _PRE_EFLAGS("0", "4", "2") \
18176 _op _suffix " %"_x"3,%1; " \
18177@@ -263,8 +264,6 @@ struct gprefix {
18178 /* Raw emulation: instruction has two explicit operands. */
18179 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18180 do { \
18181- unsigned long _tmp; \
18182- \
18183 switch ((ctxt)->dst.bytes) { \
18184 case 2: \
18185 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18186@@ -280,7 +279,6 @@ struct gprefix {
18187
18188 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18189 do { \
18190- unsigned long _tmp; \
18191 switch ((ctxt)->dst.bytes) { \
18192 case 1: \
18193 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18194diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18195index 54abb40..a192606 100644
18196--- a/arch/x86/kvm/lapic.c
18197+++ b/arch/x86/kvm/lapic.c
18198@@ -53,7 +53,7 @@
18199 #define APIC_BUS_CYCLE_NS 1
18200
18201 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18202-#define apic_debug(fmt, arg...)
18203+#define apic_debug(fmt, arg...) do {} while (0)
18204
18205 #define APIC_LVT_NUM 6
18206 /* 14 is the version for Xeon and Pentium 8.4.8*/
18207diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18208index f1b36cf..af8a124 100644
18209--- a/arch/x86/kvm/mmu.c
18210+++ b/arch/x86/kvm/mmu.c
18211@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18212
18213 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18214
18215- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18216+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18217
18218 /*
18219 * Assume that the pte write on a page table of the same type
18220@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18221 }
18222
18223 spin_lock(&vcpu->kvm->mmu_lock);
18224- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18225+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18226 gentry = 0;
18227 kvm_mmu_free_some_pages(vcpu);
18228 ++vcpu->kvm->stat.mmu_pte_write;
18229diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18230index 9299410..ade2f9b 100644
18231--- a/arch/x86/kvm/paging_tmpl.h
18232+++ b/arch/x86/kvm/paging_tmpl.h
18233@@ -197,7 +197,7 @@ retry_walk:
18234 if (unlikely(kvm_is_error_hva(host_addr)))
18235 goto error;
18236
18237- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18238+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18239 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18240 goto error;
18241
18242@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18243 if (need_flush)
18244 kvm_flush_remote_tlbs(vcpu->kvm);
18245
18246- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18247+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18248
18249 spin_unlock(&vcpu->kvm->mmu_lock);
18250
18251diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18252index e32243e..a6e6172 100644
18253--- a/arch/x86/kvm/svm.c
18254+++ b/arch/x86/kvm/svm.c
18255@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18256 int cpu = raw_smp_processor_id();
18257
18258 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18259+
18260+ pax_open_kernel();
18261 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18262+ pax_close_kernel();
18263+
18264 load_TR_desc();
18265 }
18266
18267@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18268 #endif
18269 #endif
18270
18271+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18272+ __set_fs(current_thread_info()->addr_limit);
18273+#endif
18274+
18275 reload_tss(vcpu);
18276
18277 local_irq_disable();
18278diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18279index 579a0b5..ed7bbf9 100644
18280--- a/arch/x86/kvm/vmx.c
18281+++ b/arch/x86/kvm/vmx.c
18282@@ -1305,7 +1305,11 @@ static void reload_tss(void)
18283 struct desc_struct *descs;
18284
18285 descs = (void *)gdt->address;
18286+
18287+ pax_open_kernel();
18288 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18289+ pax_close_kernel();
18290+
18291 load_TR_desc();
18292 }
18293
18294@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18295 if (!cpu_has_vmx_flexpriority())
18296 flexpriority_enabled = 0;
18297
18298- if (!cpu_has_vmx_tpr_shadow())
18299- kvm_x86_ops->update_cr8_intercept = NULL;
18300+ if (!cpu_has_vmx_tpr_shadow()) {
18301+ pax_open_kernel();
18302+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18303+ pax_close_kernel();
18304+ }
18305
18306 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18307 kvm_disable_largepages();
18308@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18309 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18310
18311 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18312- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18313+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18314
18315 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18316 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18317@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18318 "jmp .Lkvm_vmx_return \n\t"
18319 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18320 ".Lkvm_vmx_return: "
18321+
18322+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18323+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18324+ ".Lkvm_vmx_return2: "
18325+#endif
18326+
18327 /* Save guest registers, load host registers, keep flags */
18328 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18329 "pop %0 \n\t"
18330@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18331 #endif
18332 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18333 [wordsize]"i"(sizeof(ulong))
18334+
18335+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18336+ ,[cs]"i"(__KERNEL_CS)
18337+#endif
18338+
18339 : "cc", "memory"
18340 , R"ax", R"bx", R"di", R"si"
18341 #ifdef CONFIG_X86_64
18342@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18343 }
18344 }
18345
18346- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18347+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18348+
18349+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18350+ loadsegment(fs, __KERNEL_PERCPU);
18351+#endif
18352+
18353+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18354+ __set_fs(current_thread_info()->addr_limit);
18355+#endif
18356+
18357 vmx->loaded_vmcs->launched = 1;
18358
18359 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18360diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18361index 4c938da..4ddef65 100644
18362--- a/arch/x86/kvm/x86.c
18363+++ b/arch/x86/kvm/x86.c
18364@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18365 {
18366 struct kvm *kvm = vcpu->kvm;
18367 int lm = is_long_mode(vcpu);
18368- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18369- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18370+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18371+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18372 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18373 : kvm->arch.xen_hvm_config.blob_size_32;
18374 u32 page_num = data & ~PAGE_MASK;
18375@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18376 if (n < msr_list.nmsrs)
18377 goto out;
18378 r = -EFAULT;
18379+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18380+ goto out;
18381 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18382 num_msrs_to_save * sizeof(u32)))
18383 goto out;
18384@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18385 struct kvm_cpuid2 *cpuid,
18386 struct kvm_cpuid_entry2 __user *entries)
18387 {
18388- int r;
18389+ int r, i;
18390
18391 r = -E2BIG;
18392 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18393 goto out;
18394 r = -EFAULT;
18395- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18396- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18397+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18398 goto out;
18399+ for (i = 0; i < cpuid->nent; ++i) {
18400+ struct kvm_cpuid_entry2 cpuid_entry;
18401+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18402+ goto out;
18403+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18404+ }
18405 vcpu->arch.cpuid_nent = cpuid->nent;
18406 kvm_apic_set_version(vcpu);
18407 kvm_x86_ops->cpuid_update(vcpu);
18408@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18409 struct kvm_cpuid2 *cpuid,
18410 struct kvm_cpuid_entry2 __user *entries)
18411 {
18412- int r;
18413+ int r, i;
18414
18415 r = -E2BIG;
18416 if (cpuid->nent < vcpu->arch.cpuid_nent)
18417 goto out;
18418 r = -EFAULT;
18419- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18420- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18421+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18422 goto out;
18423+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18424+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18425+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18426+ goto out;
18427+ }
18428 return 0;
18429
18430 out:
18431@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18432 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18433 struct kvm_interrupt *irq)
18434 {
18435- if (irq->irq < 0 || irq->irq >= 256)
18436+ if (irq->irq >= 256)
18437 return -EINVAL;
18438 if (irqchip_in_kernel(vcpu->kvm))
18439 return -ENXIO;
18440@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18441 kvm_mmu_set_mmio_spte_mask(mask);
18442 }
18443
18444-int kvm_arch_init(void *opaque)
18445+int kvm_arch_init(const void *opaque)
18446 {
18447 int r;
18448 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18449diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18450index cf4603b..7cdde38 100644
18451--- a/arch/x86/lguest/boot.c
18452+++ b/arch/x86/lguest/boot.c
18453@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18454 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18455 * Launcher to reboot us.
18456 */
18457-static void lguest_restart(char *reason)
18458+static __noreturn void lguest_restart(char *reason)
18459 {
18460 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18461+ BUG();
18462 }
18463
18464 /*G:050
18465diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18466index 042f682..c92afb6 100644
18467--- a/arch/x86/lib/atomic64_32.c
18468+++ b/arch/x86/lib/atomic64_32.c
18469@@ -8,18 +8,30 @@
18470
18471 long long atomic64_read_cx8(long long, const atomic64_t *v);
18472 EXPORT_SYMBOL(atomic64_read_cx8);
18473+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18474+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18475 long long atomic64_set_cx8(long long, const atomic64_t *v);
18476 EXPORT_SYMBOL(atomic64_set_cx8);
18477+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18478+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18479 long long atomic64_xchg_cx8(long long, unsigned high);
18480 EXPORT_SYMBOL(atomic64_xchg_cx8);
18481 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18482 EXPORT_SYMBOL(atomic64_add_return_cx8);
18483+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18484+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18485 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18486 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18487+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18488+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18489 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18490 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18491+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18492+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18493 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18494 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18495+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18496+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18497 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18498 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18499 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18500@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18501 #ifndef CONFIG_X86_CMPXCHG64
18502 long long atomic64_read_386(long long, const atomic64_t *v);
18503 EXPORT_SYMBOL(atomic64_read_386);
18504+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18505+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18506 long long atomic64_set_386(long long, const atomic64_t *v);
18507 EXPORT_SYMBOL(atomic64_set_386);
18508+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18509+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18510 long long atomic64_xchg_386(long long, unsigned high);
18511 EXPORT_SYMBOL(atomic64_xchg_386);
18512 long long atomic64_add_return_386(long long a, atomic64_t *v);
18513 EXPORT_SYMBOL(atomic64_add_return_386);
18514+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18515+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18516 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18517 EXPORT_SYMBOL(atomic64_sub_return_386);
18518+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18519+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18520 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18521 EXPORT_SYMBOL(atomic64_inc_return_386);
18522+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18523+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18524 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18525 EXPORT_SYMBOL(atomic64_dec_return_386);
18526+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18527+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18528 long long atomic64_add_386(long long a, atomic64_t *v);
18529 EXPORT_SYMBOL(atomic64_add_386);
18530+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18531+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18532 long long atomic64_sub_386(long long a, atomic64_t *v);
18533 EXPORT_SYMBOL(atomic64_sub_386);
18534+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18535+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18536 long long atomic64_inc_386(long long a, atomic64_t *v);
18537 EXPORT_SYMBOL(atomic64_inc_386);
18538+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18539+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18540 long long atomic64_dec_386(long long a, atomic64_t *v);
18541 EXPORT_SYMBOL(atomic64_dec_386);
18542+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18543+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18544 long long atomic64_dec_if_positive_386(atomic64_t *v);
18545 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18546 int atomic64_inc_not_zero_386(atomic64_t *v);
18547diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18548index e8e7e0d..56fd1b0 100644
18549--- a/arch/x86/lib/atomic64_386_32.S
18550+++ b/arch/x86/lib/atomic64_386_32.S
18551@@ -48,6 +48,10 @@ BEGIN(read)
18552 movl (v), %eax
18553 movl 4(v), %edx
18554 RET_ENDP
18555+BEGIN(read_unchecked)
18556+ movl (v), %eax
18557+ movl 4(v), %edx
18558+RET_ENDP
18559 #undef v
18560
18561 #define v %esi
18562@@ -55,6 +59,10 @@ BEGIN(set)
18563 movl %ebx, (v)
18564 movl %ecx, 4(v)
18565 RET_ENDP
18566+BEGIN(set_unchecked)
18567+ movl %ebx, (v)
18568+ movl %ecx, 4(v)
18569+RET_ENDP
18570 #undef v
18571
18572 #define v %esi
18573@@ -70,6 +78,20 @@ RET_ENDP
18574 BEGIN(add)
18575 addl %eax, (v)
18576 adcl %edx, 4(v)
18577+
18578+#ifdef CONFIG_PAX_REFCOUNT
18579+ jno 0f
18580+ subl %eax, (v)
18581+ sbbl %edx, 4(v)
18582+ int $4
18583+0:
18584+ _ASM_EXTABLE(0b, 0b)
18585+#endif
18586+
18587+RET_ENDP
18588+BEGIN(add_unchecked)
18589+ addl %eax, (v)
18590+ adcl %edx, 4(v)
18591 RET_ENDP
18592 #undef v
18593
18594@@ -77,6 +99,24 @@ RET_ENDP
18595 BEGIN(add_return)
18596 addl (v), %eax
18597 adcl 4(v), %edx
18598+
18599+#ifdef CONFIG_PAX_REFCOUNT
18600+ into
18601+1234:
18602+ _ASM_EXTABLE(1234b, 2f)
18603+#endif
18604+
18605+ movl %eax, (v)
18606+ movl %edx, 4(v)
18607+
18608+#ifdef CONFIG_PAX_REFCOUNT
18609+2:
18610+#endif
18611+
18612+RET_ENDP
18613+BEGIN(add_return_unchecked)
18614+ addl (v), %eax
18615+ adcl 4(v), %edx
18616 movl %eax, (v)
18617 movl %edx, 4(v)
18618 RET_ENDP
18619@@ -86,6 +126,20 @@ RET_ENDP
18620 BEGIN(sub)
18621 subl %eax, (v)
18622 sbbl %edx, 4(v)
18623+
18624+#ifdef CONFIG_PAX_REFCOUNT
18625+ jno 0f
18626+ addl %eax, (v)
18627+ adcl %edx, 4(v)
18628+ int $4
18629+0:
18630+ _ASM_EXTABLE(0b, 0b)
18631+#endif
18632+
18633+RET_ENDP
18634+BEGIN(sub_unchecked)
18635+ subl %eax, (v)
18636+ sbbl %edx, 4(v)
18637 RET_ENDP
18638 #undef v
18639
18640@@ -96,6 +150,27 @@ BEGIN(sub_return)
18641 sbbl $0, %edx
18642 addl (v), %eax
18643 adcl 4(v), %edx
18644+
18645+#ifdef CONFIG_PAX_REFCOUNT
18646+ into
18647+1234:
18648+ _ASM_EXTABLE(1234b, 2f)
18649+#endif
18650+
18651+ movl %eax, (v)
18652+ movl %edx, 4(v)
18653+
18654+#ifdef CONFIG_PAX_REFCOUNT
18655+2:
18656+#endif
18657+
18658+RET_ENDP
18659+BEGIN(sub_return_unchecked)
18660+ negl %edx
18661+ negl %eax
18662+ sbbl $0, %edx
18663+ addl (v), %eax
18664+ adcl 4(v), %edx
18665 movl %eax, (v)
18666 movl %edx, 4(v)
18667 RET_ENDP
18668@@ -105,6 +180,20 @@ RET_ENDP
18669 BEGIN(inc)
18670 addl $1, (v)
18671 adcl $0, 4(v)
18672+
18673+#ifdef CONFIG_PAX_REFCOUNT
18674+ jno 0f
18675+ subl $1, (v)
18676+ sbbl $0, 4(v)
18677+ int $4
18678+0:
18679+ _ASM_EXTABLE(0b, 0b)
18680+#endif
18681+
18682+RET_ENDP
18683+BEGIN(inc_unchecked)
18684+ addl $1, (v)
18685+ adcl $0, 4(v)
18686 RET_ENDP
18687 #undef v
18688
18689@@ -114,6 +203,26 @@ BEGIN(inc_return)
18690 movl 4(v), %edx
18691 addl $1, %eax
18692 adcl $0, %edx
18693+
18694+#ifdef CONFIG_PAX_REFCOUNT
18695+ into
18696+1234:
18697+ _ASM_EXTABLE(1234b, 2f)
18698+#endif
18699+
18700+ movl %eax, (v)
18701+ movl %edx, 4(v)
18702+
18703+#ifdef CONFIG_PAX_REFCOUNT
18704+2:
18705+#endif
18706+
18707+RET_ENDP
18708+BEGIN(inc_return_unchecked)
18709+ movl (v), %eax
18710+ movl 4(v), %edx
18711+ addl $1, %eax
18712+ adcl $0, %edx
18713 movl %eax, (v)
18714 movl %edx, 4(v)
18715 RET_ENDP
18716@@ -123,6 +232,20 @@ RET_ENDP
18717 BEGIN(dec)
18718 subl $1, (v)
18719 sbbl $0, 4(v)
18720+
18721+#ifdef CONFIG_PAX_REFCOUNT
18722+ jno 0f
18723+ addl $1, (v)
18724+ adcl $0, 4(v)
18725+ int $4
18726+0:
18727+ _ASM_EXTABLE(0b, 0b)
18728+#endif
18729+
18730+RET_ENDP
18731+BEGIN(dec_unchecked)
18732+ subl $1, (v)
18733+ sbbl $0, 4(v)
18734 RET_ENDP
18735 #undef v
18736
18737@@ -132,6 +255,26 @@ BEGIN(dec_return)
18738 movl 4(v), %edx
18739 subl $1, %eax
18740 sbbl $0, %edx
18741+
18742+#ifdef CONFIG_PAX_REFCOUNT
18743+ into
18744+1234:
18745+ _ASM_EXTABLE(1234b, 2f)
18746+#endif
18747+
18748+ movl %eax, (v)
18749+ movl %edx, 4(v)
18750+
18751+#ifdef CONFIG_PAX_REFCOUNT
18752+2:
18753+#endif
18754+
18755+RET_ENDP
18756+BEGIN(dec_return_unchecked)
18757+ movl (v), %eax
18758+ movl 4(v), %edx
18759+ subl $1, %eax
18760+ sbbl $0, %edx
18761 movl %eax, (v)
18762 movl %edx, 4(v)
18763 RET_ENDP
18764@@ -143,6 +286,13 @@ BEGIN(add_unless)
18765 adcl %edx, %edi
18766 addl (v), %eax
18767 adcl 4(v), %edx
18768+
18769+#ifdef CONFIG_PAX_REFCOUNT
18770+ into
18771+1234:
18772+ _ASM_EXTABLE(1234b, 2f)
18773+#endif
18774+
18775 cmpl %eax, %esi
18776 je 3f
18777 1:
18778@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18779 1:
18780 addl $1, %eax
18781 adcl $0, %edx
18782+
18783+#ifdef CONFIG_PAX_REFCOUNT
18784+ into
18785+1234:
18786+ _ASM_EXTABLE(1234b, 2f)
18787+#endif
18788+
18789 movl %eax, (v)
18790 movl %edx, 4(v)
18791 movl $1, %eax
18792@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18793 movl 4(v), %edx
18794 subl $1, %eax
18795 sbbl $0, %edx
18796+
18797+#ifdef CONFIG_PAX_REFCOUNT
18798+ into
18799+1234:
18800+ _ASM_EXTABLE(1234b, 1f)
18801+#endif
18802+
18803 js 1f
18804 movl %eax, (v)
18805 movl %edx, 4(v)
18806diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18807index 391a083..d658e9f 100644
18808--- a/arch/x86/lib/atomic64_cx8_32.S
18809+++ b/arch/x86/lib/atomic64_cx8_32.S
18810@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18811 CFI_STARTPROC
18812
18813 read64 %ecx
18814+ pax_force_retaddr
18815 ret
18816 CFI_ENDPROC
18817 ENDPROC(atomic64_read_cx8)
18818
18819+ENTRY(atomic64_read_unchecked_cx8)
18820+ CFI_STARTPROC
18821+
18822+ read64 %ecx
18823+ pax_force_retaddr
18824+ ret
18825+ CFI_ENDPROC
18826+ENDPROC(atomic64_read_unchecked_cx8)
18827+
18828 ENTRY(atomic64_set_cx8)
18829 CFI_STARTPROC
18830
18831@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18832 cmpxchg8b (%esi)
18833 jne 1b
18834
18835+ pax_force_retaddr
18836 ret
18837 CFI_ENDPROC
18838 ENDPROC(atomic64_set_cx8)
18839
18840+ENTRY(atomic64_set_unchecked_cx8)
18841+ CFI_STARTPROC
18842+
18843+1:
18844+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18845+ * are atomic on 586 and newer */
18846+ cmpxchg8b (%esi)
18847+ jne 1b
18848+
18849+ pax_force_retaddr
18850+ ret
18851+ CFI_ENDPROC
18852+ENDPROC(atomic64_set_unchecked_cx8)
18853+
18854 ENTRY(atomic64_xchg_cx8)
18855 CFI_STARTPROC
18856
18857@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18858 cmpxchg8b (%esi)
18859 jne 1b
18860
18861+ pax_force_retaddr
18862 ret
18863 CFI_ENDPROC
18864 ENDPROC(atomic64_xchg_cx8)
18865
18866-.macro addsub_return func ins insc
18867-ENTRY(atomic64_\func\()_return_cx8)
18868+.macro addsub_return func ins insc unchecked=""
18869+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18870 CFI_STARTPROC
18871 SAVE ebp
18872 SAVE ebx
18873@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18874 movl %edx, %ecx
18875 \ins\()l %esi, %ebx
18876 \insc\()l %edi, %ecx
18877+
18878+.ifb \unchecked
18879+#ifdef CONFIG_PAX_REFCOUNT
18880+ into
18881+2:
18882+ _ASM_EXTABLE(2b, 3f)
18883+#endif
18884+.endif
18885+
18886 LOCK_PREFIX
18887 cmpxchg8b (%ebp)
18888 jne 1b
18889-
18890-10:
18891 movl %ebx, %eax
18892 movl %ecx, %edx
18893+
18894+.ifb \unchecked
18895+#ifdef CONFIG_PAX_REFCOUNT
18896+3:
18897+#endif
18898+.endif
18899+
18900 RESTORE edi
18901 RESTORE esi
18902 RESTORE ebx
18903 RESTORE ebp
18904+ pax_force_retaddr
18905 ret
18906 CFI_ENDPROC
18907-ENDPROC(atomic64_\func\()_return_cx8)
18908+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18909 .endm
18910
18911 addsub_return add add adc
18912 addsub_return sub sub sbb
18913+addsub_return add add adc _unchecked
18914+addsub_return sub sub sbb _unchecked
18915
18916-.macro incdec_return func ins insc
18917-ENTRY(atomic64_\func\()_return_cx8)
18918+.macro incdec_return func ins insc unchecked
18919+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18920 CFI_STARTPROC
18921 SAVE ebx
18922
18923@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18924 movl %edx, %ecx
18925 \ins\()l $1, %ebx
18926 \insc\()l $0, %ecx
18927+
18928+.ifb \unchecked
18929+#ifdef CONFIG_PAX_REFCOUNT
18930+ into
18931+2:
18932+ _ASM_EXTABLE(2b, 3f)
18933+#endif
18934+.endif
18935+
18936 LOCK_PREFIX
18937 cmpxchg8b (%esi)
18938 jne 1b
18939
18940-10:
18941 movl %ebx, %eax
18942 movl %ecx, %edx
18943+
18944+.ifb \unchecked
18945+#ifdef CONFIG_PAX_REFCOUNT
18946+3:
18947+#endif
18948+.endif
18949+
18950 RESTORE ebx
18951+ pax_force_retaddr
18952 ret
18953 CFI_ENDPROC
18954-ENDPROC(atomic64_\func\()_return_cx8)
18955+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18956 .endm
18957
18958 incdec_return inc add adc
18959 incdec_return dec sub sbb
18960+incdec_return inc add adc _unchecked
18961+incdec_return dec sub sbb _unchecked
18962
18963 ENTRY(atomic64_dec_if_positive_cx8)
18964 CFI_STARTPROC
18965@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18966 movl %edx, %ecx
18967 subl $1, %ebx
18968 sbb $0, %ecx
18969+
18970+#ifdef CONFIG_PAX_REFCOUNT
18971+ into
18972+1234:
18973+ _ASM_EXTABLE(1234b, 2f)
18974+#endif
18975+
18976 js 2f
18977 LOCK_PREFIX
18978 cmpxchg8b (%esi)
18979@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18980 movl %ebx, %eax
18981 movl %ecx, %edx
18982 RESTORE ebx
18983+ pax_force_retaddr
18984 ret
18985 CFI_ENDPROC
18986 ENDPROC(atomic64_dec_if_positive_cx8)
18987@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18988 movl %edx, %ecx
18989 addl %esi, %ebx
18990 adcl %edi, %ecx
18991+
18992+#ifdef CONFIG_PAX_REFCOUNT
18993+ into
18994+1234:
18995+ _ASM_EXTABLE(1234b, 3f)
18996+#endif
18997+
18998 LOCK_PREFIX
18999 cmpxchg8b (%ebp)
19000 jne 1b
19001@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19002 CFI_ADJUST_CFA_OFFSET -8
19003 RESTORE ebx
19004 RESTORE ebp
19005+ pax_force_retaddr
19006 ret
19007 4:
19008 cmpl %edx, 4(%esp)
19009@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19010 movl %edx, %ecx
19011 addl $1, %ebx
19012 adcl $0, %ecx
19013+
19014+#ifdef CONFIG_PAX_REFCOUNT
19015+ into
19016+1234:
19017+ _ASM_EXTABLE(1234b, 3f)
19018+#endif
19019+
19020 LOCK_PREFIX
19021 cmpxchg8b (%esi)
19022 jne 1b
19023@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19024 movl $1, %eax
19025 3:
19026 RESTORE ebx
19027+ pax_force_retaddr
19028 ret
19029 4:
19030 testl %edx, %edx
19031diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19032index 78d16a5..fbcf666 100644
19033--- a/arch/x86/lib/checksum_32.S
19034+++ b/arch/x86/lib/checksum_32.S
19035@@ -28,7 +28,8 @@
19036 #include <linux/linkage.h>
19037 #include <asm/dwarf2.h>
19038 #include <asm/errno.h>
19039-
19040+#include <asm/segment.h>
19041+
19042 /*
19043 * computes a partial checksum, e.g. for TCP/UDP fragments
19044 */
19045@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19046
19047 #define ARGBASE 16
19048 #define FP 12
19049-
19050-ENTRY(csum_partial_copy_generic)
19051+
19052+ENTRY(csum_partial_copy_generic_to_user)
19053 CFI_STARTPROC
19054+
19055+#ifdef CONFIG_PAX_MEMORY_UDEREF
19056+ pushl_cfi %gs
19057+ popl_cfi %es
19058+ jmp csum_partial_copy_generic
19059+#endif
19060+
19061+ENTRY(csum_partial_copy_generic_from_user)
19062+
19063+#ifdef CONFIG_PAX_MEMORY_UDEREF
19064+ pushl_cfi %gs
19065+ popl_cfi %ds
19066+#endif
19067+
19068+ENTRY(csum_partial_copy_generic)
19069 subl $4,%esp
19070 CFI_ADJUST_CFA_OFFSET 4
19071 pushl_cfi %edi
19072@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19073 jmp 4f
19074 SRC(1: movw (%esi), %bx )
19075 addl $2, %esi
19076-DST( movw %bx, (%edi) )
19077+DST( movw %bx, %es:(%edi) )
19078 addl $2, %edi
19079 addw %bx, %ax
19080 adcl $0, %eax
19081@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19082 SRC(1: movl (%esi), %ebx )
19083 SRC( movl 4(%esi), %edx )
19084 adcl %ebx, %eax
19085-DST( movl %ebx, (%edi) )
19086+DST( movl %ebx, %es:(%edi) )
19087 adcl %edx, %eax
19088-DST( movl %edx, 4(%edi) )
19089+DST( movl %edx, %es:4(%edi) )
19090
19091 SRC( movl 8(%esi), %ebx )
19092 SRC( movl 12(%esi), %edx )
19093 adcl %ebx, %eax
19094-DST( movl %ebx, 8(%edi) )
19095+DST( movl %ebx, %es:8(%edi) )
19096 adcl %edx, %eax
19097-DST( movl %edx, 12(%edi) )
19098+DST( movl %edx, %es:12(%edi) )
19099
19100 SRC( movl 16(%esi), %ebx )
19101 SRC( movl 20(%esi), %edx )
19102 adcl %ebx, %eax
19103-DST( movl %ebx, 16(%edi) )
19104+DST( movl %ebx, %es:16(%edi) )
19105 adcl %edx, %eax
19106-DST( movl %edx, 20(%edi) )
19107+DST( movl %edx, %es:20(%edi) )
19108
19109 SRC( movl 24(%esi), %ebx )
19110 SRC( movl 28(%esi), %edx )
19111 adcl %ebx, %eax
19112-DST( movl %ebx, 24(%edi) )
19113+DST( movl %ebx, %es:24(%edi) )
19114 adcl %edx, %eax
19115-DST( movl %edx, 28(%edi) )
19116+DST( movl %edx, %es:28(%edi) )
19117
19118 lea 32(%esi), %esi
19119 lea 32(%edi), %edi
19120@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19121 shrl $2, %edx # This clears CF
19122 SRC(3: movl (%esi), %ebx )
19123 adcl %ebx, %eax
19124-DST( movl %ebx, (%edi) )
19125+DST( movl %ebx, %es:(%edi) )
19126 lea 4(%esi), %esi
19127 lea 4(%edi), %edi
19128 dec %edx
19129@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19130 jb 5f
19131 SRC( movw (%esi), %cx )
19132 leal 2(%esi), %esi
19133-DST( movw %cx, (%edi) )
19134+DST( movw %cx, %es:(%edi) )
19135 leal 2(%edi), %edi
19136 je 6f
19137 shll $16,%ecx
19138 SRC(5: movb (%esi), %cl )
19139-DST( movb %cl, (%edi) )
19140+DST( movb %cl, %es:(%edi) )
19141 6: addl %ecx, %eax
19142 adcl $0, %eax
19143 7:
19144@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19145
19146 6001:
19147 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19148- movl $-EFAULT, (%ebx)
19149+ movl $-EFAULT, %ss:(%ebx)
19150
19151 # zero the complete destination - computing the rest
19152 # is too much work
19153@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19154
19155 6002:
19156 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19157- movl $-EFAULT,(%ebx)
19158+ movl $-EFAULT,%ss:(%ebx)
19159 jmp 5000b
19160
19161 .previous
19162
19163+ pushl_cfi %ss
19164+ popl_cfi %ds
19165+ pushl_cfi %ss
19166+ popl_cfi %es
19167 popl_cfi %ebx
19168 CFI_RESTORE ebx
19169 popl_cfi %esi
19170@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19171 popl_cfi %ecx # equivalent to addl $4,%esp
19172 ret
19173 CFI_ENDPROC
19174-ENDPROC(csum_partial_copy_generic)
19175+ENDPROC(csum_partial_copy_generic_to_user)
19176
19177 #else
19178
19179 /* Version for PentiumII/PPro */
19180
19181 #define ROUND1(x) \
19182+ nop; nop; nop; \
19183 SRC(movl x(%esi), %ebx ) ; \
19184 addl %ebx, %eax ; \
19185- DST(movl %ebx, x(%edi) ) ;
19186+ DST(movl %ebx, %es:x(%edi)) ;
19187
19188 #define ROUND(x) \
19189+ nop; nop; nop; \
19190 SRC(movl x(%esi), %ebx ) ; \
19191 adcl %ebx, %eax ; \
19192- DST(movl %ebx, x(%edi) ) ;
19193+ DST(movl %ebx, %es:x(%edi)) ;
19194
19195 #define ARGBASE 12
19196-
19197-ENTRY(csum_partial_copy_generic)
19198+
19199+ENTRY(csum_partial_copy_generic_to_user)
19200 CFI_STARTPROC
19201+
19202+#ifdef CONFIG_PAX_MEMORY_UDEREF
19203+ pushl_cfi %gs
19204+ popl_cfi %es
19205+ jmp csum_partial_copy_generic
19206+#endif
19207+
19208+ENTRY(csum_partial_copy_generic_from_user)
19209+
19210+#ifdef CONFIG_PAX_MEMORY_UDEREF
19211+ pushl_cfi %gs
19212+ popl_cfi %ds
19213+#endif
19214+
19215+ENTRY(csum_partial_copy_generic)
19216 pushl_cfi %ebx
19217 CFI_REL_OFFSET ebx, 0
19218 pushl_cfi %edi
19219@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19220 subl %ebx, %edi
19221 lea -1(%esi),%edx
19222 andl $-32,%edx
19223- lea 3f(%ebx,%ebx), %ebx
19224+ lea 3f(%ebx,%ebx,2), %ebx
19225 testl %esi, %esi
19226 jmp *%ebx
19227 1: addl $64,%esi
19228@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19229 jb 5f
19230 SRC( movw (%esi), %dx )
19231 leal 2(%esi), %esi
19232-DST( movw %dx, (%edi) )
19233+DST( movw %dx, %es:(%edi) )
19234 leal 2(%edi), %edi
19235 je 6f
19236 shll $16,%edx
19237 5:
19238 SRC( movb (%esi), %dl )
19239-DST( movb %dl, (%edi) )
19240+DST( movb %dl, %es:(%edi) )
19241 6: addl %edx, %eax
19242 adcl $0, %eax
19243 7:
19244 .section .fixup, "ax"
19245 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19246- movl $-EFAULT, (%ebx)
19247+ movl $-EFAULT, %ss:(%ebx)
19248 # zero the complete destination (computing the rest is too much work)
19249 movl ARGBASE+8(%esp),%edi # dst
19250 movl ARGBASE+12(%esp),%ecx # len
19251@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19252 rep; stosb
19253 jmp 7b
19254 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19255- movl $-EFAULT, (%ebx)
19256+ movl $-EFAULT, %ss:(%ebx)
19257 jmp 7b
19258 .previous
19259
19260+#ifdef CONFIG_PAX_MEMORY_UDEREF
19261+ pushl_cfi %ss
19262+ popl_cfi %ds
19263+ pushl_cfi %ss
19264+ popl_cfi %es
19265+#endif
19266+
19267 popl_cfi %esi
19268 CFI_RESTORE esi
19269 popl_cfi %edi
19270@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19271 CFI_RESTORE ebx
19272 ret
19273 CFI_ENDPROC
19274-ENDPROC(csum_partial_copy_generic)
19275+ENDPROC(csum_partial_copy_generic_to_user)
19276
19277 #undef ROUND
19278 #undef ROUND1
19279diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19280index f2145cf..cea889d 100644
19281--- a/arch/x86/lib/clear_page_64.S
19282+++ b/arch/x86/lib/clear_page_64.S
19283@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19284 movl $4096/8,%ecx
19285 xorl %eax,%eax
19286 rep stosq
19287+ pax_force_retaddr
19288 ret
19289 CFI_ENDPROC
19290 ENDPROC(clear_page_c)
19291@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19292 movl $4096,%ecx
19293 xorl %eax,%eax
19294 rep stosb
19295+ pax_force_retaddr
19296 ret
19297 CFI_ENDPROC
19298 ENDPROC(clear_page_c_e)
19299@@ -43,6 +45,7 @@ ENTRY(clear_page)
19300 leaq 64(%rdi),%rdi
19301 jnz .Lloop
19302 nop
19303+ pax_force_retaddr
19304 ret
19305 CFI_ENDPROC
19306 .Lclear_page_end:
19307@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19308
19309 #include <asm/cpufeature.h>
19310
19311- .section .altinstr_replacement,"ax"
19312+ .section .altinstr_replacement,"a"
19313 1: .byte 0xeb /* jmp <disp8> */
19314 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19315 2: .byte 0xeb /* jmp <disp8> */
19316diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19317index 1e572c5..2a162cd 100644
19318--- a/arch/x86/lib/cmpxchg16b_emu.S
19319+++ b/arch/x86/lib/cmpxchg16b_emu.S
19320@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19321
19322 popf
19323 mov $1, %al
19324+ pax_force_retaddr
19325 ret
19326
19327 not_same:
19328 popf
19329 xor %al,%al
19330+ pax_force_retaddr
19331 ret
19332
19333 CFI_ENDPROC
19334diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19335index 01c805b..dccb07f 100644
19336--- a/arch/x86/lib/copy_page_64.S
19337+++ b/arch/x86/lib/copy_page_64.S
19338@@ -9,6 +9,7 @@ copy_page_c:
19339 CFI_STARTPROC
19340 movl $4096/8,%ecx
19341 rep movsq
19342+ pax_force_retaddr
19343 ret
19344 CFI_ENDPROC
19345 ENDPROC(copy_page_c)
19346@@ -39,7 +40,7 @@ ENTRY(copy_page)
19347 movq 16 (%rsi), %rdx
19348 movq 24 (%rsi), %r8
19349 movq 32 (%rsi), %r9
19350- movq 40 (%rsi), %r10
19351+ movq 40 (%rsi), %r13
19352 movq 48 (%rsi), %r11
19353 movq 56 (%rsi), %r12
19354
19355@@ -50,7 +51,7 @@ ENTRY(copy_page)
19356 movq %rdx, 16 (%rdi)
19357 movq %r8, 24 (%rdi)
19358 movq %r9, 32 (%rdi)
19359- movq %r10, 40 (%rdi)
19360+ movq %r13, 40 (%rdi)
19361 movq %r11, 48 (%rdi)
19362 movq %r12, 56 (%rdi)
19363
19364@@ -69,7 +70,7 @@ ENTRY(copy_page)
19365 movq 16 (%rsi), %rdx
19366 movq 24 (%rsi), %r8
19367 movq 32 (%rsi), %r9
19368- movq 40 (%rsi), %r10
19369+ movq 40 (%rsi), %r13
19370 movq 48 (%rsi), %r11
19371 movq 56 (%rsi), %r12
19372
19373@@ -78,7 +79,7 @@ ENTRY(copy_page)
19374 movq %rdx, 16 (%rdi)
19375 movq %r8, 24 (%rdi)
19376 movq %r9, 32 (%rdi)
19377- movq %r10, 40 (%rdi)
19378+ movq %r13, 40 (%rdi)
19379 movq %r11, 48 (%rdi)
19380 movq %r12, 56 (%rdi)
19381
19382@@ -95,6 +96,7 @@ ENTRY(copy_page)
19383 CFI_RESTORE r13
19384 addq $3*8,%rsp
19385 CFI_ADJUST_CFA_OFFSET -3*8
19386+ pax_force_retaddr
19387 ret
19388 .Lcopy_page_end:
19389 CFI_ENDPROC
19390@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19391
19392 #include <asm/cpufeature.h>
19393
19394- .section .altinstr_replacement,"ax"
19395+ .section .altinstr_replacement,"a"
19396 1: .byte 0xeb /* jmp <disp8> */
19397 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19398 2:
19399diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19400index 0248402..821c786 100644
19401--- a/arch/x86/lib/copy_user_64.S
19402+++ b/arch/x86/lib/copy_user_64.S
19403@@ -16,6 +16,7 @@
19404 #include <asm/thread_info.h>
19405 #include <asm/cpufeature.h>
19406 #include <asm/alternative-asm.h>
19407+#include <asm/pgtable.h>
19408
19409 /*
19410 * By placing feature2 after feature1 in altinstructions section, we logically
19411@@ -29,7 +30,7 @@
19412 .byte 0xe9 /* 32bit jump */
19413 .long \orig-1f /* by default jump to orig */
19414 1:
19415- .section .altinstr_replacement,"ax"
19416+ .section .altinstr_replacement,"a"
19417 2: .byte 0xe9 /* near jump with 32bit immediate */
19418 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19419 3: .byte 0xe9 /* near jump with 32bit immediate */
19420@@ -71,47 +72,20 @@
19421 #endif
19422 .endm
19423
19424-/* Standard copy_to_user with segment limit checking */
19425-ENTRY(_copy_to_user)
19426- CFI_STARTPROC
19427- GET_THREAD_INFO(%rax)
19428- movq %rdi,%rcx
19429- addq %rdx,%rcx
19430- jc bad_to_user
19431- cmpq TI_addr_limit(%rax),%rcx
19432- ja bad_to_user
19433- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19434- copy_user_generic_unrolled,copy_user_generic_string, \
19435- copy_user_enhanced_fast_string
19436- CFI_ENDPROC
19437-ENDPROC(_copy_to_user)
19438-
19439-/* Standard copy_from_user with segment limit checking */
19440-ENTRY(_copy_from_user)
19441- CFI_STARTPROC
19442- GET_THREAD_INFO(%rax)
19443- movq %rsi,%rcx
19444- addq %rdx,%rcx
19445- jc bad_from_user
19446- cmpq TI_addr_limit(%rax),%rcx
19447- ja bad_from_user
19448- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19449- copy_user_generic_unrolled,copy_user_generic_string, \
19450- copy_user_enhanced_fast_string
19451- CFI_ENDPROC
19452-ENDPROC(_copy_from_user)
19453-
19454 .section .fixup,"ax"
19455 /* must zero dest */
19456 ENTRY(bad_from_user)
19457 bad_from_user:
19458 CFI_STARTPROC
19459+ testl %edx,%edx
19460+ js bad_to_user
19461 movl %edx,%ecx
19462 xorl %eax,%eax
19463 rep
19464 stosb
19465 bad_to_user:
19466 movl %edx,%eax
19467+ pax_force_retaddr
19468 ret
19469 CFI_ENDPROC
19470 ENDPROC(bad_from_user)
19471@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19472 jz 17f
19473 1: movq (%rsi),%r8
19474 2: movq 1*8(%rsi),%r9
19475-3: movq 2*8(%rsi),%r10
19476+3: movq 2*8(%rsi),%rax
19477 4: movq 3*8(%rsi),%r11
19478 5: movq %r8,(%rdi)
19479 6: movq %r9,1*8(%rdi)
19480-7: movq %r10,2*8(%rdi)
19481+7: movq %rax,2*8(%rdi)
19482 8: movq %r11,3*8(%rdi)
19483 9: movq 4*8(%rsi),%r8
19484 10: movq 5*8(%rsi),%r9
19485-11: movq 6*8(%rsi),%r10
19486+11: movq 6*8(%rsi),%rax
19487 12: movq 7*8(%rsi),%r11
19488 13: movq %r8,4*8(%rdi)
19489 14: movq %r9,5*8(%rdi)
19490-15: movq %r10,6*8(%rdi)
19491+15: movq %rax,6*8(%rdi)
19492 16: movq %r11,7*8(%rdi)
19493 leaq 64(%rsi),%rsi
19494 leaq 64(%rdi),%rdi
19495@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19496 decl %ecx
19497 jnz 21b
19498 23: xor %eax,%eax
19499+ pax_force_retaddr
19500 ret
19501
19502 .section .fixup,"ax"
19503@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19504 3: rep
19505 movsb
19506 4: xorl %eax,%eax
19507+ pax_force_retaddr
19508 ret
19509
19510 .section .fixup,"ax"
19511@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19512 1: rep
19513 movsb
19514 2: xorl %eax,%eax
19515+ pax_force_retaddr
19516 ret
19517
19518 .section .fixup,"ax"
19519diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19520index cb0c112..e3a6895 100644
19521--- a/arch/x86/lib/copy_user_nocache_64.S
19522+++ b/arch/x86/lib/copy_user_nocache_64.S
19523@@ -8,12 +8,14 @@
19524
19525 #include <linux/linkage.h>
19526 #include <asm/dwarf2.h>
19527+#include <asm/alternative-asm.h>
19528
19529 #define FIX_ALIGNMENT 1
19530
19531 #include <asm/current.h>
19532 #include <asm/asm-offsets.h>
19533 #include <asm/thread_info.h>
19534+#include <asm/pgtable.h>
19535
19536 .macro ALIGN_DESTINATION
19537 #ifdef FIX_ALIGNMENT
19538@@ -50,6 +52,15 @@
19539 */
19540 ENTRY(__copy_user_nocache)
19541 CFI_STARTPROC
19542+
19543+#ifdef CONFIG_PAX_MEMORY_UDEREF
19544+ mov $PAX_USER_SHADOW_BASE,%rcx
19545+ cmp %rcx,%rsi
19546+ jae 1f
19547+ add %rcx,%rsi
19548+1:
19549+#endif
19550+
19551 cmpl $8,%edx
19552 jb 20f /* less then 8 bytes, go to byte copy loop */
19553 ALIGN_DESTINATION
19554@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19555 jz 17f
19556 1: movq (%rsi),%r8
19557 2: movq 1*8(%rsi),%r9
19558-3: movq 2*8(%rsi),%r10
19559+3: movq 2*8(%rsi),%rax
19560 4: movq 3*8(%rsi),%r11
19561 5: movnti %r8,(%rdi)
19562 6: movnti %r9,1*8(%rdi)
19563-7: movnti %r10,2*8(%rdi)
19564+7: movnti %rax,2*8(%rdi)
19565 8: movnti %r11,3*8(%rdi)
19566 9: movq 4*8(%rsi),%r8
19567 10: movq 5*8(%rsi),%r9
19568-11: movq 6*8(%rsi),%r10
19569+11: movq 6*8(%rsi),%rax
19570 12: movq 7*8(%rsi),%r11
19571 13: movnti %r8,4*8(%rdi)
19572 14: movnti %r9,5*8(%rdi)
19573-15: movnti %r10,6*8(%rdi)
19574+15: movnti %rax,6*8(%rdi)
19575 16: movnti %r11,7*8(%rdi)
19576 leaq 64(%rsi),%rsi
19577 leaq 64(%rdi),%rdi
19578@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19579 jnz 21b
19580 23: xorl %eax,%eax
19581 sfence
19582+ pax_force_retaddr
19583 ret
19584
19585 .section .fixup,"ax"
19586diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19587index fb903b7..c92b7f7 100644
19588--- a/arch/x86/lib/csum-copy_64.S
19589+++ b/arch/x86/lib/csum-copy_64.S
19590@@ -8,6 +8,7 @@
19591 #include <linux/linkage.h>
19592 #include <asm/dwarf2.h>
19593 #include <asm/errno.h>
19594+#include <asm/alternative-asm.h>
19595
19596 /*
19597 * Checksum copy with exception handling.
19598@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19599 CFI_RESTORE rbp
19600 addq $7*8, %rsp
19601 CFI_ADJUST_CFA_OFFSET -7*8
19602+ pax_force_retaddr 0, 1
19603 ret
19604 CFI_RESTORE_STATE
19605
19606diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19607index 459b58a..9570bc7 100644
19608--- a/arch/x86/lib/csum-wrappers_64.c
19609+++ b/arch/x86/lib/csum-wrappers_64.c
19610@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19611 len -= 2;
19612 }
19613 }
19614- isum = csum_partial_copy_generic((__force const void *)src,
19615+
19616+#ifdef CONFIG_PAX_MEMORY_UDEREF
19617+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19618+ src += PAX_USER_SHADOW_BASE;
19619+#endif
19620+
19621+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19622 dst, len, isum, errp, NULL);
19623 if (unlikely(*errp))
19624 goto out_err;
19625@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19626 }
19627
19628 *errp = 0;
19629- return csum_partial_copy_generic(src, (void __force *)dst,
19630+
19631+#ifdef CONFIG_PAX_MEMORY_UDEREF
19632+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19633+ dst += PAX_USER_SHADOW_BASE;
19634+#endif
19635+
19636+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19637 len, isum, NULL, errp);
19638 }
19639 EXPORT_SYMBOL(csum_partial_copy_to_user);
19640diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19641index 51f1504..ddac4c1 100644
19642--- a/arch/x86/lib/getuser.S
19643+++ b/arch/x86/lib/getuser.S
19644@@ -33,15 +33,38 @@
19645 #include <asm/asm-offsets.h>
19646 #include <asm/thread_info.h>
19647 #include <asm/asm.h>
19648+#include <asm/segment.h>
19649+#include <asm/pgtable.h>
19650+#include <asm/alternative-asm.h>
19651+
19652+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19653+#define __copyuser_seg gs;
19654+#else
19655+#define __copyuser_seg
19656+#endif
19657
19658 .text
19659 ENTRY(__get_user_1)
19660 CFI_STARTPROC
19661+
19662+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19663 GET_THREAD_INFO(%_ASM_DX)
19664 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19665 jae bad_get_user
19666-1: movzb (%_ASM_AX),%edx
19667+
19668+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19669+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19670+ cmp %_ASM_DX,%_ASM_AX
19671+ jae 1234f
19672+ add %_ASM_DX,%_ASM_AX
19673+1234:
19674+#endif
19675+
19676+#endif
19677+
19678+1: __copyuser_seg movzb (%_ASM_AX),%edx
19679 xor %eax,%eax
19680+ pax_force_retaddr
19681 ret
19682 CFI_ENDPROC
19683 ENDPROC(__get_user_1)
19684@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19685 ENTRY(__get_user_2)
19686 CFI_STARTPROC
19687 add $1,%_ASM_AX
19688+
19689+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19690 jc bad_get_user
19691 GET_THREAD_INFO(%_ASM_DX)
19692 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19693 jae bad_get_user
19694-2: movzwl -1(%_ASM_AX),%edx
19695+
19696+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19697+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19698+ cmp %_ASM_DX,%_ASM_AX
19699+ jae 1234f
19700+ add %_ASM_DX,%_ASM_AX
19701+1234:
19702+#endif
19703+
19704+#endif
19705+
19706+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19707 xor %eax,%eax
19708+ pax_force_retaddr
19709 ret
19710 CFI_ENDPROC
19711 ENDPROC(__get_user_2)
19712@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19713 ENTRY(__get_user_4)
19714 CFI_STARTPROC
19715 add $3,%_ASM_AX
19716+
19717+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19718 jc bad_get_user
19719 GET_THREAD_INFO(%_ASM_DX)
19720 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19721 jae bad_get_user
19722-3: mov -3(%_ASM_AX),%edx
19723+
19724+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19725+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19726+ cmp %_ASM_DX,%_ASM_AX
19727+ jae 1234f
19728+ add %_ASM_DX,%_ASM_AX
19729+1234:
19730+#endif
19731+
19732+#endif
19733+
19734+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19735 xor %eax,%eax
19736+ pax_force_retaddr
19737 ret
19738 CFI_ENDPROC
19739 ENDPROC(__get_user_4)
19740@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19741 GET_THREAD_INFO(%_ASM_DX)
19742 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19743 jae bad_get_user
19744+
19745+#ifdef CONFIG_PAX_MEMORY_UDEREF
19746+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19747+ cmp %_ASM_DX,%_ASM_AX
19748+ jae 1234f
19749+ add %_ASM_DX,%_ASM_AX
19750+1234:
19751+#endif
19752+
19753 4: movq -7(%_ASM_AX),%_ASM_DX
19754 xor %eax,%eax
19755+ pax_force_retaddr
19756 ret
19757 CFI_ENDPROC
19758 ENDPROC(__get_user_8)
19759@@ -91,6 +152,7 @@ bad_get_user:
19760 CFI_STARTPROC
19761 xor %edx,%edx
19762 mov $(-EFAULT),%_ASM_AX
19763+ pax_force_retaddr
19764 ret
19765 CFI_ENDPROC
19766 END(bad_get_user)
19767diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19768index 374562e..a75830b 100644
19769--- a/arch/x86/lib/insn.c
19770+++ b/arch/x86/lib/insn.c
19771@@ -21,6 +21,11 @@
19772 #include <linux/string.h>
19773 #include <asm/inat.h>
19774 #include <asm/insn.h>
19775+#ifdef __KERNEL__
19776+#include <asm/pgtable_types.h>
19777+#else
19778+#define ktla_ktva(addr) addr
19779+#endif
19780
19781 /* Verify next sizeof(t) bytes can be on the same instruction */
19782 #define validate_next(t, insn, n) \
19783@@ -49,8 +54,8 @@
19784 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19785 {
19786 memset(insn, 0, sizeof(*insn));
19787- insn->kaddr = kaddr;
19788- insn->next_byte = kaddr;
19789+ insn->kaddr = ktla_ktva(kaddr);
19790+ insn->next_byte = ktla_ktva(kaddr);
19791 insn->x86_64 = x86_64 ? 1 : 0;
19792 insn->opnd_bytes = 4;
19793 if (x86_64)
19794diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19795index 05a95e7..326f2fa 100644
19796--- a/arch/x86/lib/iomap_copy_64.S
19797+++ b/arch/x86/lib/iomap_copy_64.S
19798@@ -17,6 +17,7 @@
19799
19800 #include <linux/linkage.h>
19801 #include <asm/dwarf2.h>
19802+#include <asm/alternative-asm.h>
19803
19804 /*
19805 * override generic version in lib/iomap_copy.c
19806@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19807 CFI_STARTPROC
19808 movl %edx,%ecx
19809 rep movsd
19810+ pax_force_retaddr
19811 ret
19812 CFI_ENDPROC
19813 ENDPROC(__iowrite32_copy)
19814diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19815index efbf2a0..8893637 100644
19816--- a/arch/x86/lib/memcpy_64.S
19817+++ b/arch/x86/lib/memcpy_64.S
19818@@ -34,6 +34,7 @@
19819 rep movsq
19820 movl %edx, %ecx
19821 rep movsb
19822+ pax_force_retaddr
19823 ret
19824 .Lmemcpy_e:
19825 .previous
19826@@ -51,6 +52,7 @@
19827
19828 movl %edx, %ecx
19829 rep movsb
19830+ pax_force_retaddr
19831 ret
19832 .Lmemcpy_e_e:
19833 .previous
19834@@ -81,13 +83,13 @@ ENTRY(memcpy)
19835 */
19836 movq 0*8(%rsi), %r8
19837 movq 1*8(%rsi), %r9
19838- movq 2*8(%rsi), %r10
19839+ movq 2*8(%rsi), %rcx
19840 movq 3*8(%rsi), %r11
19841 leaq 4*8(%rsi), %rsi
19842
19843 movq %r8, 0*8(%rdi)
19844 movq %r9, 1*8(%rdi)
19845- movq %r10, 2*8(%rdi)
19846+ movq %rcx, 2*8(%rdi)
19847 movq %r11, 3*8(%rdi)
19848 leaq 4*8(%rdi), %rdi
19849 jae .Lcopy_forward_loop
19850@@ -110,12 +112,12 @@ ENTRY(memcpy)
19851 subq $0x20, %rdx
19852 movq -1*8(%rsi), %r8
19853 movq -2*8(%rsi), %r9
19854- movq -3*8(%rsi), %r10
19855+ movq -3*8(%rsi), %rcx
19856 movq -4*8(%rsi), %r11
19857 leaq -4*8(%rsi), %rsi
19858 movq %r8, -1*8(%rdi)
19859 movq %r9, -2*8(%rdi)
19860- movq %r10, -3*8(%rdi)
19861+ movq %rcx, -3*8(%rdi)
19862 movq %r11, -4*8(%rdi)
19863 leaq -4*8(%rdi), %rdi
19864 jae .Lcopy_backward_loop
19865@@ -135,12 +137,13 @@ ENTRY(memcpy)
19866 */
19867 movq 0*8(%rsi), %r8
19868 movq 1*8(%rsi), %r9
19869- movq -2*8(%rsi, %rdx), %r10
19870+ movq -2*8(%rsi, %rdx), %rcx
19871 movq -1*8(%rsi, %rdx), %r11
19872 movq %r8, 0*8(%rdi)
19873 movq %r9, 1*8(%rdi)
19874- movq %r10, -2*8(%rdi, %rdx)
19875+ movq %rcx, -2*8(%rdi, %rdx)
19876 movq %r11, -1*8(%rdi, %rdx)
19877+ pax_force_retaddr
19878 retq
19879 .p2align 4
19880 .Lless_16bytes:
19881@@ -153,6 +156,7 @@ ENTRY(memcpy)
19882 movq -1*8(%rsi, %rdx), %r9
19883 movq %r8, 0*8(%rdi)
19884 movq %r9, -1*8(%rdi, %rdx)
19885+ pax_force_retaddr
19886 retq
19887 .p2align 4
19888 .Lless_8bytes:
19889@@ -166,6 +170,7 @@ ENTRY(memcpy)
19890 movl -4(%rsi, %rdx), %r8d
19891 movl %ecx, (%rdi)
19892 movl %r8d, -4(%rdi, %rdx)
19893+ pax_force_retaddr
19894 retq
19895 .p2align 4
19896 .Lless_3bytes:
19897@@ -183,6 +188,7 @@ ENTRY(memcpy)
19898 jnz .Lloop_1
19899
19900 .Lend:
19901+ pax_force_retaddr
19902 retq
19903 CFI_ENDPROC
19904 ENDPROC(memcpy)
19905diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19906index ee16461..c39c199 100644
19907--- a/arch/x86/lib/memmove_64.S
19908+++ b/arch/x86/lib/memmove_64.S
19909@@ -61,13 +61,13 @@ ENTRY(memmove)
19910 5:
19911 sub $0x20, %rdx
19912 movq 0*8(%rsi), %r11
19913- movq 1*8(%rsi), %r10
19914+ movq 1*8(%rsi), %rcx
19915 movq 2*8(%rsi), %r9
19916 movq 3*8(%rsi), %r8
19917 leaq 4*8(%rsi), %rsi
19918
19919 movq %r11, 0*8(%rdi)
19920- movq %r10, 1*8(%rdi)
19921+ movq %rcx, 1*8(%rdi)
19922 movq %r9, 2*8(%rdi)
19923 movq %r8, 3*8(%rdi)
19924 leaq 4*8(%rdi), %rdi
19925@@ -81,10 +81,10 @@ ENTRY(memmove)
19926 4:
19927 movq %rdx, %rcx
19928 movq -8(%rsi, %rdx), %r11
19929- lea -8(%rdi, %rdx), %r10
19930+ lea -8(%rdi, %rdx), %r9
19931 shrq $3, %rcx
19932 rep movsq
19933- movq %r11, (%r10)
19934+ movq %r11, (%r9)
19935 jmp 13f
19936 .Lmemmove_end_forward:
19937
19938@@ -95,14 +95,14 @@ ENTRY(memmove)
19939 7:
19940 movq %rdx, %rcx
19941 movq (%rsi), %r11
19942- movq %rdi, %r10
19943+ movq %rdi, %r9
19944 leaq -8(%rsi, %rdx), %rsi
19945 leaq -8(%rdi, %rdx), %rdi
19946 shrq $3, %rcx
19947 std
19948 rep movsq
19949 cld
19950- movq %r11, (%r10)
19951+ movq %r11, (%r9)
19952 jmp 13f
19953
19954 /*
19955@@ -127,13 +127,13 @@ ENTRY(memmove)
19956 8:
19957 subq $0x20, %rdx
19958 movq -1*8(%rsi), %r11
19959- movq -2*8(%rsi), %r10
19960+ movq -2*8(%rsi), %rcx
19961 movq -3*8(%rsi), %r9
19962 movq -4*8(%rsi), %r8
19963 leaq -4*8(%rsi), %rsi
19964
19965 movq %r11, -1*8(%rdi)
19966- movq %r10, -2*8(%rdi)
19967+ movq %rcx, -2*8(%rdi)
19968 movq %r9, -3*8(%rdi)
19969 movq %r8, -4*8(%rdi)
19970 leaq -4*8(%rdi), %rdi
19971@@ -151,11 +151,11 @@ ENTRY(memmove)
19972 * Move data from 16 bytes to 31 bytes.
19973 */
19974 movq 0*8(%rsi), %r11
19975- movq 1*8(%rsi), %r10
19976+ movq 1*8(%rsi), %rcx
19977 movq -2*8(%rsi, %rdx), %r9
19978 movq -1*8(%rsi, %rdx), %r8
19979 movq %r11, 0*8(%rdi)
19980- movq %r10, 1*8(%rdi)
19981+ movq %rcx, 1*8(%rdi)
19982 movq %r9, -2*8(%rdi, %rdx)
19983 movq %r8, -1*8(%rdi, %rdx)
19984 jmp 13f
19985@@ -167,9 +167,9 @@ ENTRY(memmove)
19986 * Move data from 8 bytes to 15 bytes.
19987 */
19988 movq 0*8(%rsi), %r11
19989- movq -1*8(%rsi, %rdx), %r10
19990+ movq -1*8(%rsi, %rdx), %r9
19991 movq %r11, 0*8(%rdi)
19992- movq %r10, -1*8(%rdi, %rdx)
19993+ movq %r9, -1*8(%rdi, %rdx)
19994 jmp 13f
19995 10:
19996 cmpq $4, %rdx
19997@@ -178,9 +178,9 @@ ENTRY(memmove)
19998 * Move data from 4 bytes to 7 bytes.
19999 */
20000 movl (%rsi), %r11d
20001- movl -4(%rsi, %rdx), %r10d
20002+ movl -4(%rsi, %rdx), %r9d
20003 movl %r11d, (%rdi)
20004- movl %r10d, -4(%rdi, %rdx)
20005+ movl %r9d, -4(%rdi, %rdx)
20006 jmp 13f
20007 11:
20008 cmp $2, %rdx
20009@@ -189,9 +189,9 @@ ENTRY(memmove)
20010 * Move data from 2 bytes to 3 bytes.
20011 */
20012 movw (%rsi), %r11w
20013- movw -2(%rsi, %rdx), %r10w
20014+ movw -2(%rsi, %rdx), %r9w
20015 movw %r11w, (%rdi)
20016- movw %r10w, -2(%rdi, %rdx)
20017+ movw %r9w, -2(%rdi, %rdx)
20018 jmp 13f
20019 12:
20020 cmp $1, %rdx
20021@@ -202,6 +202,7 @@ ENTRY(memmove)
20022 movb (%rsi), %r11b
20023 movb %r11b, (%rdi)
20024 13:
20025+ pax_force_retaddr
20026 retq
20027 CFI_ENDPROC
20028
20029@@ -210,6 +211,7 @@ ENTRY(memmove)
20030 /* Forward moving data. */
20031 movq %rdx, %rcx
20032 rep movsb
20033+ pax_force_retaddr
20034 retq
20035 .Lmemmove_end_forward_efs:
20036 .previous
20037diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20038index 79bd454..dff325a 100644
20039--- a/arch/x86/lib/memset_64.S
20040+++ b/arch/x86/lib/memset_64.S
20041@@ -31,6 +31,7 @@
20042 movl %r8d,%ecx
20043 rep stosb
20044 movq %r9,%rax
20045+ pax_force_retaddr
20046 ret
20047 .Lmemset_e:
20048 .previous
20049@@ -53,6 +54,7 @@
20050 movl %edx,%ecx
20051 rep stosb
20052 movq %r9,%rax
20053+ pax_force_retaddr
20054 ret
20055 .Lmemset_e_e:
20056 .previous
20057@@ -60,13 +62,13 @@
20058 ENTRY(memset)
20059 ENTRY(__memset)
20060 CFI_STARTPROC
20061- movq %rdi,%r10
20062 movq %rdx,%r11
20063
20064 /* expand byte value */
20065 movzbl %sil,%ecx
20066 movabs $0x0101010101010101,%rax
20067 mul %rcx /* with rax, clobbers rdx */
20068+ movq %rdi,%rdx
20069
20070 /* align dst */
20071 movl %edi,%r9d
20072@@ -120,7 +122,8 @@ ENTRY(__memset)
20073 jnz .Lloop_1
20074
20075 .Lende:
20076- movq %r10,%rax
20077+ movq %rdx,%rax
20078+ pax_force_retaddr
20079 ret
20080
20081 CFI_RESTORE_STATE
20082diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20083index c9f2d9b..e7fd2c0 100644
20084--- a/arch/x86/lib/mmx_32.c
20085+++ b/arch/x86/lib/mmx_32.c
20086@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20087 {
20088 void *p;
20089 int i;
20090+ unsigned long cr0;
20091
20092 if (unlikely(in_interrupt()))
20093 return __memcpy(to, from, len);
20094@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20095 kernel_fpu_begin();
20096
20097 __asm__ __volatile__ (
20098- "1: prefetch (%0)\n" /* This set is 28 bytes */
20099- " prefetch 64(%0)\n"
20100- " prefetch 128(%0)\n"
20101- " prefetch 192(%0)\n"
20102- " prefetch 256(%0)\n"
20103+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20104+ " prefetch 64(%1)\n"
20105+ " prefetch 128(%1)\n"
20106+ " prefetch 192(%1)\n"
20107+ " prefetch 256(%1)\n"
20108 "2: \n"
20109 ".section .fixup, \"ax\"\n"
20110- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20111+ "3: \n"
20112+
20113+#ifdef CONFIG_PAX_KERNEXEC
20114+ " movl %%cr0, %0\n"
20115+ " movl %0, %%eax\n"
20116+ " andl $0xFFFEFFFF, %%eax\n"
20117+ " movl %%eax, %%cr0\n"
20118+#endif
20119+
20120+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20121+
20122+#ifdef CONFIG_PAX_KERNEXEC
20123+ " movl %0, %%cr0\n"
20124+#endif
20125+
20126 " jmp 2b\n"
20127 ".previous\n"
20128 _ASM_EXTABLE(1b, 3b)
20129- : : "r" (from));
20130+ : "=&r" (cr0) : "r" (from) : "ax");
20131
20132 for ( ; i > 5; i--) {
20133 __asm__ __volatile__ (
20134- "1: prefetch 320(%0)\n"
20135- "2: movq (%0), %%mm0\n"
20136- " movq 8(%0), %%mm1\n"
20137- " movq 16(%0), %%mm2\n"
20138- " movq 24(%0), %%mm3\n"
20139- " movq %%mm0, (%1)\n"
20140- " movq %%mm1, 8(%1)\n"
20141- " movq %%mm2, 16(%1)\n"
20142- " movq %%mm3, 24(%1)\n"
20143- " movq 32(%0), %%mm0\n"
20144- " movq 40(%0), %%mm1\n"
20145- " movq 48(%0), %%mm2\n"
20146- " movq 56(%0), %%mm3\n"
20147- " movq %%mm0, 32(%1)\n"
20148- " movq %%mm1, 40(%1)\n"
20149- " movq %%mm2, 48(%1)\n"
20150- " movq %%mm3, 56(%1)\n"
20151+ "1: prefetch 320(%1)\n"
20152+ "2: movq (%1), %%mm0\n"
20153+ " movq 8(%1), %%mm1\n"
20154+ " movq 16(%1), %%mm2\n"
20155+ " movq 24(%1), %%mm3\n"
20156+ " movq %%mm0, (%2)\n"
20157+ " movq %%mm1, 8(%2)\n"
20158+ " movq %%mm2, 16(%2)\n"
20159+ " movq %%mm3, 24(%2)\n"
20160+ " movq 32(%1), %%mm0\n"
20161+ " movq 40(%1), %%mm1\n"
20162+ " movq 48(%1), %%mm2\n"
20163+ " movq 56(%1), %%mm3\n"
20164+ " movq %%mm0, 32(%2)\n"
20165+ " movq %%mm1, 40(%2)\n"
20166+ " movq %%mm2, 48(%2)\n"
20167+ " movq %%mm3, 56(%2)\n"
20168 ".section .fixup, \"ax\"\n"
20169- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20170+ "3:\n"
20171+
20172+#ifdef CONFIG_PAX_KERNEXEC
20173+ " movl %%cr0, %0\n"
20174+ " movl %0, %%eax\n"
20175+ " andl $0xFFFEFFFF, %%eax\n"
20176+ " movl %%eax, %%cr0\n"
20177+#endif
20178+
20179+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20180+
20181+#ifdef CONFIG_PAX_KERNEXEC
20182+ " movl %0, %%cr0\n"
20183+#endif
20184+
20185 " jmp 2b\n"
20186 ".previous\n"
20187 _ASM_EXTABLE(1b, 3b)
20188- : : "r" (from), "r" (to) : "memory");
20189+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20190
20191 from += 64;
20192 to += 64;
20193@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20194 static void fast_copy_page(void *to, void *from)
20195 {
20196 int i;
20197+ unsigned long cr0;
20198
20199 kernel_fpu_begin();
20200
20201@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20202 * but that is for later. -AV
20203 */
20204 __asm__ __volatile__(
20205- "1: prefetch (%0)\n"
20206- " prefetch 64(%0)\n"
20207- " prefetch 128(%0)\n"
20208- " prefetch 192(%0)\n"
20209- " prefetch 256(%0)\n"
20210+ "1: prefetch (%1)\n"
20211+ " prefetch 64(%1)\n"
20212+ " prefetch 128(%1)\n"
20213+ " prefetch 192(%1)\n"
20214+ " prefetch 256(%1)\n"
20215 "2: \n"
20216 ".section .fixup, \"ax\"\n"
20217- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20218+ "3: \n"
20219+
20220+#ifdef CONFIG_PAX_KERNEXEC
20221+ " movl %%cr0, %0\n"
20222+ " movl %0, %%eax\n"
20223+ " andl $0xFFFEFFFF, %%eax\n"
20224+ " movl %%eax, %%cr0\n"
20225+#endif
20226+
20227+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20228+
20229+#ifdef CONFIG_PAX_KERNEXEC
20230+ " movl %0, %%cr0\n"
20231+#endif
20232+
20233 " jmp 2b\n"
20234 ".previous\n"
20235- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20236+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20237
20238 for (i = 0; i < (4096-320)/64; i++) {
20239 __asm__ __volatile__ (
20240- "1: prefetch 320(%0)\n"
20241- "2: movq (%0), %%mm0\n"
20242- " movntq %%mm0, (%1)\n"
20243- " movq 8(%0), %%mm1\n"
20244- " movntq %%mm1, 8(%1)\n"
20245- " movq 16(%0), %%mm2\n"
20246- " movntq %%mm2, 16(%1)\n"
20247- " movq 24(%0), %%mm3\n"
20248- " movntq %%mm3, 24(%1)\n"
20249- " movq 32(%0), %%mm4\n"
20250- " movntq %%mm4, 32(%1)\n"
20251- " movq 40(%0), %%mm5\n"
20252- " movntq %%mm5, 40(%1)\n"
20253- " movq 48(%0), %%mm6\n"
20254- " movntq %%mm6, 48(%1)\n"
20255- " movq 56(%0), %%mm7\n"
20256- " movntq %%mm7, 56(%1)\n"
20257+ "1: prefetch 320(%1)\n"
20258+ "2: movq (%1), %%mm0\n"
20259+ " movntq %%mm0, (%2)\n"
20260+ " movq 8(%1), %%mm1\n"
20261+ " movntq %%mm1, 8(%2)\n"
20262+ " movq 16(%1), %%mm2\n"
20263+ " movntq %%mm2, 16(%2)\n"
20264+ " movq 24(%1), %%mm3\n"
20265+ " movntq %%mm3, 24(%2)\n"
20266+ " movq 32(%1), %%mm4\n"
20267+ " movntq %%mm4, 32(%2)\n"
20268+ " movq 40(%1), %%mm5\n"
20269+ " movntq %%mm5, 40(%2)\n"
20270+ " movq 48(%1), %%mm6\n"
20271+ " movntq %%mm6, 48(%2)\n"
20272+ " movq 56(%1), %%mm7\n"
20273+ " movntq %%mm7, 56(%2)\n"
20274 ".section .fixup, \"ax\"\n"
20275- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20276+ "3:\n"
20277+
20278+#ifdef CONFIG_PAX_KERNEXEC
20279+ " movl %%cr0, %0\n"
20280+ " movl %0, %%eax\n"
20281+ " andl $0xFFFEFFFF, %%eax\n"
20282+ " movl %%eax, %%cr0\n"
20283+#endif
20284+
20285+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20286+
20287+#ifdef CONFIG_PAX_KERNEXEC
20288+ " movl %0, %%cr0\n"
20289+#endif
20290+
20291 " jmp 2b\n"
20292 ".previous\n"
20293- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20294+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20295
20296 from += 64;
20297 to += 64;
20298@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20299 static void fast_copy_page(void *to, void *from)
20300 {
20301 int i;
20302+ unsigned long cr0;
20303
20304 kernel_fpu_begin();
20305
20306 __asm__ __volatile__ (
20307- "1: prefetch (%0)\n"
20308- " prefetch 64(%0)\n"
20309- " prefetch 128(%0)\n"
20310- " prefetch 192(%0)\n"
20311- " prefetch 256(%0)\n"
20312+ "1: prefetch (%1)\n"
20313+ " prefetch 64(%1)\n"
20314+ " prefetch 128(%1)\n"
20315+ " prefetch 192(%1)\n"
20316+ " prefetch 256(%1)\n"
20317 "2: \n"
20318 ".section .fixup, \"ax\"\n"
20319- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20320+ "3: \n"
20321+
20322+#ifdef CONFIG_PAX_KERNEXEC
20323+ " movl %%cr0, %0\n"
20324+ " movl %0, %%eax\n"
20325+ " andl $0xFFFEFFFF, %%eax\n"
20326+ " movl %%eax, %%cr0\n"
20327+#endif
20328+
20329+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20330+
20331+#ifdef CONFIG_PAX_KERNEXEC
20332+ " movl %0, %%cr0\n"
20333+#endif
20334+
20335 " jmp 2b\n"
20336 ".previous\n"
20337- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20338+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20339
20340 for (i = 0; i < 4096/64; i++) {
20341 __asm__ __volatile__ (
20342- "1: prefetch 320(%0)\n"
20343- "2: movq (%0), %%mm0\n"
20344- " movq 8(%0), %%mm1\n"
20345- " movq 16(%0), %%mm2\n"
20346- " movq 24(%0), %%mm3\n"
20347- " movq %%mm0, (%1)\n"
20348- " movq %%mm1, 8(%1)\n"
20349- " movq %%mm2, 16(%1)\n"
20350- " movq %%mm3, 24(%1)\n"
20351- " movq 32(%0), %%mm0\n"
20352- " movq 40(%0), %%mm1\n"
20353- " movq 48(%0), %%mm2\n"
20354- " movq 56(%0), %%mm3\n"
20355- " movq %%mm0, 32(%1)\n"
20356- " movq %%mm1, 40(%1)\n"
20357- " movq %%mm2, 48(%1)\n"
20358- " movq %%mm3, 56(%1)\n"
20359+ "1: prefetch 320(%1)\n"
20360+ "2: movq (%1), %%mm0\n"
20361+ " movq 8(%1), %%mm1\n"
20362+ " movq 16(%1), %%mm2\n"
20363+ " movq 24(%1), %%mm3\n"
20364+ " movq %%mm0, (%2)\n"
20365+ " movq %%mm1, 8(%2)\n"
20366+ " movq %%mm2, 16(%2)\n"
20367+ " movq %%mm3, 24(%2)\n"
20368+ " movq 32(%1), %%mm0\n"
20369+ " movq 40(%1), %%mm1\n"
20370+ " movq 48(%1), %%mm2\n"
20371+ " movq 56(%1), %%mm3\n"
20372+ " movq %%mm0, 32(%2)\n"
20373+ " movq %%mm1, 40(%2)\n"
20374+ " movq %%mm2, 48(%2)\n"
20375+ " movq %%mm3, 56(%2)\n"
20376 ".section .fixup, \"ax\"\n"
20377- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20378+ "3:\n"
20379+
20380+#ifdef CONFIG_PAX_KERNEXEC
20381+ " movl %%cr0, %0\n"
20382+ " movl %0, %%eax\n"
20383+ " andl $0xFFFEFFFF, %%eax\n"
20384+ " movl %%eax, %%cr0\n"
20385+#endif
20386+
20387+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20388+
20389+#ifdef CONFIG_PAX_KERNEXEC
20390+ " movl %0, %%cr0\n"
20391+#endif
20392+
20393 " jmp 2b\n"
20394 ".previous\n"
20395 _ASM_EXTABLE(1b, 3b)
20396- : : "r" (from), "r" (to) : "memory");
20397+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20398
20399 from += 64;
20400 to += 64;
20401diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20402index 69fa106..adda88b 100644
20403--- a/arch/x86/lib/msr-reg.S
20404+++ b/arch/x86/lib/msr-reg.S
20405@@ -3,6 +3,7 @@
20406 #include <asm/dwarf2.h>
20407 #include <asm/asm.h>
20408 #include <asm/msr.h>
20409+#include <asm/alternative-asm.h>
20410
20411 #ifdef CONFIG_X86_64
20412 /*
20413@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20414 CFI_STARTPROC
20415 pushq_cfi %rbx
20416 pushq_cfi %rbp
20417- movq %rdi, %r10 /* Save pointer */
20418+ movq %rdi, %r9 /* Save pointer */
20419 xorl %r11d, %r11d /* Return value */
20420 movl (%rdi), %eax
20421 movl 4(%rdi), %ecx
20422@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20423 movl 28(%rdi), %edi
20424 CFI_REMEMBER_STATE
20425 1: \op
20426-2: movl %eax, (%r10)
20427+2: movl %eax, (%r9)
20428 movl %r11d, %eax /* Return value */
20429- movl %ecx, 4(%r10)
20430- movl %edx, 8(%r10)
20431- movl %ebx, 12(%r10)
20432- movl %ebp, 20(%r10)
20433- movl %esi, 24(%r10)
20434- movl %edi, 28(%r10)
20435+ movl %ecx, 4(%r9)
20436+ movl %edx, 8(%r9)
20437+ movl %ebx, 12(%r9)
20438+ movl %ebp, 20(%r9)
20439+ movl %esi, 24(%r9)
20440+ movl %edi, 28(%r9)
20441 popq_cfi %rbp
20442 popq_cfi %rbx
20443+ pax_force_retaddr
20444 ret
20445 3:
20446 CFI_RESTORE_STATE
20447diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20448index 36b0d15..d381858 100644
20449--- a/arch/x86/lib/putuser.S
20450+++ b/arch/x86/lib/putuser.S
20451@@ -15,7 +15,9 @@
20452 #include <asm/thread_info.h>
20453 #include <asm/errno.h>
20454 #include <asm/asm.h>
20455-
20456+#include <asm/segment.h>
20457+#include <asm/pgtable.h>
20458+#include <asm/alternative-asm.h>
20459
20460 /*
20461 * __put_user_X
20462@@ -29,52 +31,119 @@
20463 * as they get called from within inline assembly.
20464 */
20465
20466-#define ENTER CFI_STARTPROC ; \
20467- GET_THREAD_INFO(%_ASM_BX)
20468-#define EXIT ret ; \
20469+#define ENTER CFI_STARTPROC
20470+#define EXIT pax_force_retaddr; ret ; \
20471 CFI_ENDPROC
20472
20473+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20474+#define _DEST %_ASM_CX,%_ASM_BX
20475+#else
20476+#define _DEST %_ASM_CX
20477+#endif
20478+
20479+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20480+#define __copyuser_seg gs;
20481+#else
20482+#define __copyuser_seg
20483+#endif
20484+
20485 .text
20486 ENTRY(__put_user_1)
20487 ENTER
20488+
20489+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20490+ GET_THREAD_INFO(%_ASM_BX)
20491 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20492 jae bad_put_user
20493-1: movb %al,(%_ASM_CX)
20494+
20495+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20496+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20497+ cmp %_ASM_BX,%_ASM_CX
20498+ jb 1234f
20499+ xor %ebx,%ebx
20500+1234:
20501+#endif
20502+
20503+#endif
20504+
20505+1: __copyuser_seg movb %al,(_DEST)
20506 xor %eax,%eax
20507 EXIT
20508 ENDPROC(__put_user_1)
20509
20510 ENTRY(__put_user_2)
20511 ENTER
20512+
20513+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20514+ GET_THREAD_INFO(%_ASM_BX)
20515 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20516 sub $1,%_ASM_BX
20517 cmp %_ASM_BX,%_ASM_CX
20518 jae bad_put_user
20519-2: movw %ax,(%_ASM_CX)
20520+
20521+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20522+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20523+ cmp %_ASM_BX,%_ASM_CX
20524+ jb 1234f
20525+ xor %ebx,%ebx
20526+1234:
20527+#endif
20528+
20529+#endif
20530+
20531+2: __copyuser_seg movw %ax,(_DEST)
20532 xor %eax,%eax
20533 EXIT
20534 ENDPROC(__put_user_2)
20535
20536 ENTRY(__put_user_4)
20537 ENTER
20538+
20539+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20540+ GET_THREAD_INFO(%_ASM_BX)
20541 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20542 sub $3,%_ASM_BX
20543 cmp %_ASM_BX,%_ASM_CX
20544 jae bad_put_user
20545-3: movl %eax,(%_ASM_CX)
20546+
20547+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20548+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20549+ cmp %_ASM_BX,%_ASM_CX
20550+ jb 1234f
20551+ xor %ebx,%ebx
20552+1234:
20553+#endif
20554+
20555+#endif
20556+
20557+3: __copyuser_seg movl %eax,(_DEST)
20558 xor %eax,%eax
20559 EXIT
20560 ENDPROC(__put_user_4)
20561
20562 ENTRY(__put_user_8)
20563 ENTER
20564+
20565+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20566+ GET_THREAD_INFO(%_ASM_BX)
20567 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20568 sub $7,%_ASM_BX
20569 cmp %_ASM_BX,%_ASM_CX
20570 jae bad_put_user
20571-4: mov %_ASM_AX,(%_ASM_CX)
20572+
20573+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20574+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20575+ cmp %_ASM_BX,%_ASM_CX
20576+ jb 1234f
20577+ xor %ebx,%ebx
20578+1234:
20579+#endif
20580+
20581+#endif
20582+
20583+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20584 #ifdef CONFIG_X86_32
20585-5: movl %edx,4(%_ASM_CX)
20586+5: __copyuser_seg movl %edx,4(_DEST)
20587 #endif
20588 xor %eax,%eax
20589 EXIT
20590diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20591index 1cad221..de671ee 100644
20592--- a/arch/x86/lib/rwlock.S
20593+++ b/arch/x86/lib/rwlock.S
20594@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20595 FRAME
20596 0: LOCK_PREFIX
20597 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20598+
20599+#ifdef CONFIG_PAX_REFCOUNT
20600+ jno 1234f
20601+ LOCK_PREFIX
20602+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20603+ int $4
20604+1234:
20605+ _ASM_EXTABLE(1234b, 1234b)
20606+#endif
20607+
20608 1: rep; nop
20609 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20610 jne 1b
20611 LOCK_PREFIX
20612 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20613+
20614+#ifdef CONFIG_PAX_REFCOUNT
20615+ jno 1234f
20616+ LOCK_PREFIX
20617+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20618+ int $4
20619+1234:
20620+ _ASM_EXTABLE(1234b, 1234b)
20621+#endif
20622+
20623 jnz 0b
20624 ENDFRAME
20625+ pax_force_retaddr
20626 ret
20627 CFI_ENDPROC
20628 END(__write_lock_failed)
20629@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20630 FRAME
20631 0: LOCK_PREFIX
20632 READ_LOCK_SIZE(inc) (%__lock_ptr)
20633+
20634+#ifdef CONFIG_PAX_REFCOUNT
20635+ jno 1234f
20636+ LOCK_PREFIX
20637+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20638+ int $4
20639+1234:
20640+ _ASM_EXTABLE(1234b, 1234b)
20641+#endif
20642+
20643 1: rep; nop
20644 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20645 js 1b
20646 LOCK_PREFIX
20647 READ_LOCK_SIZE(dec) (%__lock_ptr)
20648+
20649+#ifdef CONFIG_PAX_REFCOUNT
20650+ jno 1234f
20651+ LOCK_PREFIX
20652+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20653+ int $4
20654+1234:
20655+ _ASM_EXTABLE(1234b, 1234b)
20656+#endif
20657+
20658 js 0b
20659 ENDFRAME
20660+ pax_force_retaddr
20661 ret
20662 CFI_ENDPROC
20663 END(__read_lock_failed)
20664diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20665index 5dff5f0..cadebf4 100644
20666--- a/arch/x86/lib/rwsem.S
20667+++ b/arch/x86/lib/rwsem.S
20668@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20669 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20670 CFI_RESTORE __ASM_REG(dx)
20671 restore_common_regs
20672+ pax_force_retaddr
20673 ret
20674 CFI_ENDPROC
20675 ENDPROC(call_rwsem_down_read_failed)
20676@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20677 movq %rax,%rdi
20678 call rwsem_down_write_failed
20679 restore_common_regs
20680+ pax_force_retaddr
20681 ret
20682 CFI_ENDPROC
20683 ENDPROC(call_rwsem_down_write_failed)
20684@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20685 movq %rax,%rdi
20686 call rwsem_wake
20687 restore_common_regs
20688-1: ret
20689+1: pax_force_retaddr
20690+ ret
20691 CFI_ENDPROC
20692 ENDPROC(call_rwsem_wake)
20693
20694@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20695 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20696 CFI_RESTORE __ASM_REG(dx)
20697 restore_common_regs
20698+ pax_force_retaddr
20699 ret
20700 CFI_ENDPROC
20701 ENDPROC(call_rwsem_downgrade_wake)
20702diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20703index a63efd6..ccecad8 100644
20704--- a/arch/x86/lib/thunk_64.S
20705+++ b/arch/x86/lib/thunk_64.S
20706@@ -8,6 +8,7 @@
20707 #include <linux/linkage.h>
20708 #include <asm/dwarf2.h>
20709 #include <asm/calling.h>
20710+#include <asm/alternative-asm.h>
20711
20712 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20713 .macro THUNK name, func, put_ret_addr_in_rdi=0
20714@@ -41,5 +42,6 @@
20715 SAVE_ARGS
20716 restore:
20717 RESTORE_ARGS
20718+ pax_force_retaddr
20719 ret
20720 CFI_ENDPROC
20721diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20722index e218d5d..35679b4 100644
20723--- a/arch/x86/lib/usercopy_32.c
20724+++ b/arch/x86/lib/usercopy_32.c
20725@@ -43,7 +43,7 @@ do { \
20726 __asm__ __volatile__( \
20727 " testl %1,%1\n" \
20728 " jz 2f\n" \
20729- "0: lodsb\n" \
20730+ "0: "__copyuser_seg"lodsb\n" \
20731 " stosb\n" \
20732 " testb %%al,%%al\n" \
20733 " jz 1f\n" \
20734@@ -128,10 +128,12 @@ do { \
20735 int __d0; \
20736 might_fault(); \
20737 __asm__ __volatile__( \
20738+ __COPYUSER_SET_ES \
20739 "0: rep; stosl\n" \
20740 " movl %2,%0\n" \
20741 "1: rep; stosb\n" \
20742 "2:\n" \
20743+ __COPYUSER_RESTORE_ES \
20744 ".section .fixup,\"ax\"\n" \
20745 "3: lea 0(%2,%0,4),%0\n" \
20746 " jmp 2b\n" \
20747@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20748 might_fault();
20749
20750 __asm__ __volatile__(
20751+ __COPYUSER_SET_ES
20752 " testl %0, %0\n"
20753 " jz 3f\n"
20754 " andl %0,%%ecx\n"
20755@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20756 " subl %%ecx,%0\n"
20757 " addl %0,%%eax\n"
20758 "1:\n"
20759+ __COPYUSER_RESTORE_ES
20760 ".section .fixup,\"ax\"\n"
20761 "2: xorl %%eax,%%eax\n"
20762 " jmp 1b\n"
20763@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20764
20765 #ifdef CONFIG_X86_INTEL_USERCOPY
20766 static unsigned long
20767-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20768+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20769 {
20770 int d0, d1;
20771 __asm__ __volatile__(
20772@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20773 " .align 2,0x90\n"
20774 "3: movl 0(%4), %%eax\n"
20775 "4: movl 4(%4), %%edx\n"
20776- "5: movl %%eax, 0(%3)\n"
20777- "6: movl %%edx, 4(%3)\n"
20778+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20779+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20780 "7: movl 8(%4), %%eax\n"
20781 "8: movl 12(%4),%%edx\n"
20782- "9: movl %%eax, 8(%3)\n"
20783- "10: movl %%edx, 12(%3)\n"
20784+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20785+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20786 "11: movl 16(%4), %%eax\n"
20787 "12: movl 20(%4), %%edx\n"
20788- "13: movl %%eax, 16(%3)\n"
20789- "14: movl %%edx, 20(%3)\n"
20790+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20791+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20792 "15: movl 24(%4), %%eax\n"
20793 "16: movl 28(%4), %%edx\n"
20794- "17: movl %%eax, 24(%3)\n"
20795- "18: movl %%edx, 28(%3)\n"
20796+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20797+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20798 "19: movl 32(%4), %%eax\n"
20799 "20: movl 36(%4), %%edx\n"
20800- "21: movl %%eax, 32(%3)\n"
20801- "22: movl %%edx, 36(%3)\n"
20802+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20803+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20804 "23: movl 40(%4), %%eax\n"
20805 "24: movl 44(%4), %%edx\n"
20806- "25: movl %%eax, 40(%3)\n"
20807- "26: movl %%edx, 44(%3)\n"
20808+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20809+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20810 "27: movl 48(%4), %%eax\n"
20811 "28: movl 52(%4), %%edx\n"
20812- "29: movl %%eax, 48(%3)\n"
20813- "30: movl %%edx, 52(%3)\n"
20814+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20815+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20816 "31: movl 56(%4), %%eax\n"
20817 "32: movl 60(%4), %%edx\n"
20818- "33: movl %%eax, 56(%3)\n"
20819- "34: movl %%edx, 60(%3)\n"
20820+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20821+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20822 " addl $-64, %0\n"
20823 " addl $64, %4\n"
20824 " addl $64, %3\n"
20825@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20826 " shrl $2, %0\n"
20827 " andl $3, %%eax\n"
20828 " cld\n"
20829+ __COPYUSER_SET_ES
20830 "99: rep; movsl\n"
20831 "36: movl %%eax, %0\n"
20832 "37: rep; movsb\n"
20833 "100:\n"
20834+ __COPYUSER_RESTORE_ES
20835+ ".section .fixup,\"ax\"\n"
20836+ "101: lea 0(%%eax,%0,4),%0\n"
20837+ " jmp 100b\n"
20838+ ".previous\n"
20839+ ".section __ex_table,\"a\"\n"
20840+ " .align 4\n"
20841+ " .long 1b,100b\n"
20842+ " .long 2b,100b\n"
20843+ " .long 3b,100b\n"
20844+ " .long 4b,100b\n"
20845+ " .long 5b,100b\n"
20846+ " .long 6b,100b\n"
20847+ " .long 7b,100b\n"
20848+ " .long 8b,100b\n"
20849+ " .long 9b,100b\n"
20850+ " .long 10b,100b\n"
20851+ " .long 11b,100b\n"
20852+ " .long 12b,100b\n"
20853+ " .long 13b,100b\n"
20854+ " .long 14b,100b\n"
20855+ " .long 15b,100b\n"
20856+ " .long 16b,100b\n"
20857+ " .long 17b,100b\n"
20858+ " .long 18b,100b\n"
20859+ " .long 19b,100b\n"
20860+ " .long 20b,100b\n"
20861+ " .long 21b,100b\n"
20862+ " .long 22b,100b\n"
20863+ " .long 23b,100b\n"
20864+ " .long 24b,100b\n"
20865+ " .long 25b,100b\n"
20866+ " .long 26b,100b\n"
20867+ " .long 27b,100b\n"
20868+ " .long 28b,100b\n"
20869+ " .long 29b,100b\n"
20870+ " .long 30b,100b\n"
20871+ " .long 31b,100b\n"
20872+ " .long 32b,100b\n"
20873+ " .long 33b,100b\n"
20874+ " .long 34b,100b\n"
20875+ " .long 35b,100b\n"
20876+ " .long 36b,100b\n"
20877+ " .long 37b,100b\n"
20878+ " .long 99b,101b\n"
20879+ ".previous"
20880+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20881+ : "1"(to), "2"(from), "0"(size)
20882+ : "eax", "edx", "memory");
20883+ return size;
20884+}
20885+
20886+static unsigned long
20887+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20888+{
20889+ int d0, d1;
20890+ __asm__ __volatile__(
20891+ " .align 2,0x90\n"
20892+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20893+ " cmpl $67, %0\n"
20894+ " jbe 3f\n"
20895+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20896+ " .align 2,0x90\n"
20897+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20898+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20899+ "5: movl %%eax, 0(%3)\n"
20900+ "6: movl %%edx, 4(%3)\n"
20901+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20902+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20903+ "9: movl %%eax, 8(%3)\n"
20904+ "10: movl %%edx, 12(%3)\n"
20905+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20906+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20907+ "13: movl %%eax, 16(%3)\n"
20908+ "14: movl %%edx, 20(%3)\n"
20909+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20910+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20911+ "17: movl %%eax, 24(%3)\n"
20912+ "18: movl %%edx, 28(%3)\n"
20913+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20914+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20915+ "21: movl %%eax, 32(%3)\n"
20916+ "22: movl %%edx, 36(%3)\n"
20917+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20918+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20919+ "25: movl %%eax, 40(%3)\n"
20920+ "26: movl %%edx, 44(%3)\n"
20921+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20922+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20923+ "29: movl %%eax, 48(%3)\n"
20924+ "30: movl %%edx, 52(%3)\n"
20925+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20926+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20927+ "33: movl %%eax, 56(%3)\n"
20928+ "34: movl %%edx, 60(%3)\n"
20929+ " addl $-64, %0\n"
20930+ " addl $64, %4\n"
20931+ " addl $64, %3\n"
20932+ " cmpl $63, %0\n"
20933+ " ja 1b\n"
20934+ "35: movl %0, %%eax\n"
20935+ " shrl $2, %0\n"
20936+ " andl $3, %%eax\n"
20937+ " cld\n"
20938+ "99: rep; "__copyuser_seg" movsl\n"
20939+ "36: movl %%eax, %0\n"
20940+ "37: rep; "__copyuser_seg" movsb\n"
20941+ "100:\n"
20942 ".section .fixup,\"ax\"\n"
20943 "101: lea 0(%%eax,%0,4),%0\n"
20944 " jmp 100b\n"
20945@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20946 int d0, d1;
20947 __asm__ __volatile__(
20948 " .align 2,0x90\n"
20949- "0: movl 32(%4), %%eax\n"
20950+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20951 " cmpl $67, %0\n"
20952 " jbe 2f\n"
20953- "1: movl 64(%4), %%eax\n"
20954+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20955 " .align 2,0x90\n"
20956- "2: movl 0(%4), %%eax\n"
20957- "21: movl 4(%4), %%edx\n"
20958+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20959+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20960 " movl %%eax, 0(%3)\n"
20961 " movl %%edx, 4(%3)\n"
20962- "3: movl 8(%4), %%eax\n"
20963- "31: movl 12(%4),%%edx\n"
20964+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20965+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20966 " movl %%eax, 8(%3)\n"
20967 " movl %%edx, 12(%3)\n"
20968- "4: movl 16(%4), %%eax\n"
20969- "41: movl 20(%4), %%edx\n"
20970+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20971+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20972 " movl %%eax, 16(%3)\n"
20973 " movl %%edx, 20(%3)\n"
20974- "10: movl 24(%4), %%eax\n"
20975- "51: movl 28(%4), %%edx\n"
20976+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20977+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20978 " movl %%eax, 24(%3)\n"
20979 " movl %%edx, 28(%3)\n"
20980- "11: movl 32(%4), %%eax\n"
20981- "61: movl 36(%4), %%edx\n"
20982+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20983+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20984 " movl %%eax, 32(%3)\n"
20985 " movl %%edx, 36(%3)\n"
20986- "12: movl 40(%4), %%eax\n"
20987- "71: movl 44(%4), %%edx\n"
20988+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20989+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20990 " movl %%eax, 40(%3)\n"
20991 " movl %%edx, 44(%3)\n"
20992- "13: movl 48(%4), %%eax\n"
20993- "81: movl 52(%4), %%edx\n"
20994+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20995+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20996 " movl %%eax, 48(%3)\n"
20997 " movl %%edx, 52(%3)\n"
20998- "14: movl 56(%4), %%eax\n"
20999- "91: movl 60(%4), %%edx\n"
21000+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21001+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21002 " movl %%eax, 56(%3)\n"
21003 " movl %%edx, 60(%3)\n"
21004 " addl $-64, %0\n"
21005@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21006 " shrl $2, %0\n"
21007 " andl $3, %%eax\n"
21008 " cld\n"
21009- "6: rep; movsl\n"
21010+ "6: rep; "__copyuser_seg" movsl\n"
21011 " movl %%eax,%0\n"
21012- "7: rep; movsb\n"
21013+ "7: rep; "__copyuser_seg" movsb\n"
21014 "8:\n"
21015 ".section .fixup,\"ax\"\n"
21016 "9: lea 0(%%eax,%0,4),%0\n"
21017@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21018
21019 __asm__ __volatile__(
21020 " .align 2,0x90\n"
21021- "0: movl 32(%4), %%eax\n"
21022+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21023 " cmpl $67, %0\n"
21024 " jbe 2f\n"
21025- "1: movl 64(%4), %%eax\n"
21026+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21027 " .align 2,0x90\n"
21028- "2: movl 0(%4), %%eax\n"
21029- "21: movl 4(%4), %%edx\n"
21030+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21031+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21032 " movnti %%eax, 0(%3)\n"
21033 " movnti %%edx, 4(%3)\n"
21034- "3: movl 8(%4), %%eax\n"
21035- "31: movl 12(%4),%%edx\n"
21036+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21037+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21038 " movnti %%eax, 8(%3)\n"
21039 " movnti %%edx, 12(%3)\n"
21040- "4: movl 16(%4), %%eax\n"
21041- "41: movl 20(%4), %%edx\n"
21042+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21043+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21044 " movnti %%eax, 16(%3)\n"
21045 " movnti %%edx, 20(%3)\n"
21046- "10: movl 24(%4), %%eax\n"
21047- "51: movl 28(%4), %%edx\n"
21048+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21049+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21050 " movnti %%eax, 24(%3)\n"
21051 " movnti %%edx, 28(%3)\n"
21052- "11: movl 32(%4), %%eax\n"
21053- "61: movl 36(%4), %%edx\n"
21054+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21055+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21056 " movnti %%eax, 32(%3)\n"
21057 " movnti %%edx, 36(%3)\n"
21058- "12: movl 40(%4), %%eax\n"
21059- "71: movl 44(%4), %%edx\n"
21060+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21061+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21062 " movnti %%eax, 40(%3)\n"
21063 " movnti %%edx, 44(%3)\n"
21064- "13: movl 48(%4), %%eax\n"
21065- "81: movl 52(%4), %%edx\n"
21066+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21067+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21068 " movnti %%eax, 48(%3)\n"
21069 " movnti %%edx, 52(%3)\n"
21070- "14: movl 56(%4), %%eax\n"
21071- "91: movl 60(%4), %%edx\n"
21072+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21073+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21074 " movnti %%eax, 56(%3)\n"
21075 " movnti %%edx, 60(%3)\n"
21076 " addl $-64, %0\n"
21077@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21078 " shrl $2, %0\n"
21079 " andl $3, %%eax\n"
21080 " cld\n"
21081- "6: rep; movsl\n"
21082+ "6: rep; "__copyuser_seg" movsl\n"
21083 " movl %%eax,%0\n"
21084- "7: rep; movsb\n"
21085+ "7: rep; "__copyuser_seg" movsb\n"
21086 "8:\n"
21087 ".section .fixup,\"ax\"\n"
21088 "9: lea 0(%%eax,%0,4),%0\n"
21089@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21090
21091 __asm__ __volatile__(
21092 " .align 2,0x90\n"
21093- "0: movl 32(%4), %%eax\n"
21094+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21095 " cmpl $67, %0\n"
21096 " jbe 2f\n"
21097- "1: movl 64(%4), %%eax\n"
21098+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21099 " .align 2,0x90\n"
21100- "2: movl 0(%4), %%eax\n"
21101- "21: movl 4(%4), %%edx\n"
21102+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21103+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21104 " movnti %%eax, 0(%3)\n"
21105 " movnti %%edx, 4(%3)\n"
21106- "3: movl 8(%4), %%eax\n"
21107- "31: movl 12(%4),%%edx\n"
21108+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21109+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21110 " movnti %%eax, 8(%3)\n"
21111 " movnti %%edx, 12(%3)\n"
21112- "4: movl 16(%4), %%eax\n"
21113- "41: movl 20(%4), %%edx\n"
21114+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21115+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21116 " movnti %%eax, 16(%3)\n"
21117 " movnti %%edx, 20(%3)\n"
21118- "10: movl 24(%4), %%eax\n"
21119- "51: movl 28(%4), %%edx\n"
21120+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21121+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21122 " movnti %%eax, 24(%3)\n"
21123 " movnti %%edx, 28(%3)\n"
21124- "11: movl 32(%4), %%eax\n"
21125- "61: movl 36(%4), %%edx\n"
21126+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21127+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21128 " movnti %%eax, 32(%3)\n"
21129 " movnti %%edx, 36(%3)\n"
21130- "12: movl 40(%4), %%eax\n"
21131- "71: movl 44(%4), %%edx\n"
21132+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21133+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21134 " movnti %%eax, 40(%3)\n"
21135 " movnti %%edx, 44(%3)\n"
21136- "13: movl 48(%4), %%eax\n"
21137- "81: movl 52(%4), %%edx\n"
21138+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21139+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21140 " movnti %%eax, 48(%3)\n"
21141 " movnti %%edx, 52(%3)\n"
21142- "14: movl 56(%4), %%eax\n"
21143- "91: movl 60(%4), %%edx\n"
21144+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21145+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21146 " movnti %%eax, 56(%3)\n"
21147 " movnti %%edx, 60(%3)\n"
21148 " addl $-64, %0\n"
21149@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21150 " shrl $2, %0\n"
21151 " andl $3, %%eax\n"
21152 " cld\n"
21153- "6: rep; movsl\n"
21154+ "6: rep; "__copyuser_seg" movsl\n"
21155 " movl %%eax,%0\n"
21156- "7: rep; movsb\n"
21157+ "7: rep; "__copyuser_seg" movsb\n"
21158 "8:\n"
21159 ".section .fixup,\"ax\"\n"
21160 "9: lea 0(%%eax,%0,4),%0\n"
21161@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21162 */
21163 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21164 unsigned long size);
21165-unsigned long __copy_user_intel(void __user *to, const void *from,
21166+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21167+ unsigned long size);
21168+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21169 unsigned long size);
21170 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21171 const void __user *from, unsigned long size);
21172 #endif /* CONFIG_X86_INTEL_USERCOPY */
21173
21174 /* Generic arbitrary sized copy. */
21175-#define __copy_user(to, from, size) \
21176+#define __copy_user(to, from, size, prefix, set, restore) \
21177 do { \
21178 int __d0, __d1, __d2; \
21179 __asm__ __volatile__( \
21180+ set \
21181 " cmp $7,%0\n" \
21182 " jbe 1f\n" \
21183 " movl %1,%0\n" \
21184 " negl %0\n" \
21185 " andl $7,%0\n" \
21186 " subl %0,%3\n" \
21187- "4: rep; movsb\n" \
21188+ "4: rep; "prefix"movsb\n" \
21189 " movl %3,%0\n" \
21190 " shrl $2,%0\n" \
21191 " andl $3,%3\n" \
21192 " .align 2,0x90\n" \
21193- "0: rep; movsl\n" \
21194+ "0: rep; "prefix"movsl\n" \
21195 " movl %3,%0\n" \
21196- "1: rep; movsb\n" \
21197+ "1: rep; "prefix"movsb\n" \
21198 "2:\n" \
21199+ restore \
21200 ".section .fixup,\"ax\"\n" \
21201 "5: addl %3,%0\n" \
21202 " jmp 2b\n" \
21203@@ -682,14 +799,14 @@ do { \
21204 " negl %0\n" \
21205 " andl $7,%0\n" \
21206 " subl %0,%3\n" \
21207- "4: rep; movsb\n" \
21208+ "4: rep; "__copyuser_seg"movsb\n" \
21209 " movl %3,%0\n" \
21210 " shrl $2,%0\n" \
21211 " andl $3,%3\n" \
21212 " .align 2,0x90\n" \
21213- "0: rep; movsl\n" \
21214+ "0: rep; "__copyuser_seg"movsl\n" \
21215 " movl %3,%0\n" \
21216- "1: rep; movsb\n" \
21217+ "1: rep; "__copyuser_seg"movsb\n" \
21218 "2:\n" \
21219 ".section .fixup,\"ax\"\n" \
21220 "5: addl %3,%0\n" \
21221@@ -775,9 +892,9 @@ survive:
21222 }
21223 #endif
21224 if (movsl_is_ok(to, from, n))
21225- __copy_user(to, from, n);
21226+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21227 else
21228- n = __copy_user_intel(to, from, n);
21229+ n = __generic_copy_to_user_intel(to, from, n);
21230 return n;
21231 }
21232 EXPORT_SYMBOL(__copy_to_user_ll);
21233@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21234 unsigned long n)
21235 {
21236 if (movsl_is_ok(to, from, n))
21237- __copy_user(to, from, n);
21238+ __copy_user(to, from, n, __copyuser_seg, "", "");
21239 else
21240- n = __copy_user_intel((void __user *)to,
21241- (const void *)from, n);
21242+ n = __generic_copy_from_user_intel(to, from, n);
21243 return n;
21244 }
21245 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21246@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21247 if (n > 64 && cpu_has_xmm2)
21248 n = __copy_user_intel_nocache(to, from, n);
21249 else
21250- __copy_user(to, from, n);
21251+ __copy_user(to, from, n, __copyuser_seg, "", "");
21252 #else
21253- __copy_user(to, from, n);
21254+ __copy_user(to, from, n, __copyuser_seg, "", "");
21255 #endif
21256 return n;
21257 }
21258 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21259
21260-/**
21261- * copy_to_user: - Copy a block of data into user space.
21262- * @to: Destination address, in user space.
21263- * @from: Source address, in kernel space.
21264- * @n: Number of bytes to copy.
21265- *
21266- * Context: User context only. This function may sleep.
21267- *
21268- * Copy data from kernel space to user space.
21269- *
21270- * Returns number of bytes that could not be copied.
21271- * On success, this will be zero.
21272- */
21273-unsigned long
21274-copy_to_user(void __user *to, const void *from, unsigned long n)
21275-{
21276- if (access_ok(VERIFY_WRITE, to, n))
21277- n = __copy_to_user(to, from, n);
21278- return n;
21279-}
21280-EXPORT_SYMBOL(copy_to_user);
21281-
21282-/**
21283- * copy_from_user: - Copy a block of data from user space.
21284- * @to: Destination address, in kernel space.
21285- * @from: Source address, in user space.
21286- * @n: Number of bytes to copy.
21287- *
21288- * Context: User context only. This function may sleep.
21289- *
21290- * Copy data from user space to kernel space.
21291- *
21292- * Returns number of bytes that could not be copied.
21293- * On success, this will be zero.
21294- *
21295- * If some data could not be copied, this function will pad the copied
21296- * data to the requested size using zero bytes.
21297- */
21298-unsigned long
21299-_copy_from_user(void *to, const void __user *from, unsigned long n)
21300-{
21301- if (access_ok(VERIFY_READ, from, n))
21302- n = __copy_from_user(to, from, n);
21303- else
21304- memset(to, 0, n);
21305- return n;
21306-}
21307-EXPORT_SYMBOL(_copy_from_user);
21308-
21309 void copy_from_user_overflow(void)
21310 {
21311 WARN(1, "Buffer overflow detected!\n");
21312 }
21313 EXPORT_SYMBOL(copy_from_user_overflow);
21314+
21315+void copy_to_user_overflow(void)
21316+{
21317+ WARN(1, "Buffer overflow detected!\n");
21318+}
21319+EXPORT_SYMBOL(copy_to_user_overflow);
21320+
21321+#ifdef CONFIG_PAX_MEMORY_UDEREF
21322+void __set_fs(mm_segment_t x)
21323+{
21324+ switch (x.seg) {
21325+ case 0:
21326+ loadsegment(gs, 0);
21327+ break;
21328+ case TASK_SIZE_MAX:
21329+ loadsegment(gs, __USER_DS);
21330+ break;
21331+ case -1UL:
21332+ loadsegment(gs, __KERNEL_DS);
21333+ break;
21334+ default:
21335+ BUG();
21336+ }
21337+ return;
21338+}
21339+EXPORT_SYMBOL(__set_fs);
21340+
21341+void set_fs(mm_segment_t x)
21342+{
21343+ current_thread_info()->addr_limit = x;
21344+ __set_fs(x);
21345+}
21346+EXPORT_SYMBOL(set_fs);
21347+#endif
21348diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21349index b7c2849..8633ad8 100644
21350--- a/arch/x86/lib/usercopy_64.c
21351+++ b/arch/x86/lib/usercopy_64.c
21352@@ -42,6 +42,12 @@ long
21353 __strncpy_from_user(char *dst, const char __user *src, long count)
21354 {
21355 long res;
21356+
21357+#ifdef CONFIG_PAX_MEMORY_UDEREF
21358+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21359+ src += PAX_USER_SHADOW_BASE;
21360+#endif
21361+
21362 __do_strncpy_from_user(dst, src, count, res);
21363 return res;
21364 }
21365@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21366 {
21367 long __d0;
21368 might_fault();
21369+
21370+#ifdef CONFIG_PAX_MEMORY_UDEREF
21371+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21372+ addr += PAX_USER_SHADOW_BASE;
21373+#endif
21374+
21375 /* no memory constraint because it doesn't change any memory gcc knows
21376 about */
21377 asm volatile(
21378@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21379 }
21380 EXPORT_SYMBOL(strlen_user);
21381
21382-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21383+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21384 {
21385- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21386- return copy_user_generic((__force void *)to, (__force void *)from, len);
21387- }
21388- return len;
21389+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21390+
21391+#ifdef CONFIG_PAX_MEMORY_UDEREF
21392+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21393+ to += PAX_USER_SHADOW_BASE;
21394+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21395+ from += PAX_USER_SHADOW_BASE;
21396+#endif
21397+
21398+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21399+ }
21400+ return len;
21401 }
21402 EXPORT_SYMBOL(copy_in_user);
21403
21404@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21405 * it is not necessary to optimize tail handling.
21406 */
21407 unsigned long
21408-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21409+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21410 {
21411 char c;
21412 unsigned zero_len;
21413diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21414index d0474ad..36e9257 100644
21415--- a/arch/x86/mm/extable.c
21416+++ b/arch/x86/mm/extable.c
21417@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21418 const struct exception_table_entry *fixup;
21419
21420 #ifdef CONFIG_PNPBIOS
21421- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21422+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21423 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21424 extern u32 pnp_bios_is_utter_crap;
21425 pnp_bios_is_utter_crap = 1;
21426diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21427index 5db0490..13bd09c 100644
21428--- a/arch/x86/mm/fault.c
21429+++ b/arch/x86/mm/fault.c
21430@@ -13,11 +13,18 @@
21431 #include <linux/perf_event.h> /* perf_sw_event */
21432 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21433 #include <linux/prefetch.h> /* prefetchw */
21434+#include <linux/unistd.h>
21435+#include <linux/compiler.h>
21436
21437 #include <asm/traps.h> /* dotraplinkage, ... */
21438 #include <asm/pgalloc.h> /* pgd_*(), ... */
21439 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21440 #include <asm/fixmap.h> /* VSYSCALL_START */
21441+#include <asm/tlbflush.h>
21442+
21443+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21444+#include <asm/stacktrace.h>
21445+#endif
21446
21447 /*
21448 * Page fault error code bits:
21449@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21450 int ret = 0;
21451
21452 /* kprobe_running() needs smp_processor_id() */
21453- if (kprobes_built_in() && !user_mode_vm(regs)) {
21454+ if (kprobes_built_in() && !user_mode(regs)) {
21455 preempt_disable();
21456 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21457 ret = 1;
21458@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21459 return !instr_lo || (instr_lo>>1) == 1;
21460 case 0x00:
21461 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21462- if (probe_kernel_address(instr, opcode))
21463+ if (user_mode(regs)) {
21464+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21465+ return 0;
21466+ } else if (probe_kernel_address(instr, opcode))
21467 return 0;
21468
21469 *prefetch = (instr_lo == 0xF) &&
21470@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21471 while (instr < max_instr) {
21472 unsigned char opcode;
21473
21474- if (probe_kernel_address(instr, opcode))
21475+ if (user_mode(regs)) {
21476+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21477+ break;
21478+ } else if (probe_kernel_address(instr, opcode))
21479 break;
21480
21481 instr++;
21482@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21483 force_sig_info(si_signo, &info, tsk);
21484 }
21485
21486+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21487+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21488+#endif
21489+
21490+#ifdef CONFIG_PAX_EMUTRAMP
21491+static int pax_handle_fetch_fault(struct pt_regs *regs);
21492+#endif
21493+
21494+#ifdef CONFIG_PAX_PAGEEXEC
21495+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21496+{
21497+ pgd_t *pgd;
21498+ pud_t *pud;
21499+ pmd_t *pmd;
21500+
21501+ pgd = pgd_offset(mm, address);
21502+ if (!pgd_present(*pgd))
21503+ return NULL;
21504+ pud = pud_offset(pgd, address);
21505+ if (!pud_present(*pud))
21506+ return NULL;
21507+ pmd = pmd_offset(pud, address);
21508+ if (!pmd_present(*pmd))
21509+ return NULL;
21510+ return pmd;
21511+}
21512+#endif
21513+
21514 DEFINE_SPINLOCK(pgd_lock);
21515 LIST_HEAD(pgd_list);
21516
21517@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21518 for (address = VMALLOC_START & PMD_MASK;
21519 address >= TASK_SIZE && address < FIXADDR_TOP;
21520 address += PMD_SIZE) {
21521+
21522+#ifdef CONFIG_PAX_PER_CPU_PGD
21523+ unsigned long cpu;
21524+#else
21525 struct page *page;
21526+#endif
21527
21528 spin_lock(&pgd_lock);
21529+
21530+#ifdef CONFIG_PAX_PER_CPU_PGD
21531+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21532+ pgd_t *pgd = get_cpu_pgd(cpu);
21533+ pmd_t *ret;
21534+#else
21535 list_for_each_entry(page, &pgd_list, lru) {
21536+ pgd_t *pgd = page_address(page);
21537 spinlock_t *pgt_lock;
21538 pmd_t *ret;
21539
21540@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21541 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21542
21543 spin_lock(pgt_lock);
21544- ret = vmalloc_sync_one(page_address(page), address);
21545+#endif
21546+
21547+ ret = vmalloc_sync_one(pgd, address);
21548+
21549+#ifndef CONFIG_PAX_PER_CPU_PGD
21550 spin_unlock(pgt_lock);
21551+#endif
21552
21553 if (!ret)
21554 break;
21555@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21556 * an interrupt in the middle of a task switch..
21557 */
21558 pgd_paddr = read_cr3();
21559+
21560+#ifdef CONFIG_PAX_PER_CPU_PGD
21561+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21562+#endif
21563+
21564 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21565 if (!pmd_k)
21566 return -1;
21567@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21568 * happen within a race in page table update. In the later
21569 * case just flush:
21570 */
21571+
21572+#ifdef CONFIG_PAX_PER_CPU_PGD
21573+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21574+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21575+#else
21576 pgd = pgd_offset(current->active_mm, address);
21577+#endif
21578+
21579 pgd_ref = pgd_offset_k(address);
21580 if (pgd_none(*pgd_ref))
21581 return -1;
21582@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21583 static int is_errata100(struct pt_regs *regs, unsigned long address)
21584 {
21585 #ifdef CONFIG_X86_64
21586- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21587+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21588 return 1;
21589 #endif
21590 return 0;
21591@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21592 }
21593
21594 static const char nx_warning[] = KERN_CRIT
21595-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21596+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21597
21598 static void
21599 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21600@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21601 if (!oops_may_print())
21602 return;
21603
21604- if (error_code & PF_INSTR) {
21605+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21606 unsigned int level;
21607
21608 pte_t *pte = lookup_address(address, &level);
21609
21610 if (pte && pte_present(*pte) && !pte_exec(*pte))
21611- printk(nx_warning, current_uid());
21612+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21613 }
21614
21615+#ifdef CONFIG_PAX_KERNEXEC
21616+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21617+ if (current->signal->curr_ip)
21618+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21619+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21620+ else
21621+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21622+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21623+ }
21624+#endif
21625+
21626 printk(KERN_ALERT "BUG: unable to handle kernel ");
21627 if (address < PAGE_SIZE)
21628 printk(KERN_CONT "NULL pointer dereference");
21629@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21630 }
21631 #endif
21632
21633+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21634+ if (pax_is_fetch_fault(regs, error_code, address)) {
21635+
21636+#ifdef CONFIG_PAX_EMUTRAMP
21637+ switch (pax_handle_fetch_fault(regs)) {
21638+ case 2:
21639+ return;
21640+ }
21641+#endif
21642+
21643+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21644+ do_group_exit(SIGKILL);
21645+ }
21646+#endif
21647+
21648 if (unlikely(show_unhandled_signals))
21649 show_signal_msg(regs, error_code, address, tsk);
21650
21651@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21652 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21653 printk(KERN_ERR
21654 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21655- tsk->comm, tsk->pid, address);
21656+ tsk->comm, task_pid_nr(tsk), address);
21657 code = BUS_MCEERR_AR;
21658 }
21659 #endif
21660@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21661 return 1;
21662 }
21663
21664+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21665+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21666+{
21667+ pte_t *pte;
21668+ pmd_t *pmd;
21669+ spinlock_t *ptl;
21670+ unsigned char pte_mask;
21671+
21672+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21673+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21674+ return 0;
21675+
21676+ /* PaX: it's our fault, let's handle it if we can */
21677+
21678+ /* PaX: take a look at read faults before acquiring any locks */
21679+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21680+ /* instruction fetch attempt from a protected page in user mode */
21681+ up_read(&mm->mmap_sem);
21682+
21683+#ifdef CONFIG_PAX_EMUTRAMP
21684+ switch (pax_handle_fetch_fault(regs)) {
21685+ case 2:
21686+ return 1;
21687+ }
21688+#endif
21689+
21690+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21691+ do_group_exit(SIGKILL);
21692+ }
21693+
21694+ pmd = pax_get_pmd(mm, address);
21695+ if (unlikely(!pmd))
21696+ return 0;
21697+
21698+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21699+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21700+ pte_unmap_unlock(pte, ptl);
21701+ return 0;
21702+ }
21703+
21704+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21705+ /* write attempt to a protected page in user mode */
21706+ pte_unmap_unlock(pte, ptl);
21707+ return 0;
21708+ }
21709+
21710+#ifdef CONFIG_SMP
21711+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21712+#else
21713+ if (likely(address > get_limit(regs->cs)))
21714+#endif
21715+ {
21716+ set_pte(pte, pte_mkread(*pte));
21717+ __flush_tlb_one(address);
21718+ pte_unmap_unlock(pte, ptl);
21719+ up_read(&mm->mmap_sem);
21720+ return 1;
21721+ }
21722+
21723+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21724+
21725+ /*
21726+ * PaX: fill DTLB with user rights and retry
21727+ */
21728+ __asm__ __volatile__ (
21729+ "orb %2,(%1)\n"
21730+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21731+/*
21732+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21733+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21734+ * page fault when examined during a TLB load attempt. this is true not only
21735+ * for PTEs holding a non-present entry but also present entries that will
21736+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21737+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21738+ * for our target pages since their PTEs are simply not in the TLBs at all.
21739+
21740+ * the best thing in omitting it is that we gain around 15-20% speed in the
21741+ * fast path of the page fault handler and can get rid of tracing since we
21742+ * can no longer flush unintended entries.
21743+ */
21744+ "invlpg (%0)\n"
21745+#endif
21746+ __copyuser_seg"testb $0,(%0)\n"
21747+ "xorb %3,(%1)\n"
21748+ :
21749+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21750+ : "memory", "cc");
21751+ pte_unmap_unlock(pte, ptl);
21752+ up_read(&mm->mmap_sem);
21753+ return 1;
21754+}
21755+#endif
21756+
21757 /*
21758 * Handle a spurious fault caused by a stale TLB entry.
21759 *
21760@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21761 static inline int
21762 access_error(unsigned long error_code, struct vm_area_struct *vma)
21763 {
21764+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21765+ return 1;
21766+
21767 if (error_code & PF_WRITE) {
21768 /* write, present and write, not present: */
21769 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21770@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21771 {
21772 struct vm_area_struct *vma;
21773 struct task_struct *tsk;
21774- unsigned long address;
21775 struct mm_struct *mm;
21776 int fault;
21777 int write = error_code & PF_WRITE;
21778 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21779 (write ? FAULT_FLAG_WRITE : 0);
21780
21781- tsk = current;
21782- mm = tsk->mm;
21783-
21784 /* Get the faulting address: */
21785- address = read_cr2();
21786+ unsigned long address = read_cr2();
21787+
21788+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21789+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21790+ if (!search_exception_tables(regs->ip)) {
21791+ bad_area_nosemaphore(regs, error_code, address);
21792+ return;
21793+ }
21794+ if (address < PAX_USER_SHADOW_BASE) {
21795+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21796+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21797+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21798+ } else
21799+ address -= PAX_USER_SHADOW_BASE;
21800+ }
21801+#endif
21802+
21803+ tsk = current;
21804+ mm = tsk->mm;
21805
21806 /*
21807 * Detect and handle instructions that would cause a page fault for
21808@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21809 * User-mode registers count as a user access even for any
21810 * potential system fault or CPU buglet:
21811 */
21812- if (user_mode_vm(regs)) {
21813+ if (user_mode(regs)) {
21814 local_irq_enable();
21815 error_code |= PF_USER;
21816 } else {
21817@@ -1122,6 +1328,11 @@ retry:
21818 might_sleep();
21819 }
21820
21821+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21822+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21823+ return;
21824+#endif
21825+
21826 vma = find_vma(mm, address);
21827 if (unlikely(!vma)) {
21828 bad_area(regs, error_code, address);
21829@@ -1133,18 +1344,24 @@ retry:
21830 bad_area(regs, error_code, address);
21831 return;
21832 }
21833- if (error_code & PF_USER) {
21834- /*
21835- * Accessing the stack below %sp is always a bug.
21836- * The large cushion allows instructions like enter
21837- * and pusha to work. ("enter $65535, $31" pushes
21838- * 32 pointers and then decrements %sp by 65535.)
21839- */
21840- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21841- bad_area(regs, error_code, address);
21842- return;
21843- }
21844+ /*
21845+ * Accessing the stack below %sp is always a bug.
21846+ * The large cushion allows instructions like enter
21847+ * and pusha to work. ("enter $65535, $31" pushes
21848+ * 32 pointers and then decrements %sp by 65535.)
21849+ */
21850+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21851+ bad_area(regs, error_code, address);
21852+ return;
21853 }
21854+
21855+#ifdef CONFIG_PAX_SEGMEXEC
21856+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21857+ bad_area(regs, error_code, address);
21858+ return;
21859+ }
21860+#endif
21861+
21862 if (unlikely(expand_stack(vma, address))) {
21863 bad_area(regs, error_code, address);
21864 return;
21865@@ -1199,3 +1416,292 @@ good_area:
21866
21867 up_read(&mm->mmap_sem);
21868 }
21869+
21870+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21871+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21872+{
21873+ struct mm_struct *mm = current->mm;
21874+ unsigned long ip = regs->ip;
21875+
21876+ if (v8086_mode(regs))
21877+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21878+
21879+#ifdef CONFIG_PAX_PAGEEXEC
21880+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21881+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21882+ return true;
21883+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21884+ return true;
21885+ return false;
21886+ }
21887+#endif
21888+
21889+#ifdef CONFIG_PAX_SEGMEXEC
21890+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21891+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21892+ return true;
21893+ return false;
21894+ }
21895+#endif
21896+
21897+ return false;
21898+}
21899+#endif
21900+
21901+#ifdef CONFIG_PAX_EMUTRAMP
21902+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21903+{
21904+ int err;
21905+
21906+ do { /* PaX: libffi trampoline emulation */
21907+ unsigned char mov, jmp;
21908+ unsigned int addr1, addr2;
21909+
21910+#ifdef CONFIG_X86_64
21911+ if ((regs->ip + 9) >> 32)
21912+ break;
21913+#endif
21914+
21915+ err = get_user(mov, (unsigned char __user *)regs->ip);
21916+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21917+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21918+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21919+
21920+ if (err)
21921+ break;
21922+
21923+ if (mov == 0xB8 && jmp == 0xE9) {
21924+ regs->ax = addr1;
21925+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21926+ return 2;
21927+ }
21928+ } while (0);
21929+
21930+ do { /* PaX: gcc trampoline emulation #1 */
21931+ unsigned char mov1, mov2;
21932+ unsigned short jmp;
21933+ unsigned int addr1, addr2;
21934+
21935+#ifdef CONFIG_X86_64
21936+ if ((regs->ip + 11) >> 32)
21937+ break;
21938+#endif
21939+
21940+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21941+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21942+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21943+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21944+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21945+
21946+ if (err)
21947+ break;
21948+
21949+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21950+ regs->cx = addr1;
21951+ regs->ax = addr2;
21952+ regs->ip = addr2;
21953+ return 2;
21954+ }
21955+ } while (0);
21956+
21957+ do { /* PaX: gcc trampoline emulation #2 */
21958+ unsigned char mov, jmp;
21959+ unsigned int addr1, addr2;
21960+
21961+#ifdef CONFIG_X86_64
21962+ if ((regs->ip + 9) >> 32)
21963+ break;
21964+#endif
21965+
21966+ err = get_user(mov, (unsigned char __user *)regs->ip);
21967+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21968+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21969+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21970+
21971+ if (err)
21972+ break;
21973+
21974+ if (mov == 0xB9 && jmp == 0xE9) {
21975+ regs->cx = addr1;
21976+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21977+ return 2;
21978+ }
21979+ } while (0);
21980+
21981+ return 1; /* PaX in action */
21982+}
21983+
21984+#ifdef CONFIG_X86_64
21985+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21986+{
21987+ int err;
21988+
21989+ do { /* PaX: libffi trampoline emulation */
21990+ unsigned short mov1, mov2, jmp1;
21991+ unsigned char stcclc, jmp2;
21992+ unsigned long addr1, addr2;
21993+
21994+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21995+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21996+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21997+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21998+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
21999+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22000+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22001+
22002+ if (err)
22003+ break;
22004+
22005+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22006+ regs->r11 = addr1;
22007+ regs->r10 = addr2;
22008+ if (stcclc == 0xF8)
22009+ regs->flags &= ~X86_EFLAGS_CF;
22010+ else
22011+ regs->flags |= X86_EFLAGS_CF;
22012+ regs->ip = addr1;
22013+ return 2;
22014+ }
22015+ } while (0);
22016+
22017+ do { /* PaX: gcc trampoline emulation #1 */
22018+ unsigned short mov1, mov2, jmp1;
22019+ unsigned char jmp2;
22020+ unsigned int addr1;
22021+ unsigned long addr2;
22022+
22023+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22024+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22025+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22026+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22027+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22028+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22029+
22030+ if (err)
22031+ break;
22032+
22033+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22034+ regs->r11 = addr1;
22035+ regs->r10 = addr2;
22036+ regs->ip = addr1;
22037+ return 2;
22038+ }
22039+ } while (0);
22040+
22041+ do { /* PaX: gcc trampoline emulation #2 */
22042+ unsigned short mov1, mov2, jmp1;
22043+ unsigned char jmp2;
22044+ unsigned long addr1, addr2;
22045+
22046+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22047+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22048+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22049+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22050+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22051+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22052+
22053+ if (err)
22054+ break;
22055+
22056+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22057+ regs->r11 = addr1;
22058+ regs->r10 = addr2;
22059+ regs->ip = addr1;
22060+ return 2;
22061+ }
22062+ } while (0);
22063+
22064+ return 1; /* PaX in action */
22065+}
22066+#endif
22067+
22068+/*
22069+ * PaX: decide what to do with offenders (regs->ip = fault address)
22070+ *
22071+ * returns 1 when task should be killed
22072+ * 2 when gcc trampoline was detected
22073+ */
22074+static int pax_handle_fetch_fault(struct pt_regs *regs)
22075+{
22076+ if (v8086_mode(regs))
22077+ return 1;
22078+
22079+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22080+ return 1;
22081+
22082+#ifdef CONFIG_X86_32
22083+ return pax_handle_fetch_fault_32(regs);
22084+#else
22085+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22086+ return pax_handle_fetch_fault_32(regs);
22087+ else
22088+ return pax_handle_fetch_fault_64(regs);
22089+#endif
22090+}
22091+#endif
22092+
22093+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22094+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22095+{
22096+ long i;
22097+
22098+ printk(KERN_ERR "PAX: bytes at PC: ");
22099+ for (i = 0; i < 20; i++) {
22100+ unsigned char c;
22101+ if (get_user(c, (unsigned char __force_user *)pc+i))
22102+ printk(KERN_CONT "?? ");
22103+ else
22104+ printk(KERN_CONT "%02x ", c);
22105+ }
22106+ printk("\n");
22107+
22108+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22109+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22110+ unsigned long c;
22111+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22112+#ifdef CONFIG_X86_32
22113+ printk(KERN_CONT "???????? ");
22114+#else
22115+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22116+ printk(KERN_CONT "???????? ???????? ");
22117+ else
22118+ printk(KERN_CONT "???????????????? ");
22119+#endif
22120+ } else {
22121+#ifdef CONFIG_X86_64
22122+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22123+ printk(KERN_CONT "%08x ", (unsigned int)c);
22124+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22125+ } else
22126+#endif
22127+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22128+ }
22129+ }
22130+ printk("\n");
22131+}
22132+#endif
22133+
22134+/**
22135+ * probe_kernel_write(): safely attempt to write to a location
22136+ * @dst: address to write to
22137+ * @src: pointer to the data that shall be written
22138+ * @size: size of the data chunk
22139+ *
22140+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22141+ * happens, handle that and return -EFAULT.
22142+ */
22143+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22144+{
22145+ long ret;
22146+ mm_segment_t old_fs = get_fs();
22147+
22148+ set_fs(KERNEL_DS);
22149+ pagefault_disable();
22150+ pax_open_kernel();
22151+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22152+ pax_close_kernel();
22153+ pagefault_enable();
22154+ set_fs(old_fs);
22155+
22156+ return ret ? -EFAULT : 0;
22157+}
22158diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22159index dd74e46..7d26398 100644
22160--- a/arch/x86/mm/gup.c
22161+++ b/arch/x86/mm/gup.c
22162@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22163 addr = start;
22164 len = (unsigned long) nr_pages << PAGE_SHIFT;
22165 end = start + len;
22166- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22167+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22168 (void __user *)start, len)))
22169 return 0;
22170
22171diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22172index f4f29b1..5cac4fb 100644
22173--- a/arch/x86/mm/highmem_32.c
22174+++ b/arch/x86/mm/highmem_32.c
22175@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22176 idx = type + KM_TYPE_NR*smp_processor_id();
22177 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22178 BUG_ON(!pte_none(*(kmap_pte-idx)));
22179+
22180+ pax_open_kernel();
22181 set_pte(kmap_pte-idx, mk_pte(page, prot));
22182+ pax_close_kernel();
22183+
22184 arch_flush_lazy_mmu_mode();
22185
22186 return (void *)vaddr;
22187diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22188index f581a18..29efd37 100644
22189--- a/arch/x86/mm/hugetlbpage.c
22190+++ b/arch/x86/mm/hugetlbpage.c
22191@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22192 struct hstate *h = hstate_file(file);
22193 struct mm_struct *mm = current->mm;
22194 struct vm_area_struct *vma;
22195- unsigned long start_addr;
22196+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22197+
22198+#ifdef CONFIG_PAX_SEGMEXEC
22199+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22200+ pax_task_size = SEGMEXEC_TASK_SIZE;
22201+#endif
22202+
22203+ pax_task_size -= PAGE_SIZE;
22204
22205 if (len > mm->cached_hole_size) {
22206- start_addr = mm->free_area_cache;
22207+ start_addr = mm->free_area_cache;
22208 } else {
22209- start_addr = TASK_UNMAPPED_BASE;
22210- mm->cached_hole_size = 0;
22211+ start_addr = mm->mmap_base;
22212+ mm->cached_hole_size = 0;
22213 }
22214
22215 full_search:
22216@@ -280,26 +287,27 @@ full_search:
22217
22218 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22219 /* At this point: (!vma || addr < vma->vm_end). */
22220- if (TASK_SIZE - len < addr) {
22221+ if (pax_task_size - len < addr) {
22222 /*
22223 * Start a new search - just in case we missed
22224 * some holes.
22225 */
22226- if (start_addr != TASK_UNMAPPED_BASE) {
22227- start_addr = TASK_UNMAPPED_BASE;
22228+ if (start_addr != mm->mmap_base) {
22229+ start_addr = mm->mmap_base;
22230 mm->cached_hole_size = 0;
22231 goto full_search;
22232 }
22233 return -ENOMEM;
22234 }
22235- if (!vma || addr + len <= vma->vm_start) {
22236- mm->free_area_cache = addr + len;
22237- return addr;
22238- }
22239+ if (check_heap_stack_gap(vma, addr, len))
22240+ break;
22241 if (addr + mm->cached_hole_size < vma->vm_start)
22242 mm->cached_hole_size = vma->vm_start - addr;
22243 addr = ALIGN(vma->vm_end, huge_page_size(h));
22244 }
22245+
22246+ mm->free_area_cache = addr + len;
22247+ return addr;
22248 }
22249
22250 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22251@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22252 {
22253 struct hstate *h = hstate_file(file);
22254 struct mm_struct *mm = current->mm;
22255- struct vm_area_struct *vma, *prev_vma;
22256- unsigned long base = mm->mmap_base, addr = addr0;
22257+ struct vm_area_struct *vma;
22258+ unsigned long base = mm->mmap_base, addr;
22259 unsigned long largest_hole = mm->cached_hole_size;
22260- int first_time = 1;
22261
22262 /* don't allow allocations above current base */
22263 if (mm->free_area_cache > base)
22264@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22265 largest_hole = 0;
22266 mm->free_area_cache = base;
22267 }
22268-try_again:
22269+
22270 /* make sure it can fit in the remaining address space */
22271 if (mm->free_area_cache < len)
22272 goto fail;
22273
22274 /* either no address requested or can't fit in requested address hole */
22275- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22276+ addr = (mm->free_area_cache - len);
22277 do {
22278+ addr &= huge_page_mask(h);
22279+ vma = find_vma(mm, addr);
22280 /*
22281 * Lookup failure means no vma is above this address,
22282 * i.e. return with success:
22283- */
22284- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22285- return addr;
22286-
22287- /*
22288 * new region fits between prev_vma->vm_end and
22289 * vma->vm_start, use it:
22290 */
22291- if (addr + len <= vma->vm_start &&
22292- (!prev_vma || (addr >= prev_vma->vm_end))) {
22293+ if (check_heap_stack_gap(vma, addr, len)) {
22294 /* remember the address as a hint for next time */
22295- mm->cached_hole_size = largest_hole;
22296- return (mm->free_area_cache = addr);
22297- } else {
22298- /* pull free_area_cache down to the first hole */
22299- if (mm->free_area_cache == vma->vm_end) {
22300- mm->free_area_cache = vma->vm_start;
22301- mm->cached_hole_size = largest_hole;
22302- }
22303+ mm->cached_hole_size = largest_hole;
22304+ return (mm->free_area_cache = addr);
22305+ }
22306+ /* pull free_area_cache down to the first hole */
22307+ if (mm->free_area_cache == vma->vm_end) {
22308+ mm->free_area_cache = vma->vm_start;
22309+ mm->cached_hole_size = largest_hole;
22310 }
22311
22312 /* remember the largest hole we saw so far */
22313 if (addr + largest_hole < vma->vm_start)
22314- largest_hole = vma->vm_start - addr;
22315+ largest_hole = vma->vm_start - addr;
22316
22317 /* try just below the current vma->vm_start */
22318- addr = (vma->vm_start - len) & huge_page_mask(h);
22319- } while (len <= vma->vm_start);
22320+ addr = skip_heap_stack_gap(vma, len);
22321+ } while (!IS_ERR_VALUE(addr));
22322
22323 fail:
22324 /*
22325- * if hint left us with no space for the requested
22326- * mapping then try again:
22327- */
22328- if (first_time) {
22329- mm->free_area_cache = base;
22330- largest_hole = 0;
22331- first_time = 0;
22332- goto try_again;
22333- }
22334- /*
22335 * A failed mmap() very likely causes application failure,
22336 * so fall back to the bottom-up function here. This scenario
22337 * can happen with large stack limits and large mmap()
22338 * allocations.
22339 */
22340- mm->free_area_cache = TASK_UNMAPPED_BASE;
22341+
22342+#ifdef CONFIG_PAX_SEGMEXEC
22343+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22344+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22345+ else
22346+#endif
22347+
22348+ mm->mmap_base = TASK_UNMAPPED_BASE;
22349+
22350+#ifdef CONFIG_PAX_RANDMMAP
22351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22352+ mm->mmap_base += mm->delta_mmap;
22353+#endif
22354+
22355+ mm->free_area_cache = mm->mmap_base;
22356 mm->cached_hole_size = ~0UL;
22357 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22358 len, pgoff, flags);
22359@@ -386,6 +392,7 @@ fail:
22360 /*
22361 * Restore the topdown base:
22362 */
22363+ mm->mmap_base = base;
22364 mm->free_area_cache = base;
22365 mm->cached_hole_size = ~0UL;
22366
22367@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22368 struct hstate *h = hstate_file(file);
22369 struct mm_struct *mm = current->mm;
22370 struct vm_area_struct *vma;
22371+ unsigned long pax_task_size = TASK_SIZE;
22372
22373 if (len & ~huge_page_mask(h))
22374 return -EINVAL;
22375- if (len > TASK_SIZE)
22376+
22377+#ifdef CONFIG_PAX_SEGMEXEC
22378+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22379+ pax_task_size = SEGMEXEC_TASK_SIZE;
22380+#endif
22381+
22382+ pax_task_size -= PAGE_SIZE;
22383+
22384+ if (len > pax_task_size)
22385 return -ENOMEM;
22386
22387 if (flags & MAP_FIXED) {
22388@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22389 if (addr) {
22390 addr = ALIGN(addr, huge_page_size(h));
22391 vma = find_vma(mm, addr);
22392- if (TASK_SIZE - len >= addr &&
22393- (!vma || addr + len <= vma->vm_start))
22394+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22395 return addr;
22396 }
22397 if (mm->get_unmapped_area == arch_get_unmapped_area)
22398diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22399index 87488b9..a55509f 100644
22400--- a/arch/x86/mm/init.c
22401+++ b/arch/x86/mm/init.c
22402@@ -15,6 +15,7 @@
22403 #include <asm/tlbflush.h>
22404 #include <asm/tlb.h>
22405 #include <asm/proto.h>
22406+#include <asm/desc.h>
22407
22408 unsigned long __initdata pgt_buf_start;
22409 unsigned long __meminitdata pgt_buf_end;
22410@@ -31,7 +32,7 @@ int direct_gbpages
22411 static void __init find_early_table_space(unsigned long end, int use_pse,
22412 int use_gbpages)
22413 {
22414- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22415+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22416 phys_addr_t base;
22417
22418 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22419@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22420 */
22421 int devmem_is_allowed(unsigned long pagenr)
22422 {
22423+#ifdef CONFIG_GRKERNSEC_KMEM
22424+ /* allow BDA */
22425+ if (!pagenr)
22426+ return 1;
22427+ /* allow EBDA */
22428+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22429+ return 1;
22430+#else
22431+ if (!pagenr)
22432+ return 1;
22433+#ifdef CONFIG_VM86
22434+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22435+ return 1;
22436+#endif
22437+#endif
22438+
22439+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22440+ return 1;
22441+#ifdef CONFIG_GRKERNSEC_KMEM
22442+ /* throw out everything else below 1MB */
22443 if (pagenr <= 256)
22444- return 1;
22445+ return 0;
22446+#endif
22447 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22448 return 0;
22449 if (!page_is_ram(pagenr))
22450@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22451
22452 void free_initmem(void)
22453 {
22454+
22455+#ifdef CONFIG_PAX_KERNEXEC
22456+#ifdef CONFIG_X86_32
22457+ /* PaX: limit KERNEL_CS to actual size */
22458+ unsigned long addr, limit;
22459+ struct desc_struct d;
22460+ int cpu;
22461+
22462+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22463+ limit = (limit - 1UL) >> PAGE_SHIFT;
22464+
22465+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22466+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22467+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22468+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22469+ }
22470+
22471+ /* PaX: make KERNEL_CS read-only */
22472+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22473+ if (!paravirt_enabled())
22474+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22475+/*
22476+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22477+ pgd = pgd_offset_k(addr);
22478+ pud = pud_offset(pgd, addr);
22479+ pmd = pmd_offset(pud, addr);
22480+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22481+ }
22482+*/
22483+#ifdef CONFIG_X86_PAE
22484+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22485+/*
22486+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22487+ pgd = pgd_offset_k(addr);
22488+ pud = pud_offset(pgd, addr);
22489+ pmd = pmd_offset(pud, addr);
22490+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22491+ }
22492+*/
22493+#endif
22494+
22495+#ifdef CONFIG_MODULES
22496+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22497+#endif
22498+
22499+#else
22500+ pgd_t *pgd;
22501+ pud_t *pud;
22502+ pmd_t *pmd;
22503+ unsigned long addr, end;
22504+
22505+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22506+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22507+ pgd = pgd_offset_k(addr);
22508+ pud = pud_offset(pgd, addr);
22509+ pmd = pmd_offset(pud, addr);
22510+ if (!pmd_present(*pmd))
22511+ continue;
22512+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22513+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22514+ else
22515+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22516+ }
22517+
22518+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22519+ end = addr + KERNEL_IMAGE_SIZE;
22520+ for (; addr < end; addr += PMD_SIZE) {
22521+ pgd = pgd_offset_k(addr);
22522+ pud = pud_offset(pgd, addr);
22523+ pmd = pmd_offset(pud, addr);
22524+ if (!pmd_present(*pmd))
22525+ continue;
22526+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22527+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22528+ }
22529+#endif
22530+
22531+ flush_tlb_all();
22532+#endif
22533+
22534 free_init_pages("unused kernel memory",
22535 (unsigned long)(&__init_begin),
22536 (unsigned long)(&__init_end));
22537diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22538index 29f7c6d..b46b35b 100644
22539--- a/arch/x86/mm/init_32.c
22540+++ b/arch/x86/mm/init_32.c
22541@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22542 }
22543
22544 /*
22545- * Creates a middle page table and puts a pointer to it in the
22546- * given global directory entry. This only returns the gd entry
22547- * in non-PAE compilation mode, since the middle layer is folded.
22548- */
22549-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22550-{
22551- pud_t *pud;
22552- pmd_t *pmd_table;
22553-
22554-#ifdef CONFIG_X86_PAE
22555- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22556- if (after_bootmem)
22557- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22558- else
22559- pmd_table = (pmd_t *)alloc_low_page();
22560- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22561- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22562- pud = pud_offset(pgd, 0);
22563- BUG_ON(pmd_table != pmd_offset(pud, 0));
22564-
22565- return pmd_table;
22566- }
22567-#endif
22568- pud = pud_offset(pgd, 0);
22569- pmd_table = pmd_offset(pud, 0);
22570-
22571- return pmd_table;
22572-}
22573-
22574-/*
22575 * Create a page table and place a pointer to it in a middle page
22576 * directory entry:
22577 */
22578@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22579 page_table = (pte_t *)alloc_low_page();
22580
22581 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22582+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22583+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22584+#else
22585 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22586+#endif
22587 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22588 }
22589
22590 return pte_offset_kernel(pmd, 0);
22591 }
22592
22593+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22594+{
22595+ pud_t *pud;
22596+ pmd_t *pmd_table;
22597+
22598+ pud = pud_offset(pgd, 0);
22599+ pmd_table = pmd_offset(pud, 0);
22600+
22601+ return pmd_table;
22602+}
22603+
22604 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22605 {
22606 int pgd_idx = pgd_index(vaddr);
22607@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22608 int pgd_idx, pmd_idx;
22609 unsigned long vaddr;
22610 pgd_t *pgd;
22611+ pud_t *pud;
22612 pmd_t *pmd;
22613 pte_t *pte = NULL;
22614
22615@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22616 pgd = pgd_base + pgd_idx;
22617
22618 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22619- pmd = one_md_table_init(pgd);
22620- pmd = pmd + pmd_index(vaddr);
22621+ pud = pud_offset(pgd, vaddr);
22622+ pmd = pmd_offset(pud, vaddr);
22623+
22624+#ifdef CONFIG_X86_PAE
22625+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22626+#endif
22627+
22628 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22629 pmd++, pmd_idx++) {
22630 pte = page_table_kmap_check(one_page_table_init(pmd),
22631@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22632 }
22633 }
22634
22635-static inline int is_kernel_text(unsigned long addr)
22636+static inline int is_kernel_text(unsigned long start, unsigned long end)
22637 {
22638- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22639- return 1;
22640- return 0;
22641+ if ((start > ktla_ktva((unsigned long)_etext) ||
22642+ end <= ktla_ktva((unsigned long)_stext)) &&
22643+ (start > ktla_ktva((unsigned long)_einittext) ||
22644+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22645+
22646+#ifdef CONFIG_ACPI_SLEEP
22647+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22648+#endif
22649+
22650+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22651+ return 0;
22652+ return 1;
22653 }
22654
22655 /*
22656@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22657 unsigned long last_map_addr = end;
22658 unsigned long start_pfn, end_pfn;
22659 pgd_t *pgd_base = swapper_pg_dir;
22660- int pgd_idx, pmd_idx, pte_ofs;
22661+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22662 unsigned long pfn;
22663 pgd_t *pgd;
22664+ pud_t *pud;
22665 pmd_t *pmd;
22666 pte_t *pte;
22667 unsigned pages_2m, pages_4k;
22668@@ -281,8 +282,13 @@ repeat:
22669 pfn = start_pfn;
22670 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22671 pgd = pgd_base + pgd_idx;
22672- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22673- pmd = one_md_table_init(pgd);
22674+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22675+ pud = pud_offset(pgd, 0);
22676+ pmd = pmd_offset(pud, 0);
22677+
22678+#ifdef CONFIG_X86_PAE
22679+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22680+#endif
22681
22682 if (pfn >= end_pfn)
22683 continue;
22684@@ -294,14 +300,13 @@ repeat:
22685 #endif
22686 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22687 pmd++, pmd_idx++) {
22688- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22689+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22690
22691 /*
22692 * Map with big pages if possible, otherwise
22693 * create normal page tables:
22694 */
22695 if (use_pse) {
22696- unsigned int addr2;
22697 pgprot_t prot = PAGE_KERNEL_LARGE;
22698 /*
22699 * first pass will use the same initial
22700@@ -311,11 +316,7 @@ repeat:
22701 __pgprot(PTE_IDENT_ATTR |
22702 _PAGE_PSE);
22703
22704- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22705- PAGE_OFFSET + PAGE_SIZE-1;
22706-
22707- if (is_kernel_text(addr) ||
22708- is_kernel_text(addr2))
22709+ if (is_kernel_text(address, address + PMD_SIZE))
22710 prot = PAGE_KERNEL_LARGE_EXEC;
22711
22712 pages_2m++;
22713@@ -332,7 +333,7 @@ repeat:
22714 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22715 pte += pte_ofs;
22716 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22717- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22718+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22719 pgprot_t prot = PAGE_KERNEL;
22720 /*
22721 * first pass will use the same initial
22722@@ -340,7 +341,7 @@ repeat:
22723 */
22724 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22725
22726- if (is_kernel_text(addr))
22727+ if (is_kernel_text(address, address + PAGE_SIZE))
22728 prot = PAGE_KERNEL_EXEC;
22729
22730 pages_4k++;
22731@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22732
22733 pud = pud_offset(pgd, va);
22734 pmd = pmd_offset(pud, va);
22735- if (!pmd_present(*pmd))
22736+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22737 break;
22738
22739 pte = pte_offset_kernel(pmd, va);
22740@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22741
22742 static void __init pagetable_init(void)
22743 {
22744- pgd_t *pgd_base = swapper_pg_dir;
22745-
22746- permanent_kmaps_init(pgd_base);
22747+ permanent_kmaps_init(swapper_pg_dir);
22748 }
22749
22750-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22751+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22752 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22753
22754 /* user-defined highmem size */
22755@@ -757,6 +756,12 @@ void __init mem_init(void)
22756
22757 pci_iommu_alloc();
22758
22759+#ifdef CONFIG_PAX_PER_CPU_PGD
22760+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22761+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22762+ KERNEL_PGD_PTRS);
22763+#endif
22764+
22765 #ifdef CONFIG_FLATMEM
22766 BUG_ON(!mem_map);
22767 #endif
22768@@ -774,7 +779,7 @@ void __init mem_init(void)
22769 set_highmem_pages_init();
22770
22771 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22772- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22773+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22774 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22775
22776 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22777@@ -815,10 +820,10 @@ void __init mem_init(void)
22778 ((unsigned long)&__init_end -
22779 (unsigned long)&__init_begin) >> 10,
22780
22781- (unsigned long)&_etext, (unsigned long)&_edata,
22782- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22783+ (unsigned long)&_sdata, (unsigned long)&_edata,
22784+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22785
22786- (unsigned long)&_text, (unsigned long)&_etext,
22787+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22788 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22789
22790 /*
22791@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22792 if (!kernel_set_to_readonly)
22793 return;
22794
22795+ start = ktla_ktva(start);
22796 pr_debug("Set kernel text: %lx - %lx for read write\n",
22797 start, start+size);
22798
22799@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22800 if (!kernel_set_to_readonly)
22801 return;
22802
22803+ start = ktla_ktva(start);
22804 pr_debug("Set kernel text: %lx - %lx for read only\n",
22805 start, start+size);
22806
22807@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22808 unsigned long start = PFN_ALIGN(_text);
22809 unsigned long size = PFN_ALIGN(_etext) - start;
22810
22811+ start = ktla_ktva(start);
22812 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22813 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22814 size >> 10);
22815diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22816index bbaaa00..16dffad 100644
22817--- a/arch/x86/mm/init_64.c
22818+++ b/arch/x86/mm/init_64.c
22819@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22820 * around without checking the pgd every time.
22821 */
22822
22823-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22824+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22825 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22826
22827 int force_personality32;
22828@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22829
22830 for (address = start; address <= end; address += PGDIR_SIZE) {
22831 const pgd_t *pgd_ref = pgd_offset_k(address);
22832+
22833+#ifdef CONFIG_PAX_PER_CPU_PGD
22834+ unsigned long cpu;
22835+#else
22836 struct page *page;
22837+#endif
22838
22839 if (pgd_none(*pgd_ref))
22840 continue;
22841
22842 spin_lock(&pgd_lock);
22843+
22844+#ifdef CONFIG_PAX_PER_CPU_PGD
22845+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22846+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22847+#else
22848 list_for_each_entry(page, &pgd_list, lru) {
22849 pgd_t *pgd;
22850 spinlock_t *pgt_lock;
22851@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22852 /* the pgt_lock only for Xen */
22853 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22854 spin_lock(pgt_lock);
22855+#endif
22856
22857 if (pgd_none(*pgd))
22858 set_pgd(pgd, *pgd_ref);
22859@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22860 BUG_ON(pgd_page_vaddr(*pgd)
22861 != pgd_page_vaddr(*pgd_ref));
22862
22863+#ifndef CONFIG_PAX_PER_CPU_PGD
22864 spin_unlock(pgt_lock);
22865+#endif
22866+
22867 }
22868 spin_unlock(&pgd_lock);
22869 }
22870@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22871 pmd = fill_pmd(pud, vaddr);
22872 pte = fill_pte(pmd, vaddr);
22873
22874+ pax_open_kernel();
22875 set_pte(pte, new_pte);
22876+ pax_close_kernel();
22877
22878 /*
22879 * It's enough to flush this one mapping.
22880@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22881 pgd = pgd_offset_k((unsigned long)__va(phys));
22882 if (pgd_none(*pgd)) {
22883 pud = (pud_t *) spp_getpage();
22884- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22885- _PAGE_USER));
22886+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22887 }
22888 pud = pud_offset(pgd, (unsigned long)__va(phys));
22889 if (pud_none(*pud)) {
22890 pmd = (pmd_t *) spp_getpage();
22891- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22892- _PAGE_USER));
22893+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22894 }
22895 pmd = pmd_offset(pud, phys);
22896 BUG_ON(!pmd_none(*pmd));
22897@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22898 if (pfn >= pgt_buf_top)
22899 panic("alloc_low_page: ran out of memory");
22900
22901- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22902+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22903 clear_page(adr);
22904 *phys = pfn * PAGE_SIZE;
22905 return adr;
22906@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22907
22908 phys = __pa(virt);
22909 left = phys & (PAGE_SIZE - 1);
22910- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22911+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22912 adr = (void *)(((unsigned long)adr) | left);
22913
22914 return adr;
22915@@ -693,6 +707,12 @@ void __init mem_init(void)
22916
22917 pci_iommu_alloc();
22918
22919+#ifdef CONFIG_PAX_PER_CPU_PGD
22920+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22921+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22922+ KERNEL_PGD_PTRS);
22923+#endif
22924+
22925 /* clear_bss() already clear the empty_zero_page */
22926
22927 reservedpages = 0;
22928@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22929 static struct vm_area_struct gate_vma = {
22930 .vm_start = VSYSCALL_START,
22931 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22932- .vm_page_prot = PAGE_READONLY_EXEC,
22933- .vm_flags = VM_READ | VM_EXEC
22934+ .vm_page_prot = PAGE_READONLY,
22935+ .vm_flags = VM_READ
22936 };
22937
22938 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22939@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22940
22941 const char *arch_vma_name(struct vm_area_struct *vma)
22942 {
22943- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22944+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22945 return "[vdso]";
22946 if (vma == &gate_vma)
22947 return "[vsyscall]";
22948diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22949index 7b179b4..6bd1777 100644
22950--- a/arch/x86/mm/iomap_32.c
22951+++ b/arch/x86/mm/iomap_32.c
22952@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22953 type = kmap_atomic_idx_push();
22954 idx = type + KM_TYPE_NR * smp_processor_id();
22955 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22956+
22957+ pax_open_kernel();
22958 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22959+ pax_close_kernel();
22960+
22961 arch_flush_lazy_mmu_mode();
22962
22963 return (void *)vaddr;
22964diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22965index be1ef57..55f0160 100644
22966--- a/arch/x86/mm/ioremap.c
22967+++ b/arch/x86/mm/ioremap.c
22968@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22969 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22970 int is_ram = page_is_ram(pfn);
22971
22972- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22973+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22974 return NULL;
22975 WARN_ON_ONCE(is_ram);
22976 }
22977@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
22978
22979 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
22980 if (page_is_ram(start >> PAGE_SHIFT))
22981+#ifdef CONFIG_HIGHMEM
22982+ if ((start >> PAGE_SHIFT) < max_low_pfn)
22983+#endif
22984 return __va(phys);
22985
22986 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
22987@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
22988 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22989
22990 static __initdata int after_paging_init;
22991-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22992+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22993
22994 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22995 {
22996@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
22997 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22998
22999 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23000- memset(bm_pte, 0, sizeof(bm_pte));
23001- pmd_populate_kernel(&init_mm, pmd, bm_pte);
23002+ pmd_populate_user(&init_mm, pmd, bm_pte);
23003
23004 /*
23005 * The boot-ioremap range spans multiple pmds, for which
23006diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23007index d87dd6d..bf3fa66 100644
23008--- a/arch/x86/mm/kmemcheck/kmemcheck.c
23009+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23010@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23011 * memory (e.g. tracked pages)? For now, we need this to avoid
23012 * invoking kmemcheck for PnP BIOS calls.
23013 */
23014- if (regs->flags & X86_VM_MASK)
23015+ if (v8086_mode(regs))
23016 return false;
23017- if (regs->cs != __KERNEL_CS)
23018+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23019 return false;
23020
23021 pte = kmemcheck_pte_lookup(address);
23022diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23023index 845df68..1d8d29f 100644
23024--- a/arch/x86/mm/mmap.c
23025+++ b/arch/x86/mm/mmap.c
23026@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23027 * Leave an at least ~128 MB hole with possible stack randomization.
23028 */
23029 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23030-#define MAX_GAP (TASK_SIZE/6*5)
23031+#define MAX_GAP (pax_task_size/6*5)
23032
23033 static int mmap_is_legacy(void)
23034 {
23035@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23036 return rnd << PAGE_SHIFT;
23037 }
23038
23039-static unsigned long mmap_base(void)
23040+static unsigned long mmap_base(struct mm_struct *mm)
23041 {
23042 unsigned long gap = rlimit(RLIMIT_STACK);
23043+ unsigned long pax_task_size = TASK_SIZE;
23044+
23045+#ifdef CONFIG_PAX_SEGMEXEC
23046+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23047+ pax_task_size = SEGMEXEC_TASK_SIZE;
23048+#endif
23049
23050 if (gap < MIN_GAP)
23051 gap = MIN_GAP;
23052 else if (gap > MAX_GAP)
23053 gap = MAX_GAP;
23054
23055- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23056+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23057 }
23058
23059 /*
23060 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23061 * does, but not when emulating X86_32
23062 */
23063-static unsigned long mmap_legacy_base(void)
23064+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23065 {
23066- if (mmap_is_ia32())
23067+ if (mmap_is_ia32()) {
23068+
23069+#ifdef CONFIG_PAX_SEGMEXEC
23070+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23071+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23072+ else
23073+#endif
23074+
23075 return TASK_UNMAPPED_BASE;
23076- else
23077+ } else
23078 return TASK_UNMAPPED_BASE + mmap_rnd();
23079 }
23080
23081@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23082 void arch_pick_mmap_layout(struct mm_struct *mm)
23083 {
23084 if (mmap_is_legacy()) {
23085- mm->mmap_base = mmap_legacy_base();
23086+ mm->mmap_base = mmap_legacy_base(mm);
23087+
23088+#ifdef CONFIG_PAX_RANDMMAP
23089+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23090+ mm->mmap_base += mm->delta_mmap;
23091+#endif
23092+
23093 mm->get_unmapped_area = arch_get_unmapped_area;
23094 mm->unmap_area = arch_unmap_area;
23095 } else {
23096- mm->mmap_base = mmap_base();
23097+ mm->mmap_base = mmap_base(mm);
23098+
23099+#ifdef CONFIG_PAX_RANDMMAP
23100+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23101+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23102+#endif
23103+
23104 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23105 mm->unmap_area = arch_unmap_area_topdown;
23106 }
23107diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23108index de54b9b..799051e 100644
23109--- a/arch/x86/mm/mmio-mod.c
23110+++ b/arch/x86/mm/mmio-mod.c
23111@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23112 break;
23113 default:
23114 {
23115- unsigned char *ip = (unsigned char *)instptr;
23116+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23117 my_trace->opcode = MMIO_UNKNOWN_OP;
23118 my_trace->width = 0;
23119 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23120@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23121 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23122 void __iomem *addr)
23123 {
23124- static atomic_t next_id;
23125+ static atomic_unchecked_t next_id;
23126 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23127 /* These are page-unaligned. */
23128 struct mmiotrace_map map = {
23129@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23130 .private = trace
23131 },
23132 .phys = offset,
23133- .id = atomic_inc_return(&next_id)
23134+ .id = atomic_inc_return_unchecked(&next_id)
23135 };
23136 map.map_id = trace->id;
23137
23138diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23139index b008656..773eac2 100644
23140--- a/arch/x86/mm/pageattr-test.c
23141+++ b/arch/x86/mm/pageattr-test.c
23142@@ -36,7 +36,7 @@ enum {
23143
23144 static int pte_testbit(pte_t pte)
23145 {
23146- return pte_flags(pte) & _PAGE_UNUSED1;
23147+ return pte_flags(pte) & _PAGE_CPA_TEST;
23148 }
23149
23150 struct split_state {
23151diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23152index f9e5267..6f6e27f 100644
23153--- a/arch/x86/mm/pageattr.c
23154+++ b/arch/x86/mm/pageattr.c
23155@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23156 */
23157 #ifdef CONFIG_PCI_BIOS
23158 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23159- pgprot_val(forbidden) |= _PAGE_NX;
23160+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23161 #endif
23162
23163 /*
23164@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23165 * Does not cover __inittext since that is gone later on. On
23166 * 64bit we do not enforce !NX on the low mapping
23167 */
23168- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23169- pgprot_val(forbidden) |= _PAGE_NX;
23170+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23171+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23172
23173+#ifdef CONFIG_DEBUG_RODATA
23174 /*
23175 * The .rodata section needs to be read-only. Using the pfn
23176 * catches all aliases.
23177@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23178 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23179 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23180 pgprot_val(forbidden) |= _PAGE_RW;
23181+#endif
23182
23183 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23184 /*
23185@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23186 }
23187 #endif
23188
23189+#ifdef CONFIG_PAX_KERNEXEC
23190+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23191+ pgprot_val(forbidden) |= _PAGE_RW;
23192+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23193+ }
23194+#endif
23195+
23196 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23197
23198 return prot;
23199@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23200 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23201 {
23202 /* change init_mm */
23203+ pax_open_kernel();
23204 set_pte_atomic(kpte, pte);
23205+
23206 #ifdef CONFIG_X86_32
23207 if (!SHARED_KERNEL_PMD) {
23208+
23209+#ifdef CONFIG_PAX_PER_CPU_PGD
23210+ unsigned long cpu;
23211+#else
23212 struct page *page;
23213+#endif
23214
23215+#ifdef CONFIG_PAX_PER_CPU_PGD
23216+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23217+ pgd_t *pgd = get_cpu_pgd(cpu);
23218+#else
23219 list_for_each_entry(page, &pgd_list, lru) {
23220- pgd_t *pgd;
23221+ pgd_t *pgd = (pgd_t *)page_address(page);
23222+#endif
23223+
23224 pud_t *pud;
23225 pmd_t *pmd;
23226
23227- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23228+ pgd += pgd_index(address);
23229 pud = pud_offset(pgd, address);
23230 pmd = pmd_offset(pud, address);
23231 set_pte_atomic((pte_t *)pmd, pte);
23232 }
23233 }
23234 #endif
23235+ pax_close_kernel();
23236 }
23237
23238 static int
23239diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23240index f6ff57b..481690f 100644
23241--- a/arch/x86/mm/pat.c
23242+++ b/arch/x86/mm/pat.c
23243@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23244
23245 if (!entry) {
23246 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23247- current->comm, current->pid, start, end);
23248+ current->comm, task_pid_nr(current), start, end);
23249 return -EINVAL;
23250 }
23251
23252@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23253 while (cursor < to) {
23254 if (!devmem_is_allowed(pfn)) {
23255 printk(KERN_INFO
23256- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23257- current->comm, from, to);
23258+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23259+ current->comm, from, to, cursor);
23260 return 0;
23261 }
23262 cursor += PAGE_SIZE;
23263@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23264 printk(KERN_INFO
23265 "%s:%d ioremap_change_attr failed %s "
23266 "for %Lx-%Lx\n",
23267- current->comm, current->pid,
23268+ current->comm, task_pid_nr(current),
23269 cattr_name(flags),
23270 base, (unsigned long long)(base + size));
23271 return -EINVAL;
23272@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23273 if (want_flags != flags) {
23274 printk(KERN_WARNING
23275 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23276- current->comm, current->pid,
23277+ current->comm, task_pid_nr(current),
23278 cattr_name(want_flags),
23279 (unsigned long long)paddr,
23280 (unsigned long long)(paddr + size),
23281@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23282 free_memtype(paddr, paddr + size);
23283 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23284 " for %Lx-%Lx, got %s\n",
23285- current->comm, current->pid,
23286+ current->comm, task_pid_nr(current),
23287 cattr_name(want_flags),
23288 (unsigned long long)paddr,
23289 (unsigned long long)(paddr + size),
23290diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23291index 9f0614d..92ae64a 100644
23292--- a/arch/x86/mm/pf_in.c
23293+++ b/arch/x86/mm/pf_in.c
23294@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23295 int i;
23296 enum reason_type rv = OTHERS;
23297
23298- p = (unsigned char *)ins_addr;
23299+ p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302
23303@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23304 struct prefix_bits prf;
23305 int i;
23306
23307- p = (unsigned char *)ins_addr;
23308+ p = (unsigned char *)ktla_ktva(ins_addr);
23309 p += skip_prefix(p, &prf);
23310 p += get_opcode(p, &opcode);
23311
23312@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23313 struct prefix_bits prf;
23314 int i;
23315
23316- p = (unsigned char *)ins_addr;
23317+ p = (unsigned char *)ktla_ktva(ins_addr);
23318 p += skip_prefix(p, &prf);
23319 p += get_opcode(p, &opcode);
23320
23321@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23322 struct prefix_bits prf;
23323 int i;
23324
23325- p = (unsigned char *)ins_addr;
23326+ p = (unsigned char *)ktla_ktva(ins_addr);
23327 p += skip_prefix(p, &prf);
23328 p += get_opcode(p, &opcode);
23329 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23330@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23331 struct prefix_bits prf;
23332 int i;
23333
23334- p = (unsigned char *)ins_addr;
23335+ p = (unsigned char *)ktla_ktva(ins_addr);
23336 p += skip_prefix(p, &prf);
23337 p += get_opcode(p, &opcode);
23338 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23339diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23340index 8573b83..6372501 100644
23341--- a/arch/x86/mm/pgtable.c
23342+++ b/arch/x86/mm/pgtable.c
23343@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23344 list_del(&page->lru);
23345 }
23346
23347-#define UNSHARED_PTRS_PER_PGD \
23348- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23349+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23350+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23351
23352+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23353+{
23354+ while (count--)
23355+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23356+}
23357+#endif
23358
23359+#ifdef CONFIG_PAX_PER_CPU_PGD
23360+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23361+{
23362+ while (count--)
23363+
23364+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23365+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23366+#else
23367+ *dst++ = *src++;
23368+#endif
23369+
23370+}
23371+#endif
23372+
23373+#ifdef CONFIG_X86_64
23374+#define pxd_t pud_t
23375+#define pyd_t pgd_t
23376+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23377+#define pxd_free(mm, pud) pud_free((mm), (pud))
23378+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23379+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23380+#define PYD_SIZE PGDIR_SIZE
23381+#else
23382+#define pxd_t pmd_t
23383+#define pyd_t pud_t
23384+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23385+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23386+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23387+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23388+#define PYD_SIZE PUD_SIZE
23389+#endif
23390+
23391+#ifdef CONFIG_PAX_PER_CPU_PGD
23392+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23393+static inline void pgd_dtor(pgd_t *pgd) {}
23394+#else
23395 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23396 {
23397 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23398@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23399 pgd_list_del(pgd);
23400 spin_unlock(&pgd_lock);
23401 }
23402+#endif
23403
23404 /*
23405 * List of all pgd's needed for non-PAE so it can invalidate entries
23406@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23407 * -- wli
23408 */
23409
23410-#ifdef CONFIG_X86_PAE
23411+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23412 /*
23413 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23414 * updating the top-level pagetable entries to guarantee the
23415@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23416 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23417 * and initialize the kernel pmds here.
23418 */
23419-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23420+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23421
23422 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23423 {
23424@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23425 */
23426 flush_tlb_mm(mm);
23427 }
23428+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23429+#define PREALLOCATED_PXDS USER_PGD_PTRS
23430 #else /* !CONFIG_X86_PAE */
23431
23432 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23433-#define PREALLOCATED_PMDS 0
23434+#define PREALLOCATED_PXDS 0
23435
23436 #endif /* CONFIG_X86_PAE */
23437
23438-static void free_pmds(pmd_t *pmds[])
23439+static void free_pxds(pxd_t *pxds[])
23440 {
23441 int i;
23442
23443- for(i = 0; i < PREALLOCATED_PMDS; i++)
23444- if (pmds[i])
23445- free_page((unsigned long)pmds[i]);
23446+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23447+ if (pxds[i])
23448+ free_page((unsigned long)pxds[i]);
23449 }
23450
23451-static int preallocate_pmds(pmd_t *pmds[])
23452+static int preallocate_pxds(pxd_t *pxds[])
23453 {
23454 int i;
23455 bool failed = false;
23456
23457- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23458- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23459- if (pmd == NULL)
23460+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23461+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23462+ if (pxd == NULL)
23463 failed = true;
23464- pmds[i] = pmd;
23465+ pxds[i] = pxd;
23466 }
23467
23468 if (failed) {
23469- free_pmds(pmds);
23470+ free_pxds(pxds);
23471 return -ENOMEM;
23472 }
23473
23474@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23475 * preallocate which never got a corresponding vma will need to be
23476 * freed manually.
23477 */
23478-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23479+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23480 {
23481 int i;
23482
23483- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23484+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23485 pgd_t pgd = pgdp[i];
23486
23487 if (pgd_val(pgd) != 0) {
23488- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23489+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23490
23491- pgdp[i] = native_make_pgd(0);
23492+ set_pgd(pgdp + i, native_make_pgd(0));
23493
23494- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23495- pmd_free(mm, pmd);
23496+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23497+ pxd_free(mm, pxd);
23498 }
23499 }
23500 }
23501
23502-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23503+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23504 {
23505- pud_t *pud;
23506+ pyd_t *pyd;
23507 unsigned long addr;
23508 int i;
23509
23510- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23511+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23512 return;
23513
23514- pud = pud_offset(pgd, 0);
23515+#ifdef CONFIG_X86_64
23516+ pyd = pyd_offset(mm, 0L);
23517+#else
23518+ pyd = pyd_offset(pgd, 0L);
23519+#endif
23520
23521- for (addr = i = 0; i < PREALLOCATED_PMDS;
23522- i++, pud++, addr += PUD_SIZE) {
23523- pmd_t *pmd = pmds[i];
23524+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23525+ i++, pyd++, addr += PYD_SIZE) {
23526+ pxd_t *pxd = pxds[i];
23527
23528 if (i >= KERNEL_PGD_BOUNDARY)
23529- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23530- sizeof(pmd_t) * PTRS_PER_PMD);
23531+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23532+ sizeof(pxd_t) * PTRS_PER_PMD);
23533
23534- pud_populate(mm, pud, pmd);
23535+ pyd_populate(mm, pyd, pxd);
23536 }
23537 }
23538
23539 pgd_t *pgd_alloc(struct mm_struct *mm)
23540 {
23541 pgd_t *pgd;
23542- pmd_t *pmds[PREALLOCATED_PMDS];
23543+ pxd_t *pxds[PREALLOCATED_PXDS];
23544
23545 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23546
23547@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23548
23549 mm->pgd = pgd;
23550
23551- if (preallocate_pmds(pmds) != 0)
23552+ if (preallocate_pxds(pxds) != 0)
23553 goto out_free_pgd;
23554
23555 if (paravirt_pgd_alloc(mm) != 0)
23556- goto out_free_pmds;
23557+ goto out_free_pxds;
23558
23559 /*
23560 * Make sure that pre-populating the pmds is atomic with
23561@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23562 spin_lock(&pgd_lock);
23563
23564 pgd_ctor(mm, pgd);
23565- pgd_prepopulate_pmd(mm, pgd, pmds);
23566+ pgd_prepopulate_pxd(mm, pgd, pxds);
23567
23568 spin_unlock(&pgd_lock);
23569
23570 return pgd;
23571
23572-out_free_pmds:
23573- free_pmds(pmds);
23574+out_free_pxds:
23575+ free_pxds(pxds);
23576 out_free_pgd:
23577 free_page((unsigned long)pgd);
23578 out:
23579@@ -295,7 +344,7 @@ out:
23580
23581 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23582 {
23583- pgd_mop_up_pmds(mm, pgd);
23584+ pgd_mop_up_pxds(mm, pgd);
23585 pgd_dtor(pgd);
23586 paravirt_pgd_free(mm, pgd);
23587 free_page((unsigned long)pgd);
23588diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23589index cac7184..09a39fa 100644
23590--- a/arch/x86/mm/pgtable_32.c
23591+++ b/arch/x86/mm/pgtable_32.c
23592@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23593 return;
23594 }
23595 pte = pte_offset_kernel(pmd, vaddr);
23596+
23597+ pax_open_kernel();
23598 if (pte_val(pteval))
23599 set_pte_at(&init_mm, vaddr, pte, pteval);
23600 else
23601 pte_clear(&init_mm, vaddr, pte);
23602+ pax_close_kernel();
23603
23604 /*
23605 * It's enough to flush this one mapping.
23606diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23607index 410531d..0f16030 100644
23608--- a/arch/x86/mm/setup_nx.c
23609+++ b/arch/x86/mm/setup_nx.c
23610@@ -5,8 +5,10 @@
23611 #include <asm/pgtable.h>
23612 #include <asm/proto.h>
23613
23614+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23615 static int disable_nx __cpuinitdata;
23616
23617+#ifndef CONFIG_PAX_PAGEEXEC
23618 /*
23619 * noexec = on|off
23620 *
23621@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23622 return 0;
23623 }
23624 early_param("noexec", noexec_setup);
23625+#endif
23626+
23627+#endif
23628
23629 void __cpuinit x86_configure_nx(void)
23630 {
23631+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23632 if (cpu_has_nx && !disable_nx)
23633 __supported_pte_mask |= _PAGE_NX;
23634 else
23635+#endif
23636 __supported_pte_mask &= ~_PAGE_NX;
23637 }
23638
23639diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23640index d6c0418..06a0ad5 100644
23641--- a/arch/x86/mm/tlb.c
23642+++ b/arch/x86/mm/tlb.c
23643@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23644 BUG();
23645 cpumask_clear_cpu(cpu,
23646 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23647+
23648+#ifndef CONFIG_PAX_PER_CPU_PGD
23649 load_cr3(swapper_pg_dir);
23650+#endif
23651+
23652 }
23653 EXPORT_SYMBOL_GPL(leave_mm);
23654
23655diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23656index 6687022..ceabcfa 100644
23657--- a/arch/x86/net/bpf_jit.S
23658+++ b/arch/x86/net/bpf_jit.S
23659@@ -9,6 +9,7 @@
23660 */
23661 #include <linux/linkage.h>
23662 #include <asm/dwarf2.h>
23663+#include <asm/alternative-asm.h>
23664
23665 /*
23666 * Calling convention :
23667@@ -35,6 +36,7 @@ sk_load_word:
23668 jle bpf_slow_path_word
23669 mov (SKBDATA,%rsi),%eax
23670 bswap %eax /* ntohl() */
23671+ pax_force_retaddr
23672 ret
23673
23674
23675@@ -53,6 +55,7 @@ sk_load_half:
23676 jle bpf_slow_path_half
23677 movzwl (SKBDATA,%rsi),%eax
23678 rol $8,%ax # ntohs()
23679+ pax_force_retaddr
23680 ret
23681
23682 sk_load_byte_ind:
23683@@ -66,6 +69,7 @@ sk_load_byte:
23684 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23685 jle bpf_slow_path_byte
23686 movzbl (SKBDATA,%rsi),%eax
23687+ pax_force_retaddr
23688 ret
23689
23690 /**
23691@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23692 movzbl (SKBDATA,%rsi),%ebx
23693 and $15,%bl
23694 shl $2,%bl
23695+ pax_force_retaddr
23696 ret
23697 CFI_ENDPROC
23698 ENDPROC(sk_load_byte_msh)
23699@@ -91,6 +96,7 @@ bpf_error:
23700 xor %eax,%eax
23701 mov -8(%rbp),%rbx
23702 leaveq
23703+ pax_force_retaddr
23704 ret
23705
23706 /* rsi contains offset and can be scratched */
23707@@ -113,6 +119,7 @@ bpf_slow_path_word:
23708 js bpf_error
23709 mov -12(%rbp),%eax
23710 bswap %eax
23711+ pax_force_retaddr
23712 ret
23713
23714 bpf_slow_path_half:
23715@@ -121,12 +128,14 @@ bpf_slow_path_half:
23716 mov -12(%rbp),%ax
23717 rol $8,%ax
23718 movzwl %ax,%eax
23719+ pax_force_retaddr
23720 ret
23721
23722 bpf_slow_path_byte:
23723 bpf_slow_path_common(1)
23724 js bpf_error
23725 movzbl -12(%rbp),%eax
23726+ pax_force_retaddr
23727 ret
23728
23729 bpf_slow_path_byte_msh:
23730@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23731 and $15,%al
23732 shl $2,%al
23733 xchg %eax,%ebx
23734+ pax_force_retaddr
23735 ret
23736diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23737index 7c1b765..180e3b2 100644
23738--- a/arch/x86/net/bpf_jit_comp.c
23739+++ b/arch/x86/net/bpf_jit_comp.c
23740@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23741 set_fs(old_fs);
23742 }
23743
23744+struct bpf_jit_work {
23745+ struct work_struct work;
23746+ void *image;
23747+};
23748
23749 void bpf_jit_compile(struct sk_filter *fp)
23750 {
23751@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23752 if (addrs == NULL)
23753 return;
23754
23755+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23756+ if (!fp->work)
23757+ goto out;
23758+
23759 /* Before first pass, make a rough estimation of addrs[]
23760 * each bpf instruction is translated to less than 64 bytes
23761 */
23762@@ -592,11 +600,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23763 if (image) {
23764 if (unlikely(proglen + ilen > oldproglen)) {
23765 pr_err("bpb_jit_compile fatal error\n");
23766- kfree(addrs);
23767- module_free(NULL, image);
23768- return;
23769+ module_free_exec(NULL, image);
23770+ goto out;
23771 }
23772+ pax_open_kernel();
23773 memcpy(image + proglen, temp, ilen);
23774+ pax_close_kernel();
23775 }
23776 proglen += ilen;
23777 addrs[i] = proglen;
23778@@ -617,7 +626,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23779 break;
23780 }
23781 if (proglen == oldproglen) {
23782- image = module_alloc(max_t(unsigned int,
23783+ image = module_alloc_exec(max_t(unsigned int,
23784 proglen,
23785 sizeof(struct work_struct)));
23786 if (!image)
23787@@ -639,24 +648,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23788 fp->bpf_func = (void *)image;
23789 }
23790 out:
23791+ kfree(fp->work);
23792 kfree(addrs);
23793 return;
23794 }
23795
23796 static void jit_free_defer(struct work_struct *arg)
23797 {
23798- module_free(NULL, arg);
23799+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23800+ kfree(arg);
23801 }
23802
23803 /* run from softirq, we must use a work_struct to call
23804- * module_free() from process context
23805+ * module_free_exec() from process context
23806 */
23807 void bpf_jit_free(struct sk_filter *fp)
23808 {
23809 if (fp->bpf_func != sk_run_filter) {
23810- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23811+ struct work_struct *work = &fp->work->work;
23812
23813 INIT_WORK(work, jit_free_defer);
23814+ fp->work->image = fp->bpf_func;
23815 schedule_work(work);
23816 }
23817 }
23818diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23819index bff89df..377758a 100644
23820--- a/arch/x86/oprofile/backtrace.c
23821+++ b/arch/x86/oprofile/backtrace.c
23822@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23823 struct stack_frame_ia32 *fp;
23824 unsigned long bytes;
23825
23826- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23827+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23828 if (bytes != sizeof(bufhead))
23829 return NULL;
23830
23831- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23832+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23833
23834 oprofile_add_trace(bufhead[0].return_address);
23835
23836@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23837 struct stack_frame bufhead[2];
23838 unsigned long bytes;
23839
23840- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23841+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23842 if (bytes != sizeof(bufhead))
23843 return NULL;
23844
23845@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23846 {
23847 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23848
23849- if (!user_mode_vm(regs)) {
23850+ if (!user_mode(regs)) {
23851 unsigned long stack = kernel_stack_pointer(regs);
23852 if (depth)
23853 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23854diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23855index cb29191..036766d 100644
23856--- a/arch/x86/pci/mrst.c
23857+++ b/arch/x86/pci/mrst.c
23858@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23859 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23860 pci_mmcfg_late_init();
23861 pcibios_enable_irq = mrst_pci_irq_enable;
23862- pci_root_ops = pci_mrst_ops;
23863+ pax_open_kernel();
23864+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23865+ pax_close_kernel();
23866 /* Continue with standard init */
23867 return 1;
23868 }
23869diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23870index db0e9a5..8844dea 100644
23871--- a/arch/x86/pci/pcbios.c
23872+++ b/arch/x86/pci/pcbios.c
23873@@ -79,50 +79,93 @@ union bios32 {
23874 static struct {
23875 unsigned long address;
23876 unsigned short segment;
23877-} bios32_indirect = { 0, __KERNEL_CS };
23878+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23879
23880 /*
23881 * Returns the entry point for the given service, NULL on error
23882 */
23883
23884-static unsigned long bios32_service(unsigned long service)
23885+static unsigned long __devinit bios32_service(unsigned long service)
23886 {
23887 unsigned char return_code; /* %al */
23888 unsigned long address; /* %ebx */
23889 unsigned long length; /* %ecx */
23890 unsigned long entry; /* %edx */
23891 unsigned long flags;
23892+ struct desc_struct d, *gdt;
23893
23894 local_irq_save(flags);
23895- __asm__("lcall *(%%edi); cld"
23896+
23897+ gdt = get_cpu_gdt_table(smp_processor_id());
23898+
23899+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23900+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23901+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23902+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23903+
23904+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23905 : "=a" (return_code),
23906 "=b" (address),
23907 "=c" (length),
23908 "=d" (entry)
23909 : "0" (service),
23910 "1" (0),
23911- "D" (&bios32_indirect));
23912+ "D" (&bios32_indirect),
23913+ "r"(__PCIBIOS_DS)
23914+ : "memory");
23915+
23916+ pax_open_kernel();
23917+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23918+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23919+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23920+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23921+ pax_close_kernel();
23922+
23923 local_irq_restore(flags);
23924
23925 switch (return_code) {
23926- case 0:
23927- return address + entry;
23928- case 0x80: /* Not present */
23929- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23930- return 0;
23931- default: /* Shouldn't happen */
23932- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23933- service, return_code);
23934+ case 0: {
23935+ int cpu;
23936+ unsigned char flags;
23937+
23938+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23939+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23940+ printk(KERN_WARNING "bios32_service: not valid\n");
23941 return 0;
23942+ }
23943+ address = address + PAGE_OFFSET;
23944+ length += 16UL; /* some BIOSs underreport this... */
23945+ flags = 4;
23946+ if (length >= 64*1024*1024) {
23947+ length >>= PAGE_SHIFT;
23948+ flags |= 8;
23949+ }
23950+
23951+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23952+ gdt = get_cpu_gdt_table(cpu);
23953+ pack_descriptor(&d, address, length, 0x9b, flags);
23954+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23955+ pack_descriptor(&d, address, length, 0x93, flags);
23956+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23957+ }
23958+ return entry;
23959+ }
23960+ case 0x80: /* Not present */
23961+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23962+ return 0;
23963+ default: /* Shouldn't happen */
23964+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23965+ service, return_code);
23966+ return 0;
23967 }
23968 }
23969
23970 static struct {
23971 unsigned long address;
23972 unsigned short segment;
23973-} pci_indirect = { 0, __KERNEL_CS };
23974+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23975
23976-static int pci_bios_present;
23977+static int pci_bios_present __read_only;
23978
23979 static int __devinit check_pcibios(void)
23980 {
23981@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23982 unsigned long flags, pcibios_entry;
23983
23984 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23985- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23986+ pci_indirect.address = pcibios_entry;
23987
23988 local_irq_save(flags);
23989- __asm__(
23990- "lcall *(%%edi); cld\n\t"
23991+ __asm__("movw %w6, %%ds\n\t"
23992+ "lcall *%%ss:(%%edi); cld\n\t"
23993+ "push %%ss\n\t"
23994+ "pop %%ds\n\t"
23995 "jc 1f\n\t"
23996 "xor %%ah, %%ah\n"
23997 "1:"
23998@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23999 "=b" (ebx),
24000 "=c" (ecx)
24001 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24002- "D" (&pci_indirect)
24003+ "D" (&pci_indirect),
24004+ "r" (__PCIBIOS_DS)
24005 : "memory");
24006 local_irq_restore(flags);
24007
24008@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24009
24010 switch (len) {
24011 case 1:
24012- __asm__("lcall *(%%esi); cld\n\t"
24013+ __asm__("movw %w6, %%ds\n\t"
24014+ "lcall *%%ss:(%%esi); cld\n\t"
24015+ "push %%ss\n\t"
24016+ "pop %%ds\n\t"
24017 "jc 1f\n\t"
24018 "xor %%ah, %%ah\n"
24019 "1:"
24020@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24021 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24022 "b" (bx),
24023 "D" ((long)reg),
24024- "S" (&pci_indirect));
24025+ "S" (&pci_indirect),
24026+ "r" (__PCIBIOS_DS));
24027 /*
24028 * Zero-extend the result beyond 8 bits, do not trust the
24029 * BIOS having done it:
24030@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24031 *value &= 0xff;
24032 break;
24033 case 2:
24034- __asm__("lcall *(%%esi); cld\n\t"
24035+ __asm__("movw %w6, %%ds\n\t"
24036+ "lcall *%%ss:(%%esi); cld\n\t"
24037+ "push %%ss\n\t"
24038+ "pop %%ds\n\t"
24039 "jc 1f\n\t"
24040 "xor %%ah, %%ah\n"
24041 "1:"
24042@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24043 : "1" (PCIBIOS_READ_CONFIG_WORD),
24044 "b" (bx),
24045 "D" ((long)reg),
24046- "S" (&pci_indirect));
24047+ "S" (&pci_indirect),
24048+ "r" (__PCIBIOS_DS));
24049 /*
24050 * Zero-extend the result beyond 16 bits, do not trust the
24051 * BIOS having done it:
24052@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24053 *value &= 0xffff;
24054 break;
24055 case 4:
24056- __asm__("lcall *(%%esi); cld\n\t"
24057+ __asm__("movw %w6, %%ds\n\t"
24058+ "lcall *%%ss:(%%esi); cld\n\t"
24059+ "push %%ss\n\t"
24060+ "pop %%ds\n\t"
24061 "jc 1f\n\t"
24062 "xor %%ah, %%ah\n"
24063 "1:"
24064@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24065 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24066 "b" (bx),
24067 "D" ((long)reg),
24068- "S" (&pci_indirect));
24069+ "S" (&pci_indirect),
24070+ "r" (__PCIBIOS_DS));
24071 break;
24072 }
24073
24074@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24075
24076 switch (len) {
24077 case 1:
24078- __asm__("lcall *(%%esi); cld\n\t"
24079+ __asm__("movw %w6, %%ds\n\t"
24080+ "lcall *%%ss:(%%esi); cld\n\t"
24081+ "push %%ss\n\t"
24082+ "pop %%ds\n\t"
24083 "jc 1f\n\t"
24084 "xor %%ah, %%ah\n"
24085 "1:"
24086@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24087 "c" (value),
24088 "b" (bx),
24089 "D" ((long)reg),
24090- "S" (&pci_indirect));
24091+ "S" (&pci_indirect),
24092+ "r" (__PCIBIOS_DS));
24093 break;
24094 case 2:
24095- __asm__("lcall *(%%esi); cld\n\t"
24096+ __asm__("movw %w6, %%ds\n\t"
24097+ "lcall *%%ss:(%%esi); cld\n\t"
24098+ "push %%ss\n\t"
24099+ "pop %%ds\n\t"
24100 "jc 1f\n\t"
24101 "xor %%ah, %%ah\n"
24102 "1:"
24103@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24104 "c" (value),
24105 "b" (bx),
24106 "D" ((long)reg),
24107- "S" (&pci_indirect));
24108+ "S" (&pci_indirect),
24109+ "r" (__PCIBIOS_DS));
24110 break;
24111 case 4:
24112- __asm__("lcall *(%%esi); cld\n\t"
24113+ __asm__("movw %w6, %%ds\n\t"
24114+ "lcall *%%ss:(%%esi); cld\n\t"
24115+ "push %%ss\n\t"
24116+ "pop %%ds\n\t"
24117 "jc 1f\n\t"
24118 "xor %%ah, %%ah\n"
24119 "1:"
24120@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24121 "c" (value),
24122 "b" (bx),
24123 "D" ((long)reg),
24124- "S" (&pci_indirect));
24125+ "S" (&pci_indirect),
24126+ "r" (__PCIBIOS_DS));
24127 break;
24128 }
24129
24130@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24131
24132 DBG("PCI: Fetching IRQ routing table... ");
24133 __asm__("push %%es\n\t"
24134+ "movw %w8, %%ds\n\t"
24135 "push %%ds\n\t"
24136 "pop %%es\n\t"
24137- "lcall *(%%esi); cld\n\t"
24138+ "lcall *%%ss:(%%esi); cld\n\t"
24139 "pop %%es\n\t"
24140+ "push %%ss\n\t"
24141+ "pop %%ds\n"
24142 "jc 1f\n\t"
24143 "xor %%ah, %%ah\n"
24144 "1:"
24145@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24146 "1" (0),
24147 "D" ((long) &opt),
24148 "S" (&pci_indirect),
24149- "m" (opt)
24150+ "m" (opt),
24151+ "r" (__PCIBIOS_DS)
24152 : "memory");
24153 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24154 if (ret & 0xff00)
24155@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24156 {
24157 int ret;
24158
24159- __asm__("lcall *(%%esi); cld\n\t"
24160+ __asm__("movw %w5, %%ds\n\t"
24161+ "lcall *%%ss:(%%esi); cld\n\t"
24162+ "push %%ss\n\t"
24163+ "pop %%ds\n"
24164 "jc 1f\n\t"
24165 "xor %%ah, %%ah\n"
24166 "1:"
24167@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24168 : "0" (PCIBIOS_SET_PCI_HW_INT),
24169 "b" ((dev->bus->number << 8) | dev->devfn),
24170 "c" ((irq << 8) | (pin + 10)),
24171- "S" (&pci_indirect));
24172+ "S" (&pci_indirect),
24173+ "r" (__PCIBIOS_DS));
24174 return !(ret & 0xff00);
24175 }
24176 EXPORT_SYMBOL(pcibios_set_irq_routing);
24177diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24178index 40e4469..1ab536e 100644
24179--- a/arch/x86/platform/efi/efi_32.c
24180+++ b/arch/x86/platform/efi/efi_32.c
24181@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24182 {
24183 struct desc_ptr gdt_descr;
24184
24185+#ifdef CONFIG_PAX_KERNEXEC
24186+ struct desc_struct d;
24187+#endif
24188+
24189 local_irq_save(efi_rt_eflags);
24190
24191 load_cr3(initial_page_table);
24192 __flush_tlb_all();
24193
24194+#ifdef CONFIG_PAX_KERNEXEC
24195+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24196+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24197+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24198+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24199+#endif
24200+
24201 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24202 gdt_descr.size = GDT_SIZE - 1;
24203 load_gdt(&gdt_descr);
24204@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24205 {
24206 struct desc_ptr gdt_descr;
24207
24208+#ifdef CONFIG_PAX_KERNEXEC
24209+ struct desc_struct d;
24210+
24211+ memset(&d, 0, sizeof d);
24212+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24213+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24214+#endif
24215+
24216 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24217 gdt_descr.size = GDT_SIZE - 1;
24218 load_gdt(&gdt_descr);
24219diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24220index fbe66e6..c5c0dd2 100644
24221--- a/arch/x86/platform/efi/efi_stub_32.S
24222+++ b/arch/x86/platform/efi/efi_stub_32.S
24223@@ -6,7 +6,9 @@
24224 */
24225
24226 #include <linux/linkage.h>
24227+#include <linux/init.h>
24228 #include <asm/page_types.h>
24229+#include <asm/segment.h>
24230
24231 /*
24232 * efi_call_phys(void *, ...) is a function with variable parameters.
24233@@ -20,7 +22,7 @@
24234 * service functions will comply with gcc calling convention, too.
24235 */
24236
24237-.text
24238+__INIT
24239 ENTRY(efi_call_phys)
24240 /*
24241 * 0. The function can only be called in Linux kernel. So CS has been
24242@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24243 * The mapping of lower virtual memory has been created in prelog and
24244 * epilog.
24245 */
24246- movl $1f, %edx
24247- subl $__PAGE_OFFSET, %edx
24248- jmp *%edx
24249+ movl $(__KERNEXEC_EFI_DS), %edx
24250+ mov %edx, %ds
24251+ mov %edx, %es
24252+ mov %edx, %ss
24253+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24254 1:
24255
24256 /*
24257@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24258 * parameter 2, ..., param n. To make things easy, we save the return
24259 * address of efi_call_phys in a global variable.
24260 */
24261- popl %edx
24262- movl %edx, saved_return_addr
24263- /* get the function pointer into ECX*/
24264- popl %ecx
24265- movl %ecx, efi_rt_function_ptr
24266- movl $2f, %edx
24267- subl $__PAGE_OFFSET, %edx
24268- pushl %edx
24269+ popl (saved_return_addr)
24270+ popl (efi_rt_function_ptr)
24271
24272 /*
24273 * 3. Clear PG bit in %CR0.
24274@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24275 /*
24276 * 5. Call the physical function.
24277 */
24278- jmp *%ecx
24279+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24280
24281-2:
24282 /*
24283 * 6. After EFI runtime service returns, control will return to
24284 * following instruction. We'd better readjust stack pointer first.
24285@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24286 movl %cr0, %edx
24287 orl $0x80000000, %edx
24288 movl %edx, %cr0
24289- jmp 1f
24290-1:
24291+
24292 /*
24293 * 8. Now restore the virtual mode from flat mode by
24294 * adding EIP with PAGE_OFFSET.
24295 */
24296- movl $1f, %edx
24297- jmp *%edx
24298+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24299 1:
24300+ movl $(__KERNEL_DS), %edx
24301+ mov %edx, %ds
24302+ mov %edx, %es
24303+ mov %edx, %ss
24304
24305 /*
24306 * 9. Balance the stack. And because EAX contain the return value,
24307 * we'd better not clobber it.
24308 */
24309- leal efi_rt_function_ptr, %edx
24310- movl (%edx), %ecx
24311- pushl %ecx
24312+ pushl (efi_rt_function_ptr)
24313
24314 /*
24315- * 10. Push the saved return address onto the stack and return.
24316+ * 10. Return to the saved return address.
24317 */
24318- leal saved_return_addr, %edx
24319- movl (%edx), %ecx
24320- pushl %ecx
24321- ret
24322+ jmpl *(saved_return_addr)
24323 ENDPROC(efi_call_phys)
24324 .previous
24325
24326-.data
24327+__INITDATA
24328 saved_return_addr:
24329 .long 0
24330 efi_rt_function_ptr:
24331diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24332index 4c07cca..2c8427d 100644
24333--- a/arch/x86/platform/efi/efi_stub_64.S
24334+++ b/arch/x86/platform/efi/efi_stub_64.S
24335@@ -7,6 +7,7 @@
24336 */
24337
24338 #include <linux/linkage.h>
24339+#include <asm/alternative-asm.h>
24340
24341 #define SAVE_XMM \
24342 mov %rsp, %rax; \
24343@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24344 call *%rdi
24345 addq $32, %rsp
24346 RESTORE_XMM
24347+ pax_force_retaddr 0, 1
24348 ret
24349 ENDPROC(efi_call0)
24350
24351@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24352 call *%rdi
24353 addq $32, %rsp
24354 RESTORE_XMM
24355+ pax_force_retaddr 0, 1
24356 ret
24357 ENDPROC(efi_call1)
24358
24359@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24360 call *%rdi
24361 addq $32, %rsp
24362 RESTORE_XMM
24363+ pax_force_retaddr 0, 1
24364 ret
24365 ENDPROC(efi_call2)
24366
24367@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24368 call *%rdi
24369 addq $32, %rsp
24370 RESTORE_XMM
24371+ pax_force_retaddr 0, 1
24372 ret
24373 ENDPROC(efi_call3)
24374
24375@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24376 call *%rdi
24377 addq $32, %rsp
24378 RESTORE_XMM
24379+ pax_force_retaddr 0, 1
24380 ret
24381 ENDPROC(efi_call4)
24382
24383@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24384 call *%rdi
24385 addq $48, %rsp
24386 RESTORE_XMM
24387+ pax_force_retaddr 0, 1
24388 ret
24389 ENDPROC(efi_call5)
24390
24391@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24392 call *%rdi
24393 addq $48, %rsp
24394 RESTORE_XMM
24395+ pax_force_retaddr 0, 1
24396 ret
24397 ENDPROC(efi_call6)
24398diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24399index ad4ec1c..686479e 100644
24400--- a/arch/x86/platform/mrst/mrst.c
24401+++ b/arch/x86/platform/mrst/mrst.c
24402@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24403 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24404 int sfi_mrtc_num;
24405
24406-static void mrst_power_off(void)
24407+static __noreturn void mrst_power_off(void)
24408 {
24409 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24410 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24411+ BUG();
24412 }
24413
24414-static void mrst_reboot(void)
24415+static __noreturn void mrst_reboot(void)
24416 {
24417 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24418 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24419 else
24420 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24421+ BUG();
24422 }
24423
24424 /* parse all the mtimer info to a static mtimer array */
24425diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24426index f10c0af..3ec1f95 100644
24427--- a/arch/x86/power/cpu.c
24428+++ b/arch/x86/power/cpu.c
24429@@ -131,7 +131,7 @@ static void do_fpu_end(void)
24430 static void fix_processor_context(void)
24431 {
24432 int cpu = smp_processor_id();
24433- struct tss_struct *t = &per_cpu(init_tss, cpu);
24434+ struct tss_struct *t = init_tss + cpu;
24435
24436 set_tss_desc(cpu, t); /*
24437 * This just modifies memory; should not be
24438@@ -141,7 +141,9 @@ static void fix_processor_context(void)
24439 */
24440
24441 #ifdef CONFIG_X86_64
24442+ pax_open_kernel();
24443 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24444+ pax_close_kernel();
24445
24446 syscall_init(); /* This sets MSR_*STAR and related */
24447 #endif
24448diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24449index 5d17950..2253fc9 100644
24450--- a/arch/x86/vdso/Makefile
24451+++ b/arch/x86/vdso/Makefile
24452@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24453 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24454 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24455
24456-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24457+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24458 GCOV_PROFILE := n
24459
24460 #
24461diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24462index 468d591..8e80a0a 100644
24463--- a/arch/x86/vdso/vdso32-setup.c
24464+++ b/arch/x86/vdso/vdso32-setup.c
24465@@ -25,6 +25,7 @@
24466 #include <asm/tlbflush.h>
24467 #include <asm/vdso.h>
24468 #include <asm/proto.h>
24469+#include <asm/mman.h>
24470
24471 enum {
24472 VDSO_DISABLED = 0,
24473@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24474 void enable_sep_cpu(void)
24475 {
24476 int cpu = get_cpu();
24477- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24478+ struct tss_struct *tss = init_tss + cpu;
24479
24480 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24481 put_cpu();
24482@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24483 gate_vma.vm_start = FIXADDR_USER_START;
24484 gate_vma.vm_end = FIXADDR_USER_END;
24485 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24486- gate_vma.vm_page_prot = __P101;
24487+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24488 /*
24489 * Make sure the vDSO gets into every core dump.
24490 * Dumping its contents makes post-mortem fully interpretable later
24491@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24492 if (compat)
24493 addr = VDSO_HIGH_BASE;
24494 else {
24495- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24496+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24497 if (IS_ERR_VALUE(addr)) {
24498 ret = addr;
24499 goto up_fail;
24500 }
24501 }
24502
24503- current->mm->context.vdso = (void *)addr;
24504+ current->mm->context.vdso = addr;
24505
24506 if (compat_uses_vma || !compat) {
24507 /*
24508@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24509 }
24510
24511 current_thread_info()->sysenter_return =
24512- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24513+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24514
24515 up_fail:
24516 if (ret)
24517- current->mm->context.vdso = NULL;
24518+ current->mm->context.vdso = 0;
24519
24520 up_write(&mm->mmap_sem);
24521
24522@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24523
24524 const char *arch_vma_name(struct vm_area_struct *vma)
24525 {
24526- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24527+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24528 return "[vdso]";
24529+
24530+#ifdef CONFIG_PAX_SEGMEXEC
24531+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24532+ return "[vdso]";
24533+#endif
24534+
24535 return NULL;
24536 }
24537
24538@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24539 * Check to see if the corresponding task was created in compat vdso
24540 * mode.
24541 */
24542- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24543+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24544 return &gate_vma;
24545 return NULL;
24546 }
24547diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24548index 153407c..611cba9 100644
24549--- a/arch/x86/vdso/vma.c
24550+++ b/arch/x86/vdso/vma.c
24551@@ -16,8 +16,6 @@
24552 #include <asm/vdso.h>
24553 #include <asm/page.h>
24554
24555-unsigned int __read_mostly vdso_enabled = 1;
24556-
24557 extern char vdso_start[], vdso_end[];
24558 extern unsigned short vdso_sync_cpuid;
24559
24560@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24561 * unaligned here as a result of stack start randomization.
24562 */
24563 addr = PAGE_ALIGN(addr);
24564- addr = align_addr(addr, NULL, ALIGN_VDSO);
24565
24566 return addr;
24567 }
24568@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24569 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24570 {
24571 struct mm_struct *mm = current->mm;
24572- unsigned long addr;
24573+ unsigned long addr = 0;
24574 int ret;
24575
24576- if (!vdso_enabled)
24577- return 0;
24578-
24579 down_write(&mm->mmap_sem);
24580+
24581+#ifdef CONFIG_PAX_RANDMMAP
24582+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24583+#endif
24584+
24585 addr = vdso_addr(mm->start_stack, vdso_size);
24586+ addr = align_addr(addr, NULL, ALIGN_VDSO);
24587 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24588 if (IS_ERR_VALUE(addr)) {
24589 ret = addr;
24590 goto up_fail;
24591 }
24592
24593- current->mm->context.vdso = (void *)addr;
24594+ mm->context.vdso = addr;
24595
24596 ret = install_special_mapping(mm, addr, vdso_size,
24597 VM_READ|VM_EXEC|
24598 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24599 VM_ALWAYSDUMP,
24600 vdso_pages);
24601- if (ret) {
24602- current->mm->context.vdso = NULL;
24603- goto up_fail;
24604- }
24605+
24606+ if (ret)
24607+ mm->context.vdso = 0;
24608
24609 up_fail:
24610 up_write(&mm->mmap_sem);
24611 return ret;
24612 }
24613-
24614-static __init int vdso_setup(char *s)
24615-{
24616- vdso_enabled = simple_strtoul(s, NULL, 0);
24617- return 0;
24618-}
24619-__setup("vdso=", vdso_setup);
24620diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24621index 1f92865..c843b20 100644
24622--- a/arch/x86/xen/enlighten.c
24623+++ b/arch/x86/xen/enlighten.c
24624@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24625
24626 struct shared_info xen_dummy_shared_info;
24627
24628-void *xen_initial_gdt;
24629-
24630 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24631 __read_mostly int xen_have_vector_callback;
24632 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24633@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24634 #endif
24635 };
24636
24637-static void xen_reboot(int reason)
24638+static __noreturn void xen_reboot(int reason)
24639 {
24640 struct sched_shutdown r = { .reason = reason };
24641
24642@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24643 BUG();
24644 }
24645
24646-static void xen_restart(char *msg)
24647+static __noreturn void xen_restart(char *msg)
24648 {
24649 xen_reboot(SHUTDOWN_reboot);
24650 }
24651
24652-static void xen_emergency_restart(void)
24653+static __noreturn void xen_emergency_restart(void)
24654 {
24655 xen_reboot(SHUTDOWN_reboot);
24656 }
24657
24658-static void xen_machine_halt(void)
24659+static __noreturn void xen_machine_halt(void)
24660 {
24661 xen_reboot(SHUTDOWN_poweroff);
24662 }
24663@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24664 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24665
24666 /* Work out if we support NX */
24667- x86_configure_nx();
24668+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24669+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24670+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24671+ unsigned l, h;
24672+
24673+ __supported_pte_mask |= _PAGE_NX;
24674+ rdmsr(MSR_EFER, l, h);
24675+ l |= EFER_NX;
24676+ wrmsr(MSR_EFER, l, h);
24677+ }
24678+#endif
24679
24680 xen_setup_features();
24681
24682@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24683
24684 machine_ops = xen_machine_ops;
24685
24686- /*
24687- * The only reliable way to retain the initial address of the
24688- * percpu gdt_page is to remember it here, so we can go and
24689- * mark it RW later, when the initial percpu area is freed.
24690- */
24691- xen_initial_gdt = &per_cpu(gdt_page, 0);
24692-
24693 xen_smp_init();
24694
24695 #ifdef CONFIG_ACPI_NUMA
24696diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24697index 87f6673..e2555a6 100644
24698--- a/arch/x86/xen/mmu.c
24699+++ b/arch/x86/xen/mmu.c
24700@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24701 convert_pfn_mfn(init_level4_pgt);
24702 convert_pfn_mfn(level3_ident_pgt);
24703 convert_pfn_mfn(level3_kernel_pgt);
24704+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24705+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24706+ convert_pfn_mfn(level3_vmemmap_pgt);
24707
24708 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24709 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24710@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24711 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24712 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24713 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24714+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24715+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24716+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24717 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24718+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24719 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24720 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24721
24722@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24723 pv_mmu_ops.set_pud = xen_set_pud;
24724 #if PAGETABLE_LEVELS == 4
24725 pv_mmu_ops.set_pgd = xen_set_pgd;
24726+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24727 #endif
24728
24729 /* This will work as long as patching hasn't happened yet
24730@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24731 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24732 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24733 .set_pgd = xen_set_pgd_hyper,
24734+ .set_pgd_batched = xen_set_pgd_hyper,
24735
24736 .alloc_pud = xen_alloc_pmd_init,
24737 .release_pud = xen_release_pmd_init,
24738diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24739index 041d4fe..7666b7e 100644
24740--- a/arch/x86/xen/smp.c
24741+++ b/arch/x86/xen/smp.c
24742@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24743 {
24744 BUG_ON(smp_processor_id() != 0);
24745 native_smp_prepare_boot_cpu();
24746-
24747- /* We've switched to the "real" per-cpu gdt, so make sure the
24748- old memory can be recycled */
24749- make_lowmem_page_readwrite(xen_initial_gdt);
24750-
24751 xen_filter_cpu_maps();
24752 xen_setup_vcpu_info_placement();
24753 }
24754@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24755 gdt = get_cpu_gdt_table(cpu);
24756
24757 ctxt->flags = VGCF_IN_KERNEL;
24758- ctxt->user_regs.ds = __USER_DS;
24759- ctxt->user_regs.es = __USER_DS;
24760+ ctxt->user_regs.ds = __KERNEL_DS;
24761+ ctxt->user_regs.es = __KERNEL_DS;
24762 ctxt->user_regs.ss = __KERNEL_DS;
24763 #ifdef CONFIG_X86_32
24764 ctxt->user_regs.fs = __KERNEL_PERCPU;
24765- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24766+ savesegment(gs, ctxt->user_regs.gs);
24767 #else
24768 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24769 #endif
24770@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24771 int rc;
24772
24773 per_cpu(current_task, cpu) = idle;
24774+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24775 #ifdef CONFIG_X86_32
24776 irq_ctx_init(cpu);
24777 #else
24778 clear_tsk_thread_flag(idle, TIF_FORK);
24779- per_cpu(kernel_stack, cpu) =
24780- (unsigned long)task_stack_page(idle) -
24781- KERNEL_STACK_OFFSET + THREAD_SIZE;
24782+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24783 #endif
24784 xen_setup_runstate_info(cpu);
24785 xen_setup_timer(cpu);
24786diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24787index b040b0e..8cc4fe0 100644
24788--- a/arch/x86/xen/xen-asm_32.S
24789+++ b/arch/x86/xen/xen-asm_32.S
24790@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24791 ESP_OFFSET=4 # bytes pushed onto stack
24792
24793 /*
24794- * Store vcpu_info pointer for easy access. Do it this way to
24795- * avoid having to reload %fs
24796+ * Store vcpu_info pointer for easy access.
24797 */
24798 #ifdef CONFIG_SMP
24799- GET_THREAD_INFO(%eax)
24800- movl TI_cpu(%eax), %eax
24801- movl __per_cpu_offset(,%eax,4), %eax
24802- mov xen_vcpu(%eax), %eax
24803+ push %fs
24804+ mov $(__KERNEL_PERCPU), %eax
24805+ mov %eax, %fs
24806+ mov PER_CPU_VAR(xen_vcpu), %eax
24807+ pop %fs
24808 #else
24809 movl xen_vcpu, %eax
24810 #endif
24811diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24812index aaa7291..3f77960 100644
24813--- a/arch/x86/xen/xen-head.S
24814+++ b/arch/x86/xen/xen-head.S
24815@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24816 #ifdef CONFIG_X86_32
24817 mov %esi,xen_start_info
24818 mov $init_thread_union+THREAD_SIZE,%esp
24819+#ifdef CONFIG_SMP
24820+ movl $cpu_gdt_table,%edi
24821+ movl $__per_cpu_load,%eax
24822+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24823+ rorl $16,%eax
24824+ movb %al,__KERNEL_PERCPU + 4(%edi)
24825+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24826+ movl $__per_cpu_end - 1,%eax
24827+ subl $__per_cpu_start,%eax
24828+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24829+#endif
24830 #else
24831 mov %rsi,xen_start_info
24832 mov $init_thread_union+THREAD_SIZE,%rsp
24833diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24834index b095739..8c17bcd 100644
24835--- a/arch/x86/xen/xen-ops.h
24836+++ b/arch/x86/xen/xen-ops.h
24837@@ -10,8 +10,6 @@
24838 extern const char xen_hypervisor_callback[];
24839 extern const char xen_failsafe_callback[];
24840
24841-extern void *xen_initial_gdt;
24842-
24843 struct trap_info;
24844 void xen_copy_trap_info(struct trap_info *traps);
24845
24846diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24847index 58916af..9cb880b 100644
24848--- a/block/blk-iopoll.c
24849+++ b/block/blk-iopoll.c
24850@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24851 }
24852 EXPORT_SYMBOL(blk_iopoll_complete);
24853
24854-static void blk_iopoll_softirq(struct softirq_action *h)
24855+static void blk_iopoll_softirq(void)
24856 {
24857 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24858 int rearm = 0, budget = blk_iopoll_budget;
24859diff --git a/block/blk-map.c b/block/blk-map.c
24860index 623e1cd..ca1e109 100644
24861--- a/block/blk-map.c
24862+++ b/block/blk-map.c
24863@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24864 if (!len || !kbuf)
24865 return -EINVAL;
24866
24867- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24868+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24869 if (do_copy)
24870 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24871 else
24872diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24873index 1366a89..e17f54b 100644
24874--- a/block/blk-softirq.c
24875+++ b/block/blk-softirq.c
24876@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24877 * Softirq action handler - move entries to local list and loop over them
24878 * while passing them to the queue registered handler.
24879 */
24880-static void blk_done_softirq(struct softirq_action *h)
24881+static void blk_done_softirq(void)
24882 {
24883 struct list_head *cpu_list, local_list;
24884
24885diff --git a/block/bsg.c b/block/bsg.c
24886index 702f131..37808bf 100644
24887--- a/block/bsg.c
24888+++ b/block/bsg.c
24889@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24890 struct sg_io_v4 *hdr, struct bsg_device *bd,
24891 fmode_t has_write_perm)
24892 {
24893+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24894+ unsigned char *cmdptr;
24895+
24896 if (hdr->request_len > BLK_MAX_CDB) {
24897 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24898 if (!rq->cmd)
24899 return -ENOMEM;
24900- }
24901+ cmdptr = rq->cmd;
24902+ } else
24903+ cmdptr = tmpcmd;
24904
24905- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24906+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24907 hdr->request_len))
24908 return -EFAULT;
24909
24910+ if (cmdptr != rq->cmd)
24911+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24912+
24913 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24914 if (blk_verify_command(rq->cmd, has_write_perm))
24915 return -EPERM;
24916diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24917index 7b72502..646105c 100644
24918--- a/block/compat_ioctl.c
24919+++ b/block/compat_ioctl.c
24920@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24921 err |= __get_user(f->spec1, &uf->spec1);
24922 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24923 err |= __get_user(name, &uf->name);
24924- f->name = compat_ptr(name);
24925+ f->name = (void __force_kernel *)compat_ptr(name);
24926 if (err) {
24927 err = -EFAULT;
24928 goto out;
24929diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24930index 688be8a..8a37d98 100644
24931--- a/block/scsi_ioctl.c
24932+++ b/block/scsi_ioctl.c
24933@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24934 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24935 struct sg_io_hdr *hdr, fmode_t mode)
24936 {
24937- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24938+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24939+ unsigned char *cmdptr;
24940+
24941+ if (rq->cmd != rq->__cmd)
24942+ cmdptr = rq->cmd;
24943+ else
24944+ cmdptr = tmpcmd;
24945+
24946+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24947 return -EFAULT;
24948+
24949+ if (cmdptr != rq->cmd)
24950+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24951+
24952 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24953 return -EPERM;
24954
24955@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24956 int err;
24957 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24958 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24959+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24960+ unsigned char *cmdptr;
24961
24962 if (!sic)
24963 return -EINVAL;
24964@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24965 */
24966 err = -EFAULT;
24967 rq->cmd_len = cmdlen;
24968- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24969+
24970+ if (rq->cmd != rq->__cmd)
24971+ cmdptr = rq->cmd;
24972+ else
24973+ cmdptr = tmpcmd;
24974+
24975+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24976 goto error;
24977
24978+ if (rq->cmd != cmdptr)
24979+ memcpy(rq->cmd, cmdptr, cmdlen);
24980+
24981 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24982 goto error;
24983
24984diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24985index 671d4d6..5f24030 100644
24986--- a/crypto/cryptd.c
24987+++ b/crypto/cryptd.c
24988@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24989
24990 struct cryptd_blkcipher_request_ctx {
24991 crypto_completion_t complete;
24992-};
24993+} __no_const;
24994
24995 struct cryptd_hash_ctx {
24996 struct crypto_shash *child;
24997@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24998
24999 struct cryptd_aead_request_ctx {
25000 crypto_completion_t complete;
25001-};
25002+} __no_const;
25003
25004 static void cryptd_queue_worker(struct work_struct *work);
25005
25006diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25007index 5d41894..22021e4 100644
25008--- a/drivers/acpi/apei/cper.c
25009+++ b/drivers/acpi/apei/cper.c
25010@@ -38,12 +38,12 @@
25011 */
25012 u64 cper_next_record_id(void)
25013 {
25014- static atomic64_t seq;
25015+ static atomic64_unchecked_t seq;
25016
25017- if (!atomic64_read(&seq))
25018- atomic64_set(&seq, ((u64)get_seconds()) << 32);
25019+ if (!atomic64_read_unchecked(&seq))
25020+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25021
25022- return atomic64_inc_return(&seq);
25023+ return atomic64_inc_return_unchecked(&seq);
25024 }
25025 EXPORT_SYMBOL_GPL(cper_next_record_id);
25026
25027diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25028index 6c47ae9..8ab9132 100644
25029--- a/drivers/acpi/ec_sys.c
25030+++ b/drivers/acpi/ec_sys.c
25031@@ -12,6 +12,7 @@
25032 #include <linux/acpi.h>
25033 #include <linux/debugfs.h>
25034 #include <linux/module.h>
25035+#include <asm/uaccess.h>
25036 #include "internal.h"
25037
25038 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25039@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25040 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25041 */
25042 unsigned int size = EC_SPACE_SIZE;
25043- u8 *data = (u8 *) buf;
25044+ u8 data;
25045 loff_t init_off = *off;
25046 int err = 0;
25047
25048@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25049 size = count;
25050
25051 while (size) {
25052- err = ec_read(*off, &data[*off - init_off]);
25053+ err = ec_read(*off, &data);
25054 if (err)
25055 return err;
25056+ if (put_user(data, &buf[*off - init_off]))
25057+ return -EFAULT;
25058 *off += 1;
25059 size--;
25060 }
25061@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25062
25063 unsigned int size = count;
25064 loff_t init_off = *off;
25065- u8 *data = (u8 *) buf;
25066 int err = 0;
25067
25068 if (*off >= EC_SPACE_SIZE)
25069@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25070 }
25071
25072 while (size) {
25073- u8 byte_write = data[*off - init_off];
25074+ u8 byte_write;
25075+ if (get_user(byte_write, &buf[*off - init_off]))
25076+ return -EFAULT;
25077 err = ec_write(*off, byte_write);
25078 if (err)
25079 return err;
25080diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25081index 251c7b62..000462d 100644
25082--- a/drivers/acpi/proc.c
25083+++ b/drivers/acpi/proc.c
25084@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25085 size_t count, loff_t * ppos)
25086 {
25087 struct list_head *node, *next;
25088- char strbuf[5];
25089- char str[5] = "";
25090- unsigned int len = count;
25091+ char strbuf[5] = {0};
25092
25093- if (len > 4)
25094- len = 4;
25095- if (len < 0)
25096+ if (count > 4)
25097+ count = 4;
25098+ if (copy_from_user(strbuf, buffer, count))
25099 return -EFAULT;
25100-
25101- if (copy_from_user(strbuf, buffer, len))
25102- return -EFAULT;
25103- strbuf[len] = '\0';
25104- sscanf(strbuf, "%s", str);
25105+ strbuf[count] = '\0';
25106
25107 mutex_lock(&acpi_device_lock);
25108 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25109@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25110 if (!dev->wakeup.flags.valid)
25111 continue;
25112
25113- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25114+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25115 if (device_can_wakeup(&dev->dev)) {
25116 bool enable = !device_may_wakeup(&dev->dev);
25117 device_set_wakeup_enable(&dev->dev, enable);
25118diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25119index 9d7bc9f..a6fc091 100644
25120--- a/drivers/acpi/processor_driver.c
25121+++ b/drivers/acpi/processor_driver.c
25122@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25123 return 0;
25124 #endif
25125
25126- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25127+ BUG_ON(pr->id >= nr_cpu_ids);
25128
25129 /*
25130 * Buggy BIOS check
25131diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25132index c04ad68..0b99473 100644
25133--- a/drivers/ata/libata-core.c
25134+++ b/drivers/ata/libata-core.c
25135@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25136 struct ata_port *ap;
25137 unsigned int tag;
25138
25139- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25140+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25141 ap = qc->ap;
25142
25143 qc->flags = 0;
25144@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25145 struct ata_port *ap;
25146 struct ata_link *link;
25147
25148- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25149+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25150 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25151 ap = qc->ap;
25152 link = qc->dev->link;
25153@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25154 return;
25155
25156 spin_lock(&lock);
25157+ pax_open_kernel();
25158
25159 for (cur = ops->inherits; cur; cur = cur->inherits) {
25160 void **inherit = (void **)cur;
25161@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25162 if (IS_ERR(*pp))
25163 *pp = NULL;
25164
25165- ops->inherits = NULL;
25166+ *(struct ata_port_operations **)&ops->inherits = NULL;
25167
25168+ pax_close_kernel();
25169 spin_unlock(&lock);
25170 }
25171
25172diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25173index e8574bb..f9f6a72 100644
25174--- a/drivers/ata/pata_arasan_cf.c
25175+++ b/drivers/ata/pata_arasan_cf.c
25176@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25177 /* Handle platform specific quirks */
25178 if (pdata->quirk) {
25179 if (pdata->quirk & CF_BROKEN_PIO) {
25180- ap->ops->set_piomode = NULL;
25181+ pax_open_kernel();
25182+ *(void **)&ap->ops->set_piomode = NULL;
25183+ pax_close_kernel();
25184 ap->pio_mask = 0;
25185 }
25186 if (pdata->quirk & CF_BROKEN_MWDMA)
25187diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25188index f9b983a..887b9d8 100644
25189--- a/drivers/atm/adummy.c
25190+++ b/drivers/atm/adummy.c
25191@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25192 vcc->pop(vcc, skb);
25193 else
25194 dev_kfree_skb_any(skb);
25195- atomic_inc(&vcc->stats->tx);
25196+ atomic_inc_unchecked(&vcc->stats->tx);
25197
25198 return 0;
25199 }
25200diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25201index f8f41e0..1f987dd 100644
25202--- a/drivers/atm/ambassador.c
25203+++ b/drivers/atm/ambassador.c
25204@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25205 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25206
25207 // VC layer stats
25208- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25209+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25210
25211 // free the descriptor
25212 kfree (tx_descr);
25213@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25214 dump_skb ("<<<", vc, skb);
25215
25216 // VC layer stats
25217- atomic_inc(&atm_vcc->stats->rx);
25218+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25219 __net_timestamp(skb);
25220 // end of our responsibility
25221 atm_vcc->push (atm_vcc, skb);
25222@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25223 } else {
25224 PRINTK (KERN_INFO, "dropped over-size frame");
25225 // should we count this?
25226- atomic_inc(&atm_vcc->stats->rx_drop);
25227+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25228 }
25229
25230 } else {
25231@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25232 }
25233
25234 if (check_area (skb->data, skb->len)) {
25235- atomic_inc(&atm_vcc->stats->tx_err);
25236+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25237 return -ENOMEM; // ?
25238 }
25239
25240diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25241index b22d71c..d6e1049 100644
25242--- a/drivers/atm/atmtcp.c
25243+++ b/drivers/atm/atmtcp.c
25244@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25245 if (vcc->pop) vcc->pop(vcc,skb);
25246 else dev_kfree_skb(skb);
25247 if (dev_data) return 0;
25248- atomic_inc(&vcc->stats->tx_err);
25249+ atomic_inc_unchecked(&vcc->stats->tx_err);
25250 return -ENOLINK;
25251 }
25252 size = skb->len+sizeof(struct atmtcp_hdr);
25253@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25254 if (!new_skb) {
25255 if (vcc->pop) vcc->pop(vcc,skb);
25256 else dev_kfree_skb(skb);
25257- atomic_inc(&vcc->stats->tx_err);
25258+ atomic_inc_unchecked(&vcc->stats->tx_err);
25259 return -ENOBUFS;
25260 }
25261 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25262@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25263 if (vcc->pop) vcc->pop(vcc,skb);
25264 else dev_kfree_skb(skb);
25265 out_vcc->push(out_vcc,new_skb);
25266- atomic_inc(&vcc->stats->tx);
25267- atomic_inc(&out_vcc->stats->rx);
25268+ atomic_inc_unchecked(&vcc->stats->tx);
25269+ atomic_inc_unchecked(&out_vcc->stats->rx);
25270 return 0;
25271 }
25272
25273@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25274 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25275 read_unlock(&vcc_sklist_lock);
25276 if (!out_vcc) {
25277- atomic_inc(&vcc->stats->tx_err);
25278+ atomic_inc_unchecked(&vcc->stats->tx_err);
25279 goto done;
25280 }
25281 skb_pull(skb,sizeof(struct atmtcp_hdr));
25282@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25283 __net_timestamp(new_skb);
25284 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25285 out_vcc->push(out_vcc,new_skb);
25286- atomic_inc(&vcc->stats->tx);
25287- atomic_inc(&out_vcc->stats->rx);
25288+ atomic_inc_unchecked(&vcc->stats->tx);
25289+ atomic_inc_unchecked(&out_vcc->stats->rx);
25290 done:
25291 if (vcc->pop) vcc->pop(vcc,skb);
25292 else dev_kfree_skb(skb);
25293diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25294index 956e9ac..133516d 100644
25295--- a/drivers/atm/eni.c
25296+++ b/drivers/atm/eni.c
25297@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25298 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25299 vcc->dev->number);
25300 length = 0;
25301- atomic_inc(&vcc->stats->rx_err);
25302+ atomic_inc_unchecked(&vcc->stats->rx_err);
25303 }
25304 else {
25305 length = ATM_CELL_SIZE-1; /* no HEC */
25306@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25307 size);
25308 }
25309 eff = length = 0;
25310- atomic_inc(&vcc->stats->rx_err);
25311+ atomic_inc_unchecked(&vcc->stats->rx_err);
25312 }
25313 else {
25314 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25315@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25316 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25317 vcc->dev->number,vcc->vci,length,size << 2,descr);
25318 length = eff = 0;
25319- atomic_inc(&vcc->stats->rx_err);
25320+ atomic_inc_unchecked(&vcc->stats->rx_err);
25321 }
25322 }
25323 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25324@@ -771,7 +771,7 @@ rx_dequeued++;
25325 vcc->push(vcc,skb);
25326 pushed++;
25327 }
25328- atomic_inc(&vcc->stats->rx);
25329+ atomic_inc_unchecked(&vcc->stats->rx);
25330 }
25331 wake_up(&eni_dev->rx_wait);
25332 }
25333@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25334 PCI_DMA_TODEVICE);
25335 if (vcc->pop) vcc->pop(vcc,skb);
25336 else dev_kfree_skb_irq(skb);
25337- atomic_inc(&vcc->stats->tx);
25338+ atomic_inc_unchecked(&vcc->stats->tx);
25339 wake_up(&eni_dev->tx_wait);
25340 dma_complete++;
25341 }
25342@@ -1569,7 +1569,7 @@ tx_complete++;
25343 /*--------------------------------- entries ---------------------------------*/
25344
25345
25346-static const char *media_name[] __devinitdata = {
25347+static const char *media_name[] __devinitconst = {
25348 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25349 "UTP", "05?", "06?", "07?", /* 4- 7 */
25350 "TAXI","09?", "10?", "11?", /* 8-11 */
25351diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25352index 5072f8a..fa52520d 100644
25353--- a/drivers/atm/firestream.c
25354+++ b/drivers/atm/firestream.c
25355@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25356 }
25357 }
25358
25359- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25360+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25361
25362 fs_dprintk (FS_DEBUG_TXMEM, "i");
25363 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25364@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25365 #endif
25366 skb_put (skb, qe->p1 & 0xffff);
25367 ATM_SKB(skb)->vcc = atm_vcc;
25368- atomic_inc(&atm_vcc->stats->rx);
25369+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25370 __net_timestamp(skb);
25371 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25372 atm_vcc->push (atm_vcc, skb);
25373@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25374 kfree (pe);
25375 }
25376 if (atm_vcc)
25377- atomic_inc(&atm_vcc->stats->rx_drop);
25378+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25379 break;
25380 case 0x1f: /* Reassembly abort: no buffers. */
25381 /* Silently increment error counter. */
25382 if (atm_vcc)
25383- atomic_inc(&atm_vcc->stats->rx_drop);
25384+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25385 break;
25386 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25387 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25388diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25389index 361f5ae..7fc552d 100644
25390--- a/drivers/atm/fore200e.c
25391+++ b/drivers/atm/fore200e.c
25392@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25393 #endif
25394 /* check error condition */
25395 if (*entry->status & STATUS_ERROR)
25396- atomic_inc(&vcc->stats->tx_err);
25397+ atomic_inc_unchecked(&vcc->stats->tx_err);
25398 else
25399- atomic_inc(&vcc->stats->tx);
25400+ atomic_inc_unchecked(&vcc->stats->tx);
25401 }
25402 }
25403
25404@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25405 if (skb == NULL) {
25406 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25407
25408- atomic_inc(&vcc->stats->rx_drop);
25409+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25410 return -ENOMEM;
25411 }
25412
25413@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25414
25415 dev_kfree_skb_any(skb);
25416
25417- atomic_inc(&vcc->stats->rx_drop);
25418+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25419 return -ENOMEM;
25420 }
25421
25422 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25423
25424 vcc->push(vcc, skb);
25425- atomic_inc(&vcc->stats->rx);
25426+ atomic_inc_unchecked(&vcc->stats->rx);
25427
25428 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25429
25430@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25431 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25432 fore200e->atm_dev->number,
25433 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25434- atomic_inc(&vcc->stats->rx_err);
25435+ atomic_inc_unchecked(&vcc->stats->rx_err);
25436 }
25437 }
25438
25439@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25440 goto retry_here;
25441 }
25442
25443- atomic_inc(&vcc->stats->tx_err);
25444+ atomic_inc_unchecked(&vcc->stats->tx_err);
25445
25446 fore200e->tx_sat++;
25447 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25448diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25449index 9a51df4..f3bb5f8 100644
25450--- a/drivers/atm/he.c
25451+++ b/drivers/atm/he.c
25452@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25453
25454 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25455 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25456- atomic_inc(&vcc->stats->rx_drop);
25457+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25458 goto return_host_buffers;
25459 }
25460
25461@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25462 RBRQ_LEN_ERR(he_dev->rbrq_head)
25463 ? "LEN_ERR" : "",
25464 vcc->vpi, vcc->vci);
25465- atomic_inc(&vcc->stats->rx_err);
25466+ atomic_inc_unchecked(&vcc->stats->rx_err);
25467 goto return_host_buffers;
25468 }
25469
25470@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25471 vcc->push(vcc, skb);
25472 spin_lock(&he_dev->global_lock);
25473
25474- atomic_inc(&vcc->stats->rx);
25475+ atomic_inc_unchecked(&vcc->stats->rx);
25476
25477 return_host_buffers:
25478 ++pdus_assembled;
25479@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25480 tpd->vcc->pop(tpd->vcc, tpd->skb);
25481 else
25482 dev_kfree_skb_any(tpd->skb);
25483- atomic_inc(&tpd->vcc->stats->tx_err);
25484+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25485 }
25486 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25487 return;
25488@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25489 vcc->pop(vcc, skb);
25490 else
25491 dev_kfree_skb_any(skb);
25492- atomic_inc(&vcc->stats->tx_err);
25493+ atomic_inc_unchecked(&vcc->stats->tx_err);
25494 return -EINVAL;
25495 }
25496
25497@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25498 vcc->pop(vcc, skb);
25499 else
25500 dev_kfree_skb_any(skb);
25501- atomic_inc(&vcc->stats->tx_err);
25502+ atomic_inc_unchecked(&vcc->stats->tx_err);
25503 return -EINVAL;
25504 }
25505 #endif
25506@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25507 vcc->pop(vcc, skb);
25508 else
25509 dev_kfree_skb_any(skb);
25510- atomic_inc(&vcc->stats->tx_err);
25511+ atomic_inc_unchecked(&vcc->stats->tx_err);
25512 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25513 return -ENOMEM;
25514 }
25515@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25516 vcc->pop(vcc, skb);
25517 else
25518 dev_kfree_skb_any(skb);
25519- atomic_inc(&vcc->stats->tx_err);
25520+ atomic_inc_unchecked(&vcc->stats->tx_err);
25521 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25522 return -ENOMEM;
25523 }
25524@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25525 __enqueue_tpd(he_dev, tpd, cid);
25526 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25527
25528- atomic_inc(&vcc->stats->tx);
25529+ atomic_inc_unchecked(&vcc->stats->tx);
25530
25531 return 0;
25532 }
25533diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25534index b812103..e391a49 100644
25535--- a/drivers/atm/horizon.c
25536+++ b/drivers/atm/horizon.c
25537@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25538 {
25539 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25540 // VC layer stats
25541- atomic_inc(&vcc->stats->rx);
25542+ atomic_inc_unchecked(&vcc->stats->rx);
25543 __net_timestamp(skb);
25544 // end of our responsibility
25545 vcc->push (vcc, skb);
25546@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25547 dev->tx_iovec = NULL;
25548
25549 // VC layer stats
25550- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25551+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25552
25553 // free the skb
25554 hrz_kfree_skb (skb);
25555diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25556index 1c05212..c28e200 100644
25557--- a/drivers/atm/idt77252.c
25558+++ b/drivers/atm/idt77252.c
25559@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25560 else
25561 dev_kfree_skb(skb);
25562
25563- atomic_inc(&vcc->stats->tx);
25564+ atomic_inc_unchecked(&vcc->stats->tx);
25565 }
25566
25567 atomic_dec(&scq->used);
25568@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25569 if ((sb = dev_alloc_skb(64)) == NULL) {
25570 printk("%s: Can't allocate buffers for aal0.\n",
25571 card->name);
25572- atomic_add(i, &vcc->stats->rx_drop);
25573+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25574 break;
25575 }
25576 if (!atm_charge(vcc, sb->truesize)) {
25577 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25578 card->name);
25579- atomic_add(i - 1, &vcc->stats->rx_drop);
25580+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25581 dev_kfree_skb(sb);
25582 break;
25583 }
25584@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25585 ATM_SKB(sb)->vcc = vcc;
25586 __net_timestamp(sb);
25587 vcc->push(vcc, sb);
25588- atomic_inc(&vcc->stats->rx);
25589+ atomic_inc_unchecked(&vcc->stats->rx);
25590
25591 cell += ATM_CELL_PAYLOAD;
25592 }
25593@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25594 "(CDC: %08x)\n",
25595 card->name, len, rpp->len, readl(SAR_REG_CDC));
25596 recycle_rx_pool_skb(card, rpp);
25597- atomic_inc(&vcc->stats->rx_err);
25598+ atomic_inc_unchecked(&vcc->stats->rx_err);
25599 return;
25600 }
25601 if (stat & SAR_RSQE_CRC) {
25602 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25603 recycle_rx_pool_skb(card, rpp);
25604- atomic_inc(&vcc->stats->rx_err);
25605+ atomic_inc_unchecked(&vcc->stats->rx_err);
25606 return;
25607 }
25608 if (skb_queue_len(&rpp->queue) > 1) {
25609@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25610 RXPRINTK("%s: Can't alloc RX skb.\n",
25611 card->name);
25612 recycle_rx_pool_skb(card, rpp);
25613- atomic_inc(&vcc->stats->rx_err);
25614+ atomic_inc_unchecked(&vcc->stats->rx_err);
25615 return;
25616 }
25617 if (!atm_charge(vcc, skb->truesize)) {
25618@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25619 __net_timestamp(skb);
25620
25621 vcc->push(vcc, skb);
25622- atomic_inc(&vcc->stats->rx);
25623+ atomic_inc_unchecked(&vcc->stats->rx);
25624
25625 return;
25626 }
25627@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25628 __net_timestamp(skb);
25629
25630 vcc->push(vcc, skb);
25631- atomic_inc(&vcc->stats->rx);
25632+ atomic_inc_unchecked(&vcc->stats->rx);
25633
25634 if (skb->truesize > SAR_FB_SIZE_3)
25635 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25636@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25637 if (vcc->qos.aal != ATM_AAL0) {
25638 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25639 card->name, vpi, vci);
25640- atomic_inc(&vcc->stats->rx_drop);
25641+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25642 goto drop;
25643 }
25644
25645 if ((sb = dev_alloc_skb(64)) == NULL) {
25646 printk("%s: Can't allocate buffers for AAL0.\n",
25647 card->name);
25648- atomic_inc(&vcc->stats->rx_err);
25649+ atomic_inc_unchecked(&vcc->stats->rx_err);
25650 goto drop;
25651 }
25652
25653@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25654 ATM_SKB(sb)->vcc = vcc;
25655 __net_timestamp(sb);
25656 vcc->push(vcc, sb);
25657- atomic_inc(&vcc->stats->rx);
25658+ atomic_inc_unchecked(&vcc->stats->rx);
25659
25660 drop:
25661 skb_pull(queue, 64);
25662@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25663
25664 if (vc == NULL) {
25665 printk("%s: NULL connection in send().\n", card->name);
25666- atomic_inc(&vcc->stats->tx_err);
25667+ atomic_inc_unchecked(&vcc->stats->tx_err);
25668 dev_kfree_skb(skb);
25669 return -EINVAL;
25670 }
25671 if (!test_bit(VCF_TX, &vc->flags)) {
25672 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25673- atomic_inc(&vcc->stats->tx_err);
25674+ atomic_inc_unchecked(&vcc->stats->tx_err);
25675 dev_kfree_skb(skb);
25676 return -EINVAL;
25677 }
25678@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25679 break;
25680 default:
25681 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25682- atomic_inc(&vcc->stats->tx_err);
25683+ atomic_inc_unchecked(&vcc->stats->tx_err);
25684 dev_kfree_skb(skb);
25685 return -EINVAL;
25686 }
25687
25688 if (skb_shinfo(skb)->nr_frags != 0) {
25689 printk("%s: No scatter-gather yet.\n", card->name);
25690- atomic_inc(&vcc->stats->tx_err);
25691+ atomic_inc_unchecked(&vcc->stats->tx_err);
25692 dev_kfree_skb(skb);
25693 return -EINVAL;
25694 }
25695@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25696
25697 err = queue_skb(card, vc, skb, oam);
25698 if (err) {
25699- atomic_inc(&vcc->stats->tx_err);
25700+ atomic_inc_unchecked(&vcc->stats->tx_err);
25701 dev_kfree_skb(skb);
25702 return err;
25703 }
25704@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25705 skb = dev_alloc_skb(64);
25706 if (!skb) {
25707 printk("%s: Out of memory in send_oam().\n", card->name);
25708- atomic_inc(&vcc->stats->tx_err);
25709+ atomic_inc_unchecked(&vcc->stats->tx_err);
25710 return -ENOMEM;
25711 }
25712 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25713diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25714index 3d0c2b0..45441fa 100644
25715--- a/drivers/atm/iphase.c
25716+++ b/drivers/atm/iphase.c
25717@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25718 status = (u_short) (buf_desc_ptr->desc_mode);
25719 if (status & (RX_CER | RX_PTE | RX_OFL))
25720 {
25721- atomic_inc(&vcc->stats->rx_err);
25722+ atomic_inc_unchecked(&vcc->stats->rx_err);
25723 IF_ERR(printk("IA: bad packet, dropping it");)
25724 if (status & RX_CER) {
25725 IF_ERR(printk(" cause: packet CRC error\n");)
25726@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25727 len = dma_addr - buf_addr;
25728 if (len > iadev->rx_buf_sz) {
25729 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25730- atomic_inc(&vcc->stats->rx_err);
25731+ atomic_inc_unchecked(&vcc->stats->rx_err);
25732 goto out_free_desc;
25733 }
25734
25735@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25736 ia_vcc = INPH_IA_VCC(vcc);
25737 if (ia_vcc == NULL)
25738 {
25739- atomic_inc(&vcc->stats->rx_err);
25740+ atomic_inc_unchecked(&vcc->stats->rx_err);
25741 dev_kfree_skb_any(skb);
25742 atm_return(vcc, atm_guess_pdu2truesize(len));
25743 goto INCR_DLE;
25744@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25745 if ((length > iadev->rx_buf_sz) || (length >
25746 (skb->len - sizeof(struct cpcs_trailer))))
25747 {
25748- atomic_inc(&vcc->stats->rx_err);
25749+ atomic_inc_unchecked(&vcc->stats->rx_err);
25750 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25751 length, skb->len);)
25752 dev_kfree_skb_any(skb);
25753@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25754
25755 IF_RX(printk("rx_dle_intr: skb push");)
25756 vcc->push(vcc,skb);
25757- atomic_inc(&vcc->stats->rx);
25758+ atomic_inc_unchecked(&vcc->stats->rx);
25759 iadev->rx_pkt_cnt++;
25760 }
25761 INCR_DLE:
25762@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25763 {
25764 struct k_sonet_stats *stats;
25765 stats = &PRIV(_ia_dev[board])->sonet_stats;
25766- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25767- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25768- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25769- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25770- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25771- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25772- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25773- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25774- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25775+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25776+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25777+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25778+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25779+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25780+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25781+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25782+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25783+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25784 }
25785 ia_cmds.status = 0;
25786 break;
25787@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25788 if ((desc == 0) || (desc > iadev->num_tx_desc))
25789 {
25790 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25791- atomic_inc(&vcc->stats->tx);
25792+ atomic_inc_unchecked(&vcc->stats->tx);
25793 if (vcc->pop)
25794 vcc->pop(vcc, skb);
25795 else
25796@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25797 ATM_DESC(skb) = vcc->vci;
25798 skb_queue_tail(&iadev->tx_dma_q, skb);
25799
25800- atomic_inc(&vcc->stats->tx);
25801+ atomic_inc_unchecked(&vcc->stats->tx);
25802 iadev->tx_pkt_cnt++;
25803 /* Increment transaction counter */
25804 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25805
25806 #if 0
25807 /* add flow control logic */
25808- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25809+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25810 if (iavcc->vc_desc_cnt > 10) {
25811 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25812 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25813diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25814index f556969..0da15eb 100644
25815--- a/drivers/atm/lanai.c
25816+++ b/drivers/atm/lanai.c
25817@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25818 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25819 lanai_endtx(lanai, lvcc);
25820 lanai_free_skb(lvcc->tx.atmvcc, skb);
25821- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25822+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25823 }
25824
25825 /* Try to fill the buffer - don't call unless there is backlog */
25826@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25827 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25828 __net_timestamp(skb);
25829 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25830- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25831+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25832 out:
25833 lvcc->rx.buf.ptr = end;
25834 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25835@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25836 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25837 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25838 lanai->stats.service_rxnotaal5++;
25839- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25840+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25841 return 0;
25842 }
25843 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25844@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25845 int bytes;
25846 read_unlock(&vcc_sklist_lock);
25847 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25848- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25849+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25850 lvcc->stats.x.aal5.service_trash++;
25851 bytes = (SERVICE_GET_END(s) * 16) -
25852 (((unsigned long) lvcc->rx.buf.ptr) -
25853@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25854 }
25855 if (s & SERVICE_STREAM) {
25856 read_unlock(&vcc_sklist_lock);
25857- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25858+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25859 lvcc->stats.x.aal5.service_stream++;
25860 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25861 "PDU on VCI %d!\n", lanai->number, vci);
25862@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25863 return 0;
25864 }
25865 DPRINTK("got rx crc error on vci %d\n", vci);
25866- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25867+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25868 lvcc->stats.x.aal5.service_rxcrc++;
25869 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25870 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25871diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25872index 1c70c45..300718d 100644
25873--- a/drivers/atm/nicstar.c
25874+++ b/drivers/atm/nicstar.c
25875@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25876 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25877 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25878 card->index);
25879- atomic_inc(&vcc->stats->tx_err);
25880+ atomic_inc_unchecked(&vcc->stats->tx_err);
25881 dev_kfree_skb_any(skb);
25882 return -EINVAL;
25883 }
25884@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25885 if (!vc->tx) {
25886 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25887 card->index);
25888- atomic_inc(&vcc->stats->tx_err);
25889+ atomic_inc_unchecked(&vcc->stats->tx_err);
25890 dev_kfree_skb_any(skb);
25891 return -EINVAL;
25892 }
25893@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25894 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25895 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25896 card->index);
25897- atomic_inc(&vcc->stats->tx_err);
25898+ atomic_inc_unchecked(&vcc->stats->tx_err);
25899 dev_kfree_skb_any(skb);
25900 return -EINVAL;
25901 }
25902
25903 if (skb_shinfo(skb)->nr_frags != 0) {
25904 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25905- atomic_inc(&vcc->stats->tx_err);
25906+ atomic_inc_unchecked(&vcc->stats->tx_err);
25907 dev_kfree_skb_any(skb);
25908 return -EINVAL;
25909 }
25910@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25911 }
25912
25913 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25914- atomic_inc(&vcc->stats->tx_err);
25915+ atomic_inc_unchecked(&vcc->stats->tx_err);
25916 dev_kfree_skb_any(skb);
25917 return -EIO;
25918 }
25919- atomic_inc(&vcc->stats->tx);
25920+ atomic_inc_unchecked(&vcc->stats->tx);
25921
25922 return 0;
25923 }
25924@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25925 printk
25926 ("nicstar%d: Can't allocate buffers for aal0.\n",
25927 card->index);
25928- atomic_add(i, &vcc->stats->rx_drop);
25929+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25930 break;
25931 }
25932 if (!atm_charge(vcc, sb->truesize)) {
25933 RXPRINTK
25934 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25935 card->index);
25936- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25937+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25938 dev_kfree_skb_any(sb);
25939 break;
25940 }
25941@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25942 ATM_SKB(sb)->vcc = vcc;
25943 __net_timestamp(sb);
25944 vcc->push(vcc, sb);
25945- atomic_inc(&vcc->stats->rx);
25946+ atomic_inc_unchecked(&vcc->stats->rx);
25947 cell += ATM_CELL_PAYLOAD;
25948 }
25949
25950@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25951 if (iovb == NULL) {
25952 printk("nicstar%d: Out of iovec buffers.\n",
25953 card->index);
25954- atomic_inc(&vcc->stats->rx_drop);
25955+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25956 recycle_rx_buf(card, skb);
25957 return;
25958 }
25959@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25960 small or large buffer itself. */
25961 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25962 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25963- atomic_inc(&vcc->stats->rx_err);
25964+ atomic_inc_unchecked(&vcc->stats->rx_err);
25965 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25966 NS_MAX_IOVECS);
25967 NS_PRV_IOVCNT(iovb) = 0;
25968@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25969 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25970 card->index);
25971 which_list(card, skb);
25972- atomic_inc(&vcc->stats->rx_err);
25973+ atomic_inc_unchecked(&vcc->stats->rx_err);
25974 recycle_rx_buf(card, skb);
25975 vc->rx_iov = NULL;
25976 recycle_iov_buf(card, iovb);
25977@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25978 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25979 card->index);
25980 which_list(card, skb);
25981- atomic_inc(&vcc->stats->rx_err);
25982+ atomic_inc_unchecked(&vcc->stats->rx_err);
25983 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25984 NS_PRV_IOVCNT(iovb));
25985 vc->rx_iov = NULL;
25986@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25987 printk(" - PDU size mismatch.\n");
25988 else
25989 printk(".\n");
25990- atomic_inc(&vcc->stats->rx_err);
25991+ atomic_inc_unchecked(&vcc->stats->rx_err);
25992 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25993 NS_PRV_IOVCNT(iovb));
25994 vc->rx_iov = NULL;
25995@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25996 /* skb points to a small buffer */
25997 if (!atm_charge(vcc, skb->truesize)) {
25998 push_rxbufs(card, skb);
25999- atomic_inc(&vcc->stats->rx_drop);
26000+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26001 } else {
26002 skb_put(skb, len);
26003 dequeue_sm_buf(card, skb);
26004@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26005 ATM_SKB(skb)->vcc = vcc;
26006 __net_timestamp(skb);
26007 vcc->push(vcc, skb);
26008- atomic_inc(&vcc->stats->rx);
26009+ atomic_inc_unchecked(&vcc->stats->rx);
26010 }
26011 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26012 struct sk_buff *sb;
26013@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26014 if (len <= NS_SMBUFSIZE) {
26015 if (!atm_charge(vcc, sb->truesize)) {
26016 push_rxbufs(card, sb);
26017- atomic_inc(&vcc->stats->rx_drop);
26018+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26019 } else {
26020 skb_put(sb, len);
26021 dequeue_sm_buf(card, sb);
26022@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26023 ATM_SKB(sb)->vcc = vcc;
26024 __net_timestamp(sb);
26025 vcc->push(vcc, sb);
26026- atomic_inc(&vcc->stats->rx);
26027+ atomic_inc_unchecked(&vcc->stats->rx);
26028 }
26029
26030 push_rxbufs(card, skb);
26031@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26032
26033 if (!atm_charge(vcc, skb->truesize)) {
26034 push_rxbufs(card, skb);
26035- atomic_inc(&vcc->stats->rx_drop);
26036+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26037 } else {
26038 dequeue_lg_buf(card, skb);
26039 #ifdef NS_USE_DESTRUCTORS
26040@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26041 ATM_SKB(skb)->vcc = vcc;
26042 __net_timestamp(skb);
26043 vcc->push(vcc, skb);
26044- atomic_inc(&vcc->stats->rx);
26045+ atomic_inc_unchecked(&vcc->stats->rx);
26046 }
26047
26048 push_rxbufs(card, sb);
26049@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26050 printk
26051 ("nicstar%d: Out of huge buffers.\n",
26052 card->index);
26053- atomic_inc(&vcc->stats->rx_drop);
26054+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26055 recycle_iovec_rx_bufs(card,
26056 (struct iovec *)
26057 iovb->data,
26058@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26059 card->hbpool.count++;
26060 } else
26061 dev_kfree_skb_any(hb);
26062- atomic_inc(&vcc->stats->rx_drop);
26063+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26064 } else {
26065 /* Copy the small buffer to the huge buffer */
26066 sb = (struct sk_buff *)iov->iov_base;
26067@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26068 #endif /* NS_USE_DESTRUCTORS */
26069 __net_timestamp(hb);
26070 vcc->push(vcc, hb);
26071- atomic_inc(&vcc->stats->rx);
26072+ atomic_inc_unchecked(&vcc->stats->rx);
26073 }
26074 }
26075
26076diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26077index 5d1d076..12fbca4 100644
26078--- a/drivers/atm/solos-pci.c
26079+++ b/drivers/atm/solos-pci.c
26080@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26081 }
26082 atm_charge(vcc, skb->truesize);
26083 vcc->push(vcc, skb);
26084- atomic_inc(&vcc->stats->rx);
26085+ atomic_inc_unchecked(&vcc->stats->rx);
26086 break;
26087
26088 case PKT_STATUS:
26089@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26090 vcc = SKB_CB(oldskb)->vcc;
26091
26092 if (vcc) {
26093- atomic_inc(&vcc->stats->tx);
26094+ atomic_inc_unchecked(&vcc->stats->tx);
26095 solos_pop(vcc, oldskb);
26096 } else
26097 dev_kfree_skb_irq(oldskb);
26098diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26099index 90f1ccc..04c4a1e 100644
26100--- a/drivers/atm/suni.c
26101+++ b/drivers/atm/suni.c
26102@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26103
26104
26105 #define ADD_LIMITED(s,v) \
26106- atomic_add((v),&stats->s); \
26107- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26108+ atomic_add_unchecked((v),&stats->s); \
26109+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26110
26111
26112 static void suni_hz(unsigned long from_timer)
26113diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26114index 5120a96..e2572bd 100644
26115--- a/drivers/atm/uPD98402.c
26116+++ b/drivers/atm/uPD98402.c
26117@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26118 struct sonet_stats tmp;
26119 int error = 0;
26120
26121- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26122+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26123 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26124 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26125 if (zero && !error) {
26126@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26127
26128
26129 #define ADD_LIMITED(s,v) \
26130- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26131- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26132- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26133+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26134+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26135+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26136
26137
26138 static void stat_event(struct atm_dev *dev)
26139@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26140 if (reason & uPD98402_INT_PFM) stat_event(dev);
26141 if (reason & uPD98402_INT_PCO) {
26142 (void) GET(PCOCR); /* clear interrupt cause */
26143- atomic_add(GET(HECCT),
26144+ atomic_add_unchecked(GET(HECCT),
26145 &PRIV(dev)->sonet_stats.uncorr_hcs);
26146 }
26147 if ((reason & uPD98402_INT_RFO) &&
26148@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26149 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26150 uPD98402_INT_LOS),PIMR); /* enable them */
26151 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26152- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26153- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26154- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26155+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26156+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26157+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26158 return 0;
26159 }
26160
26161diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26162index d889f56..17eb71e 100644
26163--- a/drivers/atm/zatm.c
26164+++ b/drivers/atm/zatm.c
26165@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26166 }
26167 if (!size) {
26168 dev_kfree_skb_irq(skb);
26169- if (vcc) atomic_inc(&vcc->stats->rx_err);
26170+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26171 continue;
26172 }
26173 if (!atm_charge(vcc,skb->truesize)) {
26174@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26175 skb->len = size;
26176 ATM_SKB(skb)->vcc = vcc;
26177 vcc->push(vcc,skb);
26178- atomic_inc(&vcc->stats->rx);
26179+ atomic_inc_unchecked(&vcc->stats->rx);
26180 }
26181 zout(pos & 0xffff,MTA(mbx));
26182 #if 0 /* probably a stupid idea */
26183@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26184 skb_queue_head(&zatm_vcc->backlog,skb);
26185 break;
26186 }
26187- atomic_inc(&vcc->stats->tx);
26188+ atomic_inc_unchecked(&vcc->stats->tx);
26189 wake_up(&zatm_vcc->tx_wait);
26190 }
26191
26192diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26193index a4760e0..51283cf 100644
26194--- a/drivers/base/devtmpfs.c
26195+++ b/drivers/base/devtmpfs.c
26196@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26197 if (!thread)
26198 return 0;
26199
26200- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26201+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26202 if (err)
26203 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26204 else
26205diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26206index caf995f..6f76697 100644
26207--- a/drivers/base/power/wakeup.c
26208+++ b/drivers/base/power/wakeup.c
26209@@ -30,14 +30,14 @@ bool events_check_enabled;
26210 * They need to be modified together atomically, so it's better to use one
26211 * atomic variable to hold them both.
26212 */
26213-static atomic_t combined_event_count = ATOMIC_INIT(0);
26214+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26215
26216 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26217 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26218
26219 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26220 {
26221- unsigned int comb = atomic_read(&combined_event_count);
26222+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26223
26224 *cnt = (comb >> IN_PROGRESS_BITS);
26225 *inpr = comb & MAX_IN_PROGRESS;
26226@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26227 ws->last_time = ktime_get();
26228
26229 /* Increment the counter of events in progress. */
26230- atomic_inc(&combined_event_count);
26231+ atomic_inc_unchecked(&combined_event_count);
26232 }
26233
26234 /**
26235@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26236 * Increment the counter of registered wakeup events and decrement the
26237 * couter of wakeup events in progress simultaneously.
26238 */
26239- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26240+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26241 }
26242
26243 /**
26244diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26245index b0f553b..77b928b 100644
26246--- a/drivers/block/cciss.c
26247+++ b/drivers/block/cciss.c
26248@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26249 int err;
26250 u32 cp;
26251
26252+ memset(&arg64, 0, sizeof(arg64));
26253+
26254 err = 0;
26255 err |=
26256 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26257@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26258 while (!list_empty(&h->reqQ)) {
26259 c = list_entry(h->reqQ.next, CommandList_struct, list);
26260 /* can't do anything if fifo is full */
26261- if ((h->access.fifo_full(h))) {
26262+ if ((h->access->fifo_full(h))) {
26263 dev_warn(&h->pdev->dev, "fifo full\n");
26264 break;
26265 }
26266@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26267 h->Qdepth--;
26268
26269 /* Tell the controller execute command */
26270- h->access.submit_command(h, c);
26271+ h->access->submit_command(h, c);
26272
26273 /* Put job onto the completed Q */
26274 addQ(&h->cmpQ, c);
26275@@ -3443,17 +3445,17 @@ startio:
26276
26277 static inline unsigned long get_next_completion(ctlr_info_t *h)
26278 {
26279- return h->access.command_completed(h);
26280+ return h->access->command_completed(h);
26281 }
26282
26283 static inline int interrupt_pending(ctlr_info_t *h)
26284 {
26285- return h->access.intr_pending(h);
26286+ return h->access->intr_pending(h);
26287 }
26288
26289 static inline long interrupt_not_for_us(ctlr_info_t *h)
26290 {
26291- return ((h->access.intr_pending(h) == 0) ||
26292+ return ((h->access->intr_pending(h) == 0) ||
26293 (h->interrupts_enabled == 0));
26294 }
26295
26296@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26297 u32 a;
26298
26299 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26300- return h->access.command_completed(h);
26301+ return h->access->command_completed(h);
26302
26303 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26304 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26305@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26306 trans_support & CFGTBL_Trans_use_short_tags);
26307
26308 /* Change the access methods to the performant access methods */
26309- h->access = SA5_performant_access;
26310+ h->access = &SA5_performant_access;
26311 h->transMethod = CFGTBL_Trans_Performant;
26312
26313 return;
26314@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26315 if (prod_index < 0)
26316 return -ENODEV;
26317 h->product_name = products[prod_index].product_name;
26318- h->access = *(products[prod_index].access);
26319+ h->access = products[prod_index].access;
26320
26321 if (cciss_board_disabled(h)) {
26322 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26323@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26324 }
26325
26326 /* make sure the board interrupts are off */
26327- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26328+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26329 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26330 if (rc)
26331 goto clean2;
26332@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26333 * fake ones to scoop up any residual completions.
26334 */
26335 spin_lock_irqsave(&h->lock, flags);
26336- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26337+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26338 spin_unlock_irqrestore(&h->lock, flags);
26339 free_irq(h->intr[h->intr_mode], h);
26340 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26341@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26342 dev_info(&h->pdev->dev, "Board READY.\n");
26343 dev_info(&h->pdev->dev,
26344 "Waiting for stale completions to drain.\n");
26345- h->access.set_intr_mask(h, CCISS_INTR_ON);
26346+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26347 msleep(10000);
26348- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26349+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26350
26351 rc = controller_reset_failed(h->cfgtable);
26352 if (rc)
26353@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26354 cciss_scsi_setup(h);
26355
26356 /* Turn the interrupts on so we can service requests */
26357- h->access.set_intr_mask(h, CCISS_INTR_ON);
26358+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26359
26360 /* Get the firmware version */
26361 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26362@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26363 kfree(flush_buf);
26364 if (return_code != IO_OK)
26365 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26366- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26367+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26368 free_irq(h->intr[h->intr_mode], h);
26369 }
26370
26371diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26372index 7fda30e..eb5dfe0 100644
26373--- a/drivers/block/cciss.h
26374+++ b/drivers/block/cciss.h
26375@@ -101,7 +101,7 @@ struct ctlr_info
26376 /* information about each logical volume */
26377 drive_info_struct *drv[CISS_MAX_LUN];
26378
26379- struct access_method access;
26380+ struct access_method *access;
26381
26382 /* queue and queue Info */
26383 struct list_head reqQ;
26384diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26385index 9125bbe..eede5c8 100644
26386--- a/drivers/block/cpqarray.c
26387+++ b/drivers/block/cpqarray.c
26388@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26389 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26390 goto Enomem4;
26391 }
26392- hba[i]->access.set_intr_mask(hba[i], 0);
26393+ hba[i]->access->set_intr_mask(hba[i], 0);
26394 if (request_irq(hba[i]->intr, do_ida_intr,
26395 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26396 {
26397@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26398 add_timer(&hba[i]->timer);
26399
26400 /* Enable IRQ now that spinlock and rate limit timer are set up */
26401- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26402+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26403
26404 for(j=0; j<NWD; j++) {
26405 struct gendisk *disk = ida_gendisk[i][j];
26406@@ -694,7 +694,7 @@ DBGINFO(
26407 for(i=0; i<NR_PRODUCTS; i++) {
26408 if (board_id == products[i].board_id) {
26409 c->product_name = products[i].product_name;
26410- c->access = *(products[i].access);
26411+ c->access = products[i].access;
26412 break;
26413 }
26414 }
26415@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26416 hba[ctlr]->intr = intr;
26417 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26418 hba[ctlr]->product_name = products[j].product_name;
26419- hba[ctlr]->access = *(products[j].access);
26420+ hba[ctlr]->access = products[j].access;
26421 hba[ctlr]->ctlr = ctlr;
26422 hba[ctlr]->board_id = board_id;
26423 hba[ctlr]->pci_dev = NULL; /* not PCI */
26424@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26425
26426 while((c = h->reqQ) != NULL) {
26427 /* Can't do anything if we're busy */
26428- if (h->access.fifo_full(h) == 0)
26429+ if (h->access->fifo_full(h) == 0)
26430 return;
26431
26432 /* Get the first entry from the request Q */
26433@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26434 h->Qdepth--;
26435
26436 /* Tell the controller to do our bidding */
26437- h->access.submit_command(h, c);
26438+ h->access->submit_command(h, c);
26439
26440 /* Get onto the completion Q */
26441 addQ(&h->cmpQ, c);
26442@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26443 unsigned long flags;
26444 __u32 a,a1;
26445
26446- istat = h->access.intr_pending(h);
26447+ istat = h->access->intr_pending(h);
26448 /* Is this interrupt for us? */
26449 if (istat == 0)
26450 return IRQ_NONE;
26451@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26452 */
26453 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26454 if (istat & FIFO_NOT_EMPTY) {
26455- while((a = h->access.command_completed(h))) {
26456+ while((a = h->access->command_completed(h))) {
26457 a1 = a; a &= ~3;
26458 if ((c = h->cmpQ) == NULL)
26459 {
26460@@ -1449,11 +1449,11 @@ static int sendcmd(
26461 /*
26462 * Disable interrupt
26463 */
26464- info_p->access.set_intr_mask(info_p, 0);
26465+ info_p->access->set_intr_mask(info_p, 0);
26466 /* Make sure there is room in the command FIFO */
26467 /* Actually it should be completely empty at this time. */
26468 for (i = 200000; i > 0; i--) {
26469- temp = info_p->access.fifo_full(info_p);
26470+ temp = info_p->access->fifo_full(info_p);
26471 if (temp != 0) {
26472 break;
26473 }
26474@@ -1466,7 +1466,7 @@ DBG(
26475 /*
26476 * Send the cmd
26477 */
26478- info_p->access.submit_command(info_p, c);
26479+ info_p->access->submit_command(info_p, c);
26480 complete = pollcomplete(ctlr);
26481
26482 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26483@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26484 * we check the new geometry. Then turn interrupts back on when
26485 * we're done.
26486 */
26487- host->access.set_intr_mask(host, 0);
26488+ host->access->set_intr_mask(host, 0);
26489 getgeometry(ctlr);
26490- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26491+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26492
26493 for(i=0; i<NWD; i++) {
26494 struct gendisk *disk = ida_gendisk[ctlr][i];
26495@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26496 /* Wait (up to 2 seconds) for a command to complete */
26497
26498 for (i = 200000; i > 0; i--) {
26499- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26500+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26501 if (done == 0) {
26502 udelay(10); /* a short fixed delay */
26503 } else
26504diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26505index be73e9d..7fbf140 100644
26506--- a/drivers/block/cpqarray.h
26507+++ b/drivers/block/cpqarray.h
26508@@ -99,7 +99,7 @@ struct ctlr_info {
26509 drv_info_t drv[NWD];
26510 struct proc_dir_entry *proc;
26511
26512- struct access_method access;
26513+ struct access_method *access;
26514
26515 cmdlist_t *reqQ;
26516 cmdlist_t *cmpQ;
26517diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26518index 9cf2035..bffca95 100644
26519--- a/drivers/block/drbd/drbd_int.h
26520+++ b/drivers/block/drbd/drbd_int.h
26521@@ -736,7 +736,7 @@ struct drbd_request;
26522 struct drbd_epoch {
26523 struct list_head list;
26524 unsigned int barrier_nr;
26525- atomic_t epoch_size; /* increased on every request added. */
26526+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26527 atomic_t active; /* increased on every req. added, and dec on every finished. */
26528 unsigned long flags;
26529 };
26530@@ -1108,7 +1108,7 @@ struct drbd_conf {
26531 void *int_dig_in;
26532 void *int_dig_vv;
26533 wait_queue_head_t seq_wait;
26534- atomic_t packet_seq;
26535+ atomic_unchecked_t packet_seq;
26536 unsigned int peer_seq;
26537 spinlock_t peer_seq_lock;
26538 unsigned int minor;
26539@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26540
26541 static inline void drbd_tcp_cork(struct socket *sock)
26542 {
26543- int __user val = 1;
26544+ int val = 1;
26545 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26546- (char __user *)&val, sizeof(val));
26547+ (char __force_user *)&val, sizeof(val));
26548 }
26549
26550 static inline void drbd_tcp_uncork(struct socket *sock)
26551 {
26552- int __user val = 0;
26553+ int val = 0;
26554 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26555- (char __user *)&val, sizeof(val));
26556+ (char __force_user *)&val, sizeof(val));
26557 }
26558
26559 static inline void drbd_tcp_nodelay(struct socket *sock)
26560 {
26561- int __user val = 1;
26562+ int val = 1;
26563 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26564- (char __user *)&val, sizeof(val));
26565+ (char __force_user *)&val, sizeof(val));
26566 }
26567
26568 static inline void drbd_tcp_quickack(struct socket *sock)
26569 {
26570- int __user val = 2;
26571+ int val = 2;
26572 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26573- (char __user *)&val, sizeof(val));
26574+ (char __force_user *)&val, sizeof(val));
26575 }
26576
26577 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26578diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26579index 0358e55..bc33689 100644
26580--- a/drivers/block/drbd/drbd_main.c
26581+++ b/drivers/block/drbd/drbd_main.c
26582@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26583 p.sector = sector;
26584 p.block_id = block_id;
26585 p.blksize = blksize;
26586- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26587+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26588
26589 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26590 return false;
26591@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26592 p.sector = cpu_to_be64(req->sector);
26593 p.block_id = (unsigned long)req;
26594 p.seq_num = cpu_to_be32(req->seq_num =
26595- atomic_add_return(1, &mdev->packet_seq));
26596+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26597
26598 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26599
26600@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26601 atomic_set(&mdev->unacked_cnt, 0);
26602 atomic_set(&mdev->local_cnt, 0);
26603 atomic_set(&mdev->net_cnt, 0);
26604- atomic_set(&mdev->packet_seq, 0);
26605+ atomic_set_unchecked(&mdev->packet_seq, 0);
26606 atomic_set(&mdev->pp_in_use, 0);
26607 atomic_set(&mdev->pp_in_use_by_net, 0);
26608 atomic_set(&mdev->rs_sect_in, 0);
26609@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26610 mdev->receiver.t_state);
26611
26612 /* no need to lock it, I'm the only thread alive */
26613- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26614- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26615+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26616+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26617 mdev->al_writ_cnt =
26618 mdev->bm_writ_cnt =
26619 mdev->read_cnt =
26620diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26621index af2a250..219c74b 100644
26622--- a/drivers/block/drbd/drbd_nl.c
26623+++ b/drivers/block/drbd/drbd_nl.c
26624@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26625 module_put(THIS_MODULE);
26626 }
26627
26628-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26629+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26630
26631 static unsigned short *
26632 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26633@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26634 cn_reply->id.idx = CN_IDX_DRBD;
26635 cn_reply->id.val = CN_VAL_DRBD;
26636
26637- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26638+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26639 cn_reply->ack = 0; /* not used here. */
26640 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26641 (int)((char *)tl - (char *)reply->tag_list);
26642@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26643 cn_reply->id.idx = CN_IDX_DRBD;
26644 cn_reply->id.val = CN_VAL_DRBD;
26645
26646- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26647+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26648 cn_reply->ack = 0; /* not used here. */
26649 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26650 (int)((char *)tl - (char *)reply->tag_list);
26651@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26652 cn_reply->id.idx = CN_IDX_DRBD;
26653 cn_reply->id.val = CN_VAL_DRBD;
26654
26655- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26656+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26657 cn_reply->ack = 0; // not used here.
26658 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26659 (int)((char*)tl - (char*)reply->tag_list);
26660@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26661 cn_reply->id.idx = CN_IDX_DRBD;
26662 cn_reply->id.val = CN_VAL_DRBD;
26663
26664- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26665+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26666 cn_reply->ack = 0; /* not used here. */
26667 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26668 (int)((char *)tl - (char *)reply->tag_list);
26669diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26670index 43beaca..4a5b1dd 100644
26671--- a/drivers/block/drbd/drbd_receiver.c
26672+++ b/drivers/block/drbd/drbd_receiver.c
26673@@ -894,7 +894,7 @@ retry:
26674 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26675 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26676
26677- atomic_set(&mdev->packet_seq, 0);
26678+ atomic_set_unchecked(&mdev->packet_seq, 0);
26679 mdev->peer_seq = 0;
26680
26681 drbd_thread_start(&mdev->asender);
26682@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26683 do {
26684 next_epoch = NULL;
26685
26686- epoch_size = atomic_read(&epoch->epoch_size);
26687+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26688
26689 switch (ev & ~EV_CLEANUP) {
26690 case EV_PUT:
26691@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26692 rv = FE_DESTROYED;
26693 } else {
26694 epoch->flags = 0;
26695- atomic_set(&epoch->epoch_size, 0);
26696+ atomic_set_unchecked(&epoch->epoch_size, 0);
26697 /* atomic_set(&epoch->active, 0); is already zero */
26698 if (rv == FE_STILL_LIVE)
26699 rv = FE_RECYCLED;
26700@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26701 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26702 drbd_flush(mdev);
26703
26704- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26705+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26706 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26707 if (epoch)
26708 break;
26709 }
26710
26711 epoch = mdev->current_epoch;
26712- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26713+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26714
26715 D_ASSERT(atomic_read(&epoch->active) == 0);
26716 D_ASSERT(epoch->flags == 0);
26717@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26718 }
26719
26720 epoch->flags = 0;
26721- atomic_set(&epoch->epoch_size, 0);
26722+ atomic_set_unchecked(&epoch->epoch_size, 0);
26723 atomic_set(&epoch->active, 0);
26724
26725 spin_lock(&mdev->epoch_lock);
26726- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26727+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26728 list_add(&epoch->list, &mdev->current_epoch->list);
26729 mdev->current_epoch = epoch;
26730 mdev->epochs++;
26731@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26732 spin_unlock(&mdev->peer_seq_lock);
26733
26734 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26735- atomic_inc(&mdev->current_epoch->epoch_size);
26736+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26737 return drbd_drain_block(mdev, data_size);
26738 }
26739
26740@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26741
26742 spin_lock(&mdev->epoch_lock);
26743 e->epoch = mdev->current_epoch;
26744- atomic_inc(&e->epoch->epoch_size);
26745+ atomic_inc_unchecked(&e->epoch->epoch_size);
26746 atomic_inc(&e->epoch->active);
26747 spin_unlock(&mdev->epoch_lock);
26748
26749@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26750 D_ASSERT(list_empty(&mdev->done_ee));
26751
26752 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26753- atomic_set(&mdev->current_epoch->epoch_size, 0);
26754+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26755 D_ASSERT(list_empty(&mdev->current_epoch->list));
26756 }
26757
26758diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26759index 1e888c9..05cf1b0 100644
26760--- a/drivers/block/loop.c
26761+++ b/drivers/block/loop.c
26762@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26763 mm_segment_t old_fs = get_fs();
26764
26765 set_fs(get_ds());
26766- bw = file->f_op->write(file, buf, len, &pos);
26767+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26768 set_fs(old_fs);
26769 if (likely(bw == len))
26770 return 0;
26771diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26772index 4364303..9adf4ee 100644
26773--- a/drivers/char/Kconfig
26774+++ b/drivers/char/Kconfig
26775@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26776
26777 config DEVKMEM
26778 bool "/dev/kmem virtual device support"
26779- default y
26780+ default n
26781+ depends on !GRKERNSEC_KMEM
26782 help
26783 Say Y here if you want to support the /dev/kmem device. The
26784 /dev/kmem device is rarely used, but can be used for certain
26785@@ -596,6 +597,7 @@ config DEVPORT
26786 bool
26787 depends on !M68K
26788 depends on ISA || PCI
26789+ depends on !GRKERNSEC_KMEM
26790 default y
26791
26792 source "drivers/s390/char/Kconfig"
26793diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26794index 2e04433..22afc64 100644
26795--- a/drivers/char/agp/frontend.c
26796+++ b/drivers/char/agp/frontend.c
26797@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26798 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26799 return -EFAULT;
26800
26801- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26802+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26803 return -EFAULT;
26804
26805 client = agp_find_client_by_pid(reserve.pid);
26806diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26807index 095ab90..afad0a4 100644
26808--- a/drivers/char/briq_panel.c
26809+++ b/drivers/char/briq_panel.c
26810@@ -9,6 +9,7 @@
26811 #include <linux/types.h>
26812 #include <linux/errno.h>
26813 #include <linux/tty.h>
26814+#include <linux/mutex.h>
26815 #include <linux/timer.h>
26816 #include <linux/kernel.h>
26817 #include <linux/wait.h>
26818@@ -34,6 +35,7 @@ static int vfd_is_open;
26819 static unsigned char vfd[40];
26820 static int vfd_cursor;
26821 static unsigned char ledpb, led;
26822+static DEFINE_MUTEX(vfd_mutex);
26823
26824 static void update_vfd(void)
26825 {
26826@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26827 if (!vfd_is_open)
26828 return -EBUSY;
26829
26830+ mutex_lock(&vfd_mutex);
26831 for (;;) {
26832 char c;
26833 if (!indx)
26834 break;
26835- if (get_user(c, buf))
26836+ if (get_user(c, buf)) {
26837+ mutex_unlock(&vfd_mutex);
26838 return -EFAULT;
26839+ }
26840 if (esc) {
26841 set_led(c);
26842 esc = 0;
26843@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26844 buf++;
26845 }
26846 update_vfd();
26847+ mutex_unlock(&vfd_mutex);
26848
26849 return len;
26850 }
26851diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26852index f773a9d..65cd683 100644
26853--- a/drivers/char/genrtc.c
26854+++ b/drivers/char/genrtc.c
26855@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26856 switch (cmd) {
26857
26858 case RTC_PLL_GET:
26859+ memset(&pll, 0, sizeof(pll));
26860 if (get_rtc_pll(&pll))
26861 return -EINVAL;
26862 else
26863diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26864index 0833896..cccce52 100644
26865--- a/drivers/char/hpet.c
26866+++ b/drivers/char/hpet.c
26867@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26868 }
26869
26870 static int
26871-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26872+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26873 struct hpet_info *info)
26874 {
26875 struct hpet_timer __iomem *timer;
26876diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26877index 58c0e63..46c16bf 100644
26878--- a/drivers/char/ipmi/ipmi_msghandler.c
26879+++ b/drivers/char/ipmi/ipmi_msghandler.c
26880@@ -415,7 +415,7 @@ struct ipmi_smi {
26881 struct proc_dir_entry *proc_dir;
26882 char proc_dir_name[10];
26883
26884- atomic_t stats[IPMI_NUM_STATS];
26885+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26886
26887 /*
26888 * run_to_completion duplicate of smb_info, smi_info
26889@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26890
26891
26892 #define ipmi_inc_stat(intf, stat) \
26893- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26894+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26895 #define ipmi_get_stat(intf, stat) \
26896- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26897+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26898
26899 static int is_lan_addr(struct ipmi_addr *addr)
26900 {
26901@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26902 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26903 init_waitqueue_head(&intf->waitq);
26904 for (i = 0; i < IPMI_NUM_STATS; i++)
26905- atomic_set(&intf->stats[i], 0);
26906+ atomic_set_unchecked(&intf->stats[i], 0);
26907
26908 intf->proc_dir = NULL;
26909
26910diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26911index 9397ab4..d01bee1 100644
26912--- a/drivers/char/ipmi/ipmi_si_intf.c
26913+++ b/drivers/char/ipmi/ipmi_si_intf.c
26914@@ -277,7 +277,7 @@ struct smi_info {
26915 unsigned char slave_addr;
26916
26917 /* Counters and things for the proc filesystem. */
26918- atomic_t stats[SI_NUM_STATS];
26919+ atomic_unchecked_t stats[SI_NUM_STATS];
26920
26921 struct task_struct *thread;
26922
26923@@ -286,9 +286,9 @@ struct smi_info {
26924 };
26925
26926 #define smi_inc_stat(smi, stat) \
26927- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26928+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26929 #define smi_get_stat(smi, stat) \
26930- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26931+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26932
26933 #define SI_MAX_PARMS 4
26934
26935@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26936 atomic_set(&new_smi->req_events, 0);
26937 new_smi->run_to_completion = 0;
26938 for (i = 0; i < SI_NUM_STATS; i++)
26939- atomic_set(&new_smi->stats[i], 0);
26940+ atomic_set_unchecked(&new_smi->stats[i], 0);
26941
26942 new_smi->interrupt_disabled = 1;
26943 atomic_set(&new_smi->stop_operation, 0);
26944diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26945index 1aeaaba..e018570 100644
26946--- a/drivers/char/mbcs.c
26947+++ b/drivers/char/mbcs.c
26948@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26949 return 0;
26950 }
26951
26952-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26953+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26954 {
26955 .part_num = MBCS_PART_NUM,
26956 .mfg_num = MBCS_MFG_NUM,
26957diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26958index 1451790..f705c30 100644
26959--- a/drivers/char/mem.c
26960+++ b/drivers/char/mem.c
26961@@ -18,6 +18,7 @@
26962 #include <linux/raw.h>
26963 #include <linux/tty.h>
26964 #include <linux/capability.h>
26965+#include <linux/security.h>
26966 #include <linux/ptrace.h>
26967 #include <linux/device.h>
26968 #include <linux/highmem.h>
26969@@ -35,6 +36,10 @@
26970 # include <linux/efi.h>
26971 #endif
26972
26973+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26974+extern const struct file_operations grsec_fops;
26975+#endif
26976+
26977 static inline unsigned long size_inside_page(unsigned long start,
26978 unsigned long size)
26979 {
26980@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26981
26982 while (cursor < to) {
26983 if (!devmem_is_allowed(pfn)) {
26984+#ifdef CONFIG_GRKERNSEC_KMEM
26985+ gr_handle_mem_readwrite(from, to);
26986+#else
26987 printk(KERN_INFO
26988 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26989 current->comm, from, to);
26990+#endif
26991 return 0;
26992 }
26993 cursor += PAGE_SIZE;
26994@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26995 }
26996 return 1;
26997 }
26998+#elif defined(CONFIG_GRKERNSEC_KMEM)
26999+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27000+{
27001+ return 0;
27002+}
27003 #else
27004 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27005 {
27006@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27007
27008 while (count > 0) {
27009 unsigned long remaining;
27010+ char *temp;
27011
27012 sz = size_inside_page(p, count);
27013
27014@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27015 if (!ptr)
27016 return -EFAULT;
27017
27018- remaining = copy_to_user(buf, ptr, sz);
27019+#ifdef CONFIG_PAX_USERCOPY
27020+ temp = kmalloc(sz, GFP_KERNEL);
27021+ if (!temp) {
27022+ unxlate_dev_mem_ptr(p, ptr);
27023+ return -ENOMEM;
27024+ }
27025+ memcpy(temp, ptr, sz);
27026+#else
27027+ temp = ptr;
27028+#endif
27029+
27030+ remaining = copy_to_user(buf, temp, sz);
27031+
27032+#ifdef CONFIG_PAX_USERCOPY
27033+ kfree(temp);
27034+#endif
27035+
27036 unxlate_dev_mem_ptr(p, ptr);
27037 if (remaining)
27038 return -EFAULT;
27039@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27040 size_t count, loff_t *ppos)
27041 {
27042 unsigned long p = *ppos;
27043- ssize_t low_count, read, sz;
27044+ ssize_t low_count, read, sz, err = 0;
27045 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27046- int err = 0;
27047
27048 read = 0;
27049 if (p < (unsigned long) high_memory) {
27050@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27051 }
27052 #endif
27053 while (low_count > 0) {
27054+ char *temp;
27055+
27056 sz = size_inside_page(p, low_count);
27057
27058 /*
27059@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27060 */
27061 kbuf = xlate_dev_kmem_ptr((char *)p);
27062
27063- if (copy_to_user(buf, kbuf, sz))
27064+#ifdef CONFIG_PAX_USERCOPY
27065+ temp = kmalloc(sz, GFP_KERNEL);
27066+ if (!temp)
27067+ return -ENOMEM;
27068+ memcpy(temp, kbuf, sz);
27069+#else
27070+ temp = kbuf;
27071+#endif
27072+
27073+ err = copy_to_user(buf, temp, sz);
27074+
27075+#ifdef CONFIG_PAX_USERCOPY
27076+ kfree(temp);
27077+#endif
27078+
27079+ if (err)
27080 return -EFAULT;
27081 buf += sz;
27082 p += sz;
27083@@ -867,6 +914,9 @@ static const struct memdev {
27084 #ifdef CONFIG_CRASH_DUMP
27085 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27086 #endif
27087+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27088+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27089+#endif
27090 };
27091
27092 static int memory_open(struct inode *inode, struct file *filp)
27093diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27094index da3cfee..a5a6606 100644
27095--- a/drivers/char/nvram.c
27096+++ b/drivers/char/nvram.c
27097@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27098
27099 spin_unlock_irq(&rtc_lock);
27100
27101- if (copy_to_user(buf, contents, tmp - contents))
27102+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27103 return -EFAULT;
27104
27105 *ppos = i;
27106diff --git a/drivers/char/random.c b/drivers/char/random.c
27107index 6035ab8..bdfe4fd 100644
27108--- a/drivers/char/random.c
27109+++ b/drivers/char/random.c
27110@@ -261,8 +261,13 @@
27111 /*
27112 * Configuration information
27113 */
27114+#ifdef CONFIG_GRKERNSEC_RANDNET
27115+#define INPUT_POOL_WORDS 512
27116+#define OUTPUT_POOL_WORDS 128
27117+#else
27118 #define INPUT_POOL_WORDS 128
27119 #define OUTPUT_POOL_WORDS 32
27120+#endif
27121 #define SEC_XFER_SIZE 512
27122 #define EXTRACT_SIZE 10
27123
27124@@ -300,10 +305,17 @@ static struct poolinfo {
27125 int poolwords;
27126 int tap1, tap2, tap3, tap4, tap5;
27127 } poolinfo_table[] = {
27128+#ifdef CONFIG_GRKERNSEC_RANDNET
27129+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27130+ { 512, 411, 308, 208, 104, 1 },
27131+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27132+ { 128, 103, 76, 51, 25, 1 },
27133+#else
27134 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27135 { 128, 103, 76, 51, 25, 1 },
27136 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27137 { 32, 26, 20, 14, 7, 1 },
27138+#endif
27139 #if 0
27140 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27141 { 2048, 1638, 1231, 819, 411, 1 },
27142@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27143
27144 extract_buf(r, tmp);
27145 i = min_t(int, nbytes, EXTRACT_SIZE);
27146- if (copy_to_user(buf, tmp, i)) {
27147+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27148 ret = -EFAULT;
27149 break;
27150 }
27151@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27152 #include <linux/sysctl.h>
27153
27154 static int min_read_thresh = 8, min_write_thresh;
27155-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27156+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27157 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27158 static char sysctl_bootid[16];
27159
27160diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27161index 1ee8ce7..b778bef 100644
27162--- a/drivers/char/sonypi.c
27163+++ b/drivers/char/sonypi.c
27164@@ -55,6 +55,7 @@
27165 #include <asm/uaccess.h>
27166 #include <asm/io.h>
27167 #include <asm/system.h>
27168+#include <asm/local.h>
27169
27170 #include <linux/sonypi.h>
27171
27172@@ -491,7 +492,7 @@ static struct sonypi_device {
27173 spinlock_t fifo_lock;
27174 wait_queue_head_t fifo_proc_list;
27175 struct fasync_struct *fifo_async;
27176- int open_count;
27177+ local_t open_count;
27178 int model;
27179 struct input_dev *input_jog_dev;
27180 struct input_dev *input_key_dev;
27181@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27182 static int sonypi_misc_release(struct inode *inode, struct file *file)
27183 {
27184 mutex_lock(&sonypi_device.lock);
27185- sonypi_device.open_count--;
27186+ local_dec(&sonypi_device.open_count);
27187 mutex_unlock(&sonypi_device.lock);
27188 return 0;
27189 }
27190@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27191 {
27192 mutex_lock(&sonypi_device.lock);
27193 /* Flush input queue on first open */
27194- if (!sonypi_device.open_count)
27195+ if (!local_read(&sonypi_device.open_count))
27196 kfifo_reset(&sonypi_device.fifo);
27197- sonypi_device.open_count++;
27198+ local_inc(&sonypi_device.open_count);
27199 mutex_unlock(&sonypi_device.lock);
27200
27201 return 0;
27202diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27203index 361a1df..2471eee 100644
27204--- a/drivers/char/tpm/tpm.c
27205+++ b/drivers/char/tpm/tpm.c
27206@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27207 chip->vendor.req_complete_val)
27208 goto out_recv;
27209
27210- if ((status == chip->vendor.req_canceled)) {
27211+ if (status == chip->vendor.req_canceled) {
27212 dev_err(chip->dev, "Operation Canceled\n");
27213 rc = -ECANCELED;
27214 goto out;
27215diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27216index 0636520..169c1d0 100644
27217--- a/drivers/char/tpm/tpm_bios.c
27218+++ b/drivers/char/tpm/tpm_bios.c
27219@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27220 event = addr;
27221
27222 if ((event->event_type == 0 && event->event_size == 0) ||
27223- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27224+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27225 return NULL;
27226
27227 return addr;
27228@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27229 return NULL;
27230
27231 if ((event->event_type == 0 && event->event_size == 0) ||
27232- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27233+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27234 return NULL;
27235
27236 (*pos)++;
27237@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27238 int i;
27239
27240 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27241- seq_putc(m, data[i]);
27242+ if (!seq_putc(m, data[i]))
27243+ return -EFAULT;
27244
27245 return 0;
27246 }
27247@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27248 log->bios_event_log_end = log->bios_event_log + len;
27249
27250 virt = acpi_os_map_memory(start, len);
27251+ if (!virt) {
27252+ kfree(log->bios_event_log);
27253+ log->bios_event_log = NULL;
27254+ return -EFAULT;
27255+ }
27256
27257- memcpy(log->bios_event_log, virt, len);
27258+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27259
27260 acpi_os_unmap_memory(virt, len);
27261 return 0;
27262diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27263index 8e3c46d..c139b99 100644
27264--- a/drivers/char/virtio_console.c
27265+++ b/drivers/char/virtio_console.c
27266@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27267 if (to_user) {
27268 ssize_t ret;
27269
27270- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27271+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27272 if (ret)
27273 return -EFAULT;
27274 } else {
27275@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27276 if (!port_has_data(port) && !port->host_connected)
27277 return 0;
27278
27279- return fill_readbuf(port, ubuf, count, true);
27280+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27281 }
27282
27283 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27284diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27285index eb1d864..39ee5a7 100644
27286--- a/drivers/dma/dmatest.c
27287+++ b/drivers/dma/dmatest.c
27288@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27289 }
27290 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27291 cnt = dmatest_add_threads(dtc, DMA_PQ);
27292- thread_count += cnt > 0 ?: 0;
27293+ thread_count += cnt > 0 ? cnt : 0;
27294 }
27295
27296 pr_info("dmatest: Started %u threads using %s\n",
27297diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27298index c9eee6d..f9d5280 100644
27299--- a/drivers/edac/amd64_edac.c
27300+++ b/drivers/edac/amd64_edac.c
27301@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27302 * PCI core identifies what devices are on a system during boot, and then
27303 * inquiry this table to see if this driver is for a given device found.
27304 */
27305-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27306+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27307 {
27308 .vendor = PCI_VENDOR_ID_AMD,
27309 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27310diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27311index e47e73b..348e0bd 100644
27312--- a/drivers/edac/amd76x_edac.c
27313+++ b/drivers/edac/amd76x_edac.c
27314@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27315 edac_mc_free(mci);
27316 }
27317
27318-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27319+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27320 {
27321 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27322 AMD762},
27323diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27324index 1af531a..3a8ff27 100644
27325--- a/drivers/edac/e752x_edac.c
27326+++ b/drivers/edac/e752x_edac.c
27327@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27328 edac_mc_free(mci);
27329 }
27330
27331-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27332+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27333 {
27334 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27335 E7520},
27336diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27337index 6ffb6d2..383d8d7 100644
27338--- a/drivers/edac/e7xxx_edac.c
27339+++ b/drivers/edac/e7xxx_edac.c
27340@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27341 edac_mc_free(mci);
27342 }
27343
27344-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27345+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27346 {
27347 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27348 E7205},
27349diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27350index 495198a..ac08c85 100644
27351--- a/drivers/edac/edac_pci_sysfs.c
27352+++ b/drivers/edac/edac_pci_sysfs.c
27353@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27354 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27355 static int edac_pci_poll_msec = 1000; /* one second workq period */
27356
27357-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27358-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27359+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27360+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27361
27362 static struct kobject *edac_pci_top_main_kobj;
27363 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27364@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27365 edac_printk(KERN_CRIT, EDAC_PCI,
27366 "Signaled System Error on %s\n",
27367 pci_name(dev));
27368- atomic_inc(&pci_nonparity_count);
27369+ atomic_inc_unchecked(&pci_nonparity_count);
27370 }
27371
27372 if (status & (PCI_STATUS_PARITY)) {
27373@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27374 "Master Data Parity Error on %s\n",
27375 pci_name(dev));
27376
27377- atomic_inc(&pci_parity_count);
27378+ atomic_inc_unchecked(&pci_parity_count);
27379 }
27380
27381 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27382@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27383 "Detected Parity Error on %s\n",
27384 pci_name(dev));
27385
27386- atomic_inc(&pci_parity_count);
27387+ atomic_inc_unchecked(&pci_parity_count);
27388 }
27389 }
27390
27391@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27392 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27393 "Signaled System Error on %s\n",
27394 pci_name(dev));
27395- atomic_inc(&pci_nonparity_count);
27396+ atomic_inc_unchecked(&pci_nonparity_count);
27397 }
27398
27399 if (status & (PCI_STATUS_PARITY)) {
27400@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27401 "Master Data Parity Error on "
27402 "%s\n", pci_name(dev));
27403
27404- atomic_inc(&pci_parity_count);
27405+ atomic_inc_unchecked(&pci_parity_count);
27406 }
27407
27408 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27409@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27410 "Detected Parity Error on %s\n",
27411 pci_name(dev));
27412
27413- atomic_inc(&pci_parity_count);
27414+ atomic_inc_unchecked(&pci_parity_count);
27415 }
27416 }
27417 }
27418@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27419 if (!check_pci_errors)
27420 return;
27421
27422- before_count = atomic_read(&pci_parity_count);
27423+ before_count = atomic_read_unchecked(&pci_parity_count);
27424
27425 /* scan all PCI devices looking for a Parity Error on devices and
27426 * bridges.
27427@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27428 /* Only if operator has selected panic on PCI Error */
27429 if (edac_pci_get_panic_on_pe()) {
27430 /* If the count is different 'after' from 'before' */
27431- if (before_count != atomic_read(&pci_parity_count))
27432+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27433 panic("EDAC: PCI Parity Error");
27434 }
27435 }
27436diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27437index c0510b3..6e2a954 100644
27438--- a/drivers/edac/i3000_edac.c
27439+++ b/drivers/edac/i3000_edac.c
27440@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27441 edac_mc_free(mci);
27442 }
27443
27444-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27445+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27446 {
27447 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27448 I3000},
27449diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27450index aa08497..7e6822a 100644
27451--- a/drivers/edac/i3200_edac.c
27452+++ b/drivers/edac/i3200_edac.c
27453@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27454 edac_mc_free(mci);
27455 }
27456
27457-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27458+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27459 {
27460 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27461 I3200},
27462diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27463index 4dc3ac2..67d05a6 100644
27464--- a/drivers/edac/i5000_edac.c
27465+++ b/drivers/edac/i5000_edac.c
27466@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27467 *
27468 * The "E500P" device is the first device supported.
27469 */
27470-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27471+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27472 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27473 .driver_data = I5000P},
27474
27475diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27476index bcbdeec..9886d16 100644
27477--- a/drivers/edac/i5100_edac.c
27478+++ b/drivers/edac/i5100_edac.c
27479@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27480 edac_mc_free(mci);
27481 }
27482
27483-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27484+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27485 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27486 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27487 { 0, }
27488diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27489index 74d6ec34..baff517 100644
27490--- a/drivers/edac/i5400_edac.c
27491+++ b/drivers/edac/i5400_edac.c
27492@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27493 *
27494 * The "E500P" device is the first device supported.
27495 */
27496-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27497+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27498 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27499 {0,} /* 0 terminated list. */
27500 };
27501diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27502index 6104dba..e7ea8e1 100644
27503--- a/drivers/edac/i7300_edac.c
27504+++ b/drivers/edac/i7300_edac.c
27505@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27506 *
27507 * Has only 8086:360c PCI ID
27508 */
27509-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27510+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27511 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27512 {0,} /* 0 terminated list. */
27513 };
27514diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27515index 70ad892..178943c 100644
27516--- a/drivers/edac/i7core_edac.c
27517+++ b/drivers/edac/i7core_edac.c
27518@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27519 /*
27520 * pci_device_id table for which devices we are looking for
27521 */
27522-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27523+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27524 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27525 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27526 {0,} /* 0 terminated list. */
27527diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27528index 4329d39..f3022ef 100644
27529--- a/drivers/edac/i82443bxgx_edac.c
27530+++ b/drivers/edac/i82443bxgx_edac.c
27531@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27532
27533 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27534
27535-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27536+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27537 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27538 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27539 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27540diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27541index 931a057..fd28340 100644
27542--- a/drivers/edac/i82860_edac.c
27543+++ b/drivers/edac/i82860_edac.c
27544@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27545 edac_mc_free(mci);
27546 }
27547
27548-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27549+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27550 {
27551 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27552 I82860},
27553diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27554index 33864c6..01edc61 100644
27555--- a/drivers/edac/i82875p_edac.c
27556+++ b/drivers/edac/i82875p_edac.c
27557@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27558 edac_mc_free(mci);
27559 }
27560
27561-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27562+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27563 {
27564 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27565 I82875P},
27566diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27567index a5da732..983363b 100644
27568--- a/drivers/edac/i82975x_edac.c
27569+++ b/drivers/edac/i82975x_edac.c
27570@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27571 edac_mc_free(mci);
27572 }
27573
27574-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27575+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27576 {
27577 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27578 I82975X
27579diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27580index 0106747..0b40417 100644
27581--- a/drivers/edac/mce_amd.h
27582+++ b/drivers/edac/mce_amd.h
27583@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27584 bool (*dc_mce)(u16, u8);
27585 bool (*ic_mce)(u16, u8);
27586 bool (*nb_mce)(u16, u8);
27587-};
27588+} __no_const;
27589
27590 void amd_report_gart_errors(bool);
27591 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27592diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27593index b153674..ad2ba9b 100644
27594--- a/drivers/edac/r82600_edac.c
27595+++ b/drivers/edac/r82600_edac.c
27596@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27597 edac_mc_free(mci);
27598 }
27599
27600-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27601+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27602 {
27603 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27604 },
27605diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27606index 7a402bf..af0b211 100644
27607--- a/drivers/edac/sb_edac.c
27608+++ b/drivers/edac/sb_edac.c
27609@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27610 /*
27611 * pci_device_id table for which devices we are looking for
27612 */
27613-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27614+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27615 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27616 {0,} /* 0 terminated list. */
27617 };
27618diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27619index b6f47de..c5acf3a 100644
27620--- a/drivers/edac/x38_edac.c
27621+++ b/drivers/edac/x38_edac.c
27622@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27623 edac_mc_free(mci);
27624 }
27625
27626-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27627+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27628 {
27629 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27630 X38},
27631diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27632index 85661b0..c784559a 100644
27633--- a/drivers/firewire/core-card.c
27634+++ b/drivers/firewire/core-card.c
27635@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27636
27637 void fw_core_remove_card(struct fw_card *card)
27638 {
27639- struct fw_card_driver dummy_driver = dummy_driver_template;
27640+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27641
27642 card->driver->update_phy_reg(card, 4,
27643 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27644diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27645index 4799393..37bd3ab 100644
27646--- a/drivers/firewire/core-cdev.c
27647+++ b/drivers/firewire/core-cdev.c
27648@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27649 int ret;
27650
27651 if ((request->channels == 0 && request->bandwidth == 0) ||
27652- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27653- request->bandwidth < 0)
27654+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27655 return -EINVAL;
27656
27657 r = kmalloc(sizeof(*r), GFP_KERNEL);
27658diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27659index 855ab3f..11f4bbd 100644
27660--- a/drivers/firewire/core-transaction.c
27661+++ b/drivers/firewire/core-transaction.c
27662@@ -37,6 +37,7 @@
27663 #include <linux/timer.h>
27664 #include <linux/types.h>
27665 #include <linux/workqueue.h>
27666+#include <linux/sched.h>
27667
27668 #include <asm/byteorder.h>
27669
27670diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27671index b45be57..5fad18b 100644
27672--- a/drivers/firewire/core.h
27673+++ b/drivers/firewire/core.h
27674@@ -101,6 +101,7 @@ struct fw_card_driver {
27675
27676 int (*stop_iso)(struct fw_iso_context *ctx);
27677 };
27678+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27679
27680 void fw_card_initialize(struct fw_card *card,
27681 const struct fw_card_driver *driver, struct device *device);
27682diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27683index 153980b..4b4d046 100644
27684--- a/drivers/firmware/dmi_scan.c
27685+++ b/drivers/firmware/dmi_scan.c
27686@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27687 }
27688 }
27689 else {
27690- /*
27691- * no iounmap() for that ioremap(); it would be a no-op, but
27692- * it's so early in setup that sucker gets confused into doing
27693- * what it shouldn't if we actually call it.
27694- */
27695 p = dmi_ioremap(0xF0000, 0x10000);
27696 if (p == NULL)
27697 goto error;
27698@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27699 if (buf == NULL)
27700 return -1;
27701
27702- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27703+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27704
27705 iounmap(buf);
27706 return 0;
27707diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27708index 98723cb..10ca85b 100644
27709--- a/drivers/gpio/gpio-vr41xx.c
27710+++ b/drivers/gpio/gpio-vr41xx.c
27711@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27712 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27713 maskl, pendl, maskh, pendh);
27714
27715- atomic_inc(&irq_err_count);
27716+ atomic_inc_unchecked(&irq_err_count);
27717
27718 return -EINVAL;
27719 }
27720diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27721index 8323fc3..5c1d755 100644
27722--- a/drivers/gpu/drm/drm_crtc.c
27723+++ b/drivers/gpu/drm/drm_crtc.c
27724@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27725 */
27726 if ((out_resp->count_modes >= mode_count) && mode_count) {
27727 copied = 0;
27728- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27729+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27730 list_for_each_entry(mode, &connector->modes, head) {
27731 drm_crtc_convert_to_umode(&u_mode, mode);
27732 if (copy_to_user(mode_ptr + copied,
27733@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27734
27735 if ((out_resp->count_props >= props_count) && props_count) {
27736 copied = 0;
27737- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27738- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27739+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27740+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27741 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27742 if (connector->property_ids[i] != 0) {
27743 if (put_user(connector->property_ids[i],
27744@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27745
27746 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27747 copied = 0;
27748- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27749+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27750 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27751 if (connector->encoder_ids[i] != 0) {
27752 if (put_user(connector->encoder_ids[i],
27753@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27754 }
27755
27756 for (i = 0; i < crtc_req->count_connectors; i++) {
27757- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27758+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27759 if (get_user(out_id, &set_connectors_ptr[i])) {
27760 ret = -EFAULT;
27761 goto out;
27762@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27763 fb = obj_to_fb(obj);
27764
27765 num_clips = r->num_clips;
27766- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27767+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27768
27769 if (!num_clips != !clips_ptr) {
27770 ret = -EINVAL;
27771@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27772 out_resp->flags = property->flags;
27773
27774 if ((out_resp->count_values >= value_count) && value_count) {
27775- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27776+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27777 for (i = 0; i < value_count; i++) {
27778 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27779 ret = -EFAULT;
27780@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27781 if (property->flags & DRM_MODE_PROP_ENUM) {
27782 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27783 copied = 0;
27784- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27785+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27786 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27787
27788 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27789@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27790 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27791 copied = 0;
27792 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27793- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27794+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27795
27796 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27797 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27798@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27799 struct drm_mode_get_blob *out_resp = data;
27800 struct drm_property_blob *blob;
27801 int ret = 0;
27802- void *blob_ptr;
27803+ void __user *blob_ptr;
27804
27805 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27806 return -EINVAL;
27807@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27808 blob = obj_to_blob(obj);
27809
27810 if (out_resp->length == blob->length) {
27811- blob_ptr = (void *)(unsigned long)out_resp->data;
27812+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27813 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27814 ret = -EFAULT;
27815 goto done;
27816diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27817index d2619d7..bd6bd00 100644
27818--- a/drivers/gpu/drm/drm_crtc_helper.c
27819+++ b/drivers/gpu/drm/drm_crtc_helper.c
27820@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27821 struct drm_crtc *tmp;
27822 int crtc_mask = 1;
27823
27824- WARN(!crtc, "checking null crtc?\n");
27825+ BUG_ON(!crtc);
27826
27827 dev = crtc->dev;
27828
27829diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27830index 40c187c..5746164 100644
27831--- a/drivers/gpu/drm/drm_drv.c
27832+++ b/drivers/gpu/drm/drm_drv.c
27833@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27834 /**
27835 * Copy and IOCTL return string to user space
27836 */
27837-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27838+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27839 {
27840 int len;
27841
27842@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27843
27844 dev = file_priv->minor->dev;
27845 atomic_inc(&dev->ioctl_count);
27846- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27847+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27848 ++file_priv->ioctl_count;
27849
27850 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27851diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27852index 828bf65..cdaa0e9 100644
27853--- a/drivers/gpu/drm/drm_fops.c
27854+++ b/drivers/gpu/drm/drm_fops.c
27855@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27856 }
27857
27858 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27859- atomic_set(&dev->counts[i], 0);
27860+ atomic_set_unchecked(&dev->counts[i], 0);
27861
27862 dev->sigdata.lock = NULL;
27863
27864@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27865
27866 retcode = drm_open_helper(inode, filp, dev);
27867 if (!retcode) {
27868- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27869- if (!dev->open_count++)
27870+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27871+ if (local_inc_return(&dev->open_count) == 1)
27872 retcode = drm_setup(dev);
27873 }
27874 if (!retcode) {
27875@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27876
27877 mutex_lock(&drm_global_mutex);
27878
27879- DRM_DEBUG("open_count = %d\n", dev->open_count);
27880+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27881
27882 if (dev->driver->preclose)
27883 dev->driver->preclose(dev, file_priv);
27884@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27885 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27886 task_pid_nr(current),
27887 (long)old_encode_dev(file_priv->minor->device),
27888- dev->open_count);
27889+ local_read(&dev->open_count));
27890
27891 /* Release any auth tokens that might point to this file_priv,
27892 (do that under the drm_global_mutex) */
27893@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
27894 * End inline drm_release
27895 */
27896
27897- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27898- if (!--dev->open_count) {
27899+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27900+ if (local_dec_and_test(&dev->open_count)) {
27901 if (atomic_read(&dev->ioctl_count)) {
27902 DRM_ERROR("Device busy: %d\n",
27903 atomic_read(&dev->ioctl_count));
27904diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27905index c87dc96..326055d 100644
27906--- a/drivers/gpu/drm/drm_global.c
27907+++ b/drivers/gpu/drm/drm_global.c
27908@@ -36,7 +36,7 @@
27909 struct drm_global_item {
27910 struct mutex mutex;
27911 void *object;
27912- int refcount;
27913+ atomic_t refcount;
27914 };
27915
27916 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27917@@ -49,7 +49,7 @@ void drm_global_init(void)
27918 struct drm_global_item *item = &glob[i];
27919 mutex_init(&item->mutex);
27920 item->object = NULL;
27921- item->refcount = 0;
27922+ atomic_set(&item->refcount, 0);
27923 }
27924 }
27925
27926@@ -59,7 +59,7 @@ void drm_global_release(void)
27927 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27928 struct drm_global_item *item = &glob[i];
27929 BUG_ON(item->object != NULL);
27930- BUG_ON(item->refcount != 0);
27931+ BUG_ON(atomic_read(&item->refcount) != 0);
27932 }
27933 }
27934
27935@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27936 void *object;
27937
27938 mutex_lock(&item->mutex);
27939- if (item->refcount == 0) {
27940+ if (atomic_read(&item->refcount) == 0) {
27941 item->object = kzalloc(ref->size, GFP_KERNEL);
27942 if (unlikely(item->object == NULL)) {
27943 ret = -ENOMEM;
27944@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27945 goto out_err;
27946
27947 }
27948- ++item->refcount;
27949+ atomic_inc(&item->refcount);
27950 ref->object = item->object;
27951 object = item->object;
27952 mutex_unlock(&item->mutex);
27953@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
27954 struct drm_global_item *item = &glob[ref->global_type];
27955
27956 mutex_lock(&item->mutex);
27957- BUG_ON(item->refcount == 0);
27958+ BUG_ON(atomic_read(&item->refcount) == 0);
27959 BUG_ON(ref->object != item->object);
27960- if (--item->refcount == 0) {
27961+ if (atomic_dec_and_test(&item->refcount)) {
27962 ref->release(ref);
27963 item->object = NULL;
27964 }
27965diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
27966index ab1162d..42587b2 100644
27967--- a/drivers/gpu/drm/drm_info.c
27968+++ b/drivers/gpu/drm/drm_info.c
27969@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
27970 struct drm_local_map *map;
27971 struct drm_map_list *r_list;
27972
27973- /* Hardcoded from _DRM_FRAME_BUFFER,
27974- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27975- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27976- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27977+ static const char * const types[] = {
27978+ [_DRM_FRAME_BUFFER] = "FB",
27979+ [_DRM_REGISTERS] = "REG",
27980+ [_DRM_SHM] = "SHM",
27981+ [_DRM_AGP] = "AGP",
27982+ [_DRM_SCATTER_GATHER] = "SG",
27983+ [_DRM_CONSISTENT] = "PCI",
27984+ [_DRM_GEM] = "GEM" };
27985 const char *type;
27986 int i;
27987
27988@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
27989 map = r_list->map;
27990 if (!map)
27991 continue;
27992- if (map->type < 0 || map->type > 5)
27993+ if (map->type >= ARRAY_SIZE(types))
27994 type = "??";
27995 else
27996 type = types[map->type];
27997@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
27998 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27999 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28000 vma->vm_flags & VM_IO ? 'i' : '-',
28001+#ifdef CONFIG_GRKERNSEC_HIDESYM
28002+ 0);
28003+#else
28004 vma->vm_pgoff);
28005+#endif
28006
28007 #if defined(__i386__)
28008 pgprot = pgprot_val(vma->vm_page_prot);
28009diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28010index ddd70db..40321e6 100644
28011--- a/drivers/gpu/drm/drm_ioc32.c
28012+++ b/drivers/gpu/drm/drm_ioc32.c
28013@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28014 request = compat_alloc_user_space(nbytes);
28015 if (!access_ok(VERIFY_WRITE, request, nbytes))
28016 return -EFAULT;
28017- list = (struct drm_buf_desc *) (request + 1);
28018+ list = (struct drm_buf_desc __user *) (request + 1);
28019
28020 if (__put_user(count, &request->count)
28021 || __put_user(list, &request->list))
28022@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28023 request = compat_alloc_user_space(nbytes);
28024 if (!access_ok(VERIFY_WRITE, request, nbytes))
28025 return -EFAULT;
28026- list = (struct drm_buf_pub *) (request + 1);
28027+ list = (struct drm_buf_pub __user *) (request + 1);
28028
28029 if (__put_user(count, &request->count)
28030 || __put_user(list, &request->list))
28031diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28032index 904d7e9..ab88581 100644
28033--- a/drivers/gpu/drm/drm_ioctl.c
28034+++ b/drivers/gpu/drm/drm_ioctl.c
28035@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28036 stats->data[i].value =
28037 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28038 else
28039- stats->data[i].value = atomic_read(&dev->counts[i]);
28040+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28041 stats->data[i].type = dev->types[i];
28042 }
28043
28044diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28045index 632ae24..244cf4a 100644
28046--- a/drivers/gpu/drm/drm_lock.c
28047+++ b/drivers/gpu/drm/drm_lock.c
28048@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28049 if (drm_lock_take(&master->lock, lock->context)) {
28050 master->lock.file_priv = file_priv;
28051 master->lock.lock_time = jiffies;
28052- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28053+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28054 break; /* Got lock */
28055 }
28056
28057@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28058 return -EINVAL;
28059 }
28060
28061- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28062+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28063
28064 if (drm_lock_free(&master->lock, lock->context)) {
28065 /* FIXME: Should really bail out here. */
28066diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28067index 8f371e8..9f85d52 100644
28068--- a/drivers/gpu/drm/i810/i810_dma.c
28069+++ b/drivers/gpu/drm/i810/i810_dma.c
28070@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28071 dma->buflist[vertex->idx],
28072 vertex->discard, vertex->used);
28073
28074- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28075- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28076+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28077+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28078 sarea_priv->last_enqueue = dev_priv->counter - 1;
28079 sarea_priv->last_dispatch = (int)hw_status[5];
28080
28081@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28082 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28083 mc->last_render);
28084
28085- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28086- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28087+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28088+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28089 sarea_priv->last_enqueue = dev_priv->counter - 1;
28090 sarea_priv->last_dispatch = (int)hw_status[5];
28091
28092diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28093index c9339f4..f5e1b9d 100644
28094--- a/drivers/gpu/drm/i810/i810_drv.h
28095+++ b/drivers/gpu/drm/i810/i810_drv.h
28096@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28097 int page_flipping;
28098
28099 wait_queue_head_t irq_queue;
28100- atomic_t irq_received;
28101- atomic_t irq_emitted;
28102+ atomic_unchecked_t irq_received;
28103+ atomic_unchecked_t irq_emitted;
28104
28105 int front_offset;
28106 } drm_i810_private_t;
28107diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28108index 004b048..7588eba 100644
28109--- a/drivers/gpu/drm/i915/i915_debugfs.c
28110+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28111@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28112 I915_READ(GTIMR));
28113 }
28114 seq_printf(m, "Interrupts received: %d\n",
28115- atomic_read(&dev_priv->irq_received));
28116+ atomic_read_unchecked(&dev_priv->irq_received));
28117 for (i = 0; i < I915_NUM_RINGS; i++) {
28118 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28119 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28120@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28121 return ret;
28122
28123 if (opregion->header)
28124- seq_write(m, opregion->header, OPREGION_SIZE);
28125+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28126
28127 mutex_unlock(&dev->struct_mutex);
28128
28129diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28130index a9ae374..43c1e9e 100644
28131--- a/drivers/gpu/drm/i915/i915_dma.c
28132+++ b/drivers/gpu/drm/i915/i915_dma.c
28133@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28134 bool can_switch;
28135
28136 spin_lock(&dev->count_lock);
28137- can_switch = (dev->open_count == 0);
28138+ can_switch = (local_read(&dev->open_count) == 0);
28139 spin_unlock(&dev->count_lock);
28140 return can_switch;
28141 }
28142diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28143index 554bef7..d24791c 100644
28144--- a/drivers/gpu/drm/i915/i915_drv.h
28145+++ b/drivers/gpu/drm/i915/i915_drv.h
28146@@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28147 /* render clock increase/decrease */
28148 /* display clock increase/decrease */
28149 /* pll clock increase/decrease */
28150-};
28151+} __no_const;
28152
28153 struct intel_device_info {
28154 u8 gen;
28155@@ -312,7 +312,7 @@ typedef struct drm_i915_private {
28156 int current_page;
28157 int page_flipping;
28158
28159- atomic_t irq_received;
28160+ atomic_unchecked_t irq_received;
28161
28162 /* protects the irq masks */
28163 spinlock_t irq_lock;
28164@@ -887,7 +887,7 @@ struct drm_i915_gem_object {
28165 * will be page flipped away on the next vblank. When it
28166 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28167 */
28168- atomic_t pending_flip;
28169+ atomic_unchecked_t pending_flip;
28170 };
28171
28172 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28173@@ -1267,7 +1267,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28174 extern void intel_teardown_gmbus(struct drm_device *dev);
28175 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28176 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28177-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28178+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28179 {
28180 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28181 }
28182diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28183index b9da890..cad1d98 100644
28184--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28185+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28186@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28187 i915_gem_clflush_object(obj);
28188
28189 if (obj->base.pending_write_domain)
28190- cd->flips |= atomic_read(&obj->pending_flip);
28191+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28192
28193 /* The actual obj->write_domain will be updated with
28194 * pending_write_domain after we emit the accumulated flush for all
28195@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28196
28197 static int
28198 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28199- int count)
28200+ unsigned int count)
28201 {
28202- int i;
28203+ unsigned int i;
28204
28205 for (i = 0; i < count; i++) {
28206 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28207diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28208index b40004b..7c53a75 100644
28209--- a/drivers/gpu/drm/i915/i915_irq.c
28210+++ b/drivers/gpu/drm/i915/i915_irq.c
28211@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28212 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28213 struct drm_i915_master_private *master_priv;
28214
28215- atomic_inc(&dev_priv->irq_received);
28216+ atomic_inc_unchecked(&dev_priv->irq_received);
28217
28218 /* disable master interrupt before clearing iir */
28219 de_ier = I915_READ(DEIER);
28220@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28221 struct drm_i915_master_private *master_priv;
28222 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28223
28224- atomic_inc(&dev_priv->irq_received);
28225+ atomic_inc_unchecked(&dev_priv->irq_received);
28226
28227 if (IS_GEN6(dev))
28228 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28229@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28230 int ret = IRQ_NONE, pipe;
28231 bool blc_event = false;
28232
28233- atomic_inc(&dev_priv->irq_received);
28234+ atomic_inc_unchecked(&dev_priv->irq_received);
28235
28236 iir = I915_READ(IIR);
28237
28238@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28239 {
28240 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28241
28242- atomic_set(&dev_priv->irq_received, 0);
28243+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28244
28245 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28246 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28247@@ -1931,7 +1931,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28248 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28249 int pipe;
28250
28251- atomic_set(&dev_priv->irq_received, 0);
28252+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28253
28254 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28255 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28256diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28257index daa5743..c0757a9 100644
28258--- a/drivers/gpu/drm/i915/intel_display.c
28259+++ b/drivers/gpu/drm/i915/intel_display.c
28260@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28261
28262 wait_event(dev_priv->pending_flip_queue,
28263 atomic_read(&dev_priv->mm.wedged) ||
28264- atomic_read(&obj->pending_flip) == 0);
28265+ atomic_read_unchecked(&obj->pending_flip) == 0);
28266
28267 /* Big Hammer, we also need to ensure that any pending
28268 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28269@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28270 obj = to_intel_framebuffer(crtc->fb)->obj;
28271 dev_priv = crtc->dev->dev_private;
28272 wait_event(dev_priv->pending_flip_queue,
28273- atomic_read(&obj->pending_flip) == 0);
28274+ atomic_read_unchecked(&obj->pending_flip) == 0);
28275 }
28276
28277 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28278@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28279
28280 atomic_clear_mask(1 << intel_crtc->plane,
28281 &obj->pending_flip.counter);
28282- if (atomic_read(&obj->pending_flip) == 0)
28283+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28284 wake_up(&dev_priv->pending_flip_queue);
28285
28286 schedule_work(&work->work);
28287@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28288 /* Block clients from rendering to the new back buffer until
28289 * the flip occurs and the object is no longer visible.
28290 */
28291- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28292+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28293
28294 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28295 if (ret)
28296@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28297 return 0;
28298
28299 cleanup_pending:
28300- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28301+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28302 drm_gem_object_unreference(&work->old_fb_obj->base);
28303 drm_gem_object_unreference(&obj->base);
28304 mutex_unlock(&dev->struct_mutex);
28305diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28306index 54558a0..2d97005 100644
28307--- a/drivers/gpu/drm/mga/mga_drv.h
28308+++ b/drivers/gpu/drm/mga/mga_drv.h
28309@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28310 u32 clear_cmd;
28311 u32 maccess;
28312
28313- atomic_t vbl_received; /**< Number of vblanks received. */
28314+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28315 wait_queue_head_t fence_queue;
28316- atomic_t last_fence_retired;
28317+ atomic_unchecked_t last_fence_retired;
28318 u32 next_fence_to_post;
28319
28320 unsigned int fb_cpp;
28321diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28322index 2581202..f230a8d9 100644
28323--- a/drivers/gpu/drm/mga/mga_irq.c
28324+++ b/drivers/gpu/drm/mga/mga_irq.c
28325@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28326 if (crtc != 0)
28327 return 0;
28328
28329- return atomic_read(&dev_priv->vbl_received);
28330+ return atomic_read_unchecked(&dev_priv->vbl_received);
28331 }
28332
28333
28334@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28335 /* VBLANK interrupt */
28336 if (status & MGA_VLINEPEN) {
28337 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28338- atomic_inc(&dev_priv->vbl_received);
28339+ atomic_inc_unchecked(&dev_priv->vbl_received);
28340 drm_handle_vblank(dev, 0);
28341 handled = 1;
28342 }
28343@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28344 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28345 MGA_WRITE(MGA_PRIMEND, prim_end);
28346
28347- atomic_inc(&dev_priv->last_fence_retired);
28348+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28349 DRM_WAKEUP(&dev_priv->fence_queue);
28350 handled = 1;
28351 }
28352@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28353 * using fences.
28354 */
28355 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28356- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28357+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28358 - *sequence) <= (1 << 23)));
28359
28360 *sequence = cur_fence;
28361diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28362index 5fc201b..7b032b9 100644
28363--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28364+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28365@@ -201,7 +201,7 @@ struct methods {
28366 const char desc[8];
28367 void (*loadbios)(struct drm_device *, uint8_t *);
28368 const bool rw;
28369-};
28370+} __do_const;
28371
28372 static struct methods shadow_methods[] = {
28373 { "PRAMIN", load_vbios_pramin, true },
28374@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28375 struct bit_table {
28376 const char id;
28377 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28378-};
28379+} __no_const;
28380
28381 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28382
28383diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28384index 4c0be3a..5757582 100644
28385--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28386+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28387@@ -238,7 +238,7 @@ struct nouveau_channel {
28388 struct list_head pending;
28389 uint32_t sequence;
28390 uint32_t sequence_ack;
28391- atomic_t last_sequence_irq;
28392+ atomic_unchecked_t last_sequence_irq;
28393 struct nouveau_vma vma;
28394 } fence;
28395
28396@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28397 u32 handle, u16 class);
28398 void (*set_tile_region)(struct drm_device *dev, int i);
28399 void (*tlb_flush)(struct drm_device *, int engine);
28400-};
28401+} __no_const;
28402
28403 struct nouveau_instmem_engine {
28404 void *priv;
28405@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28406 struct nouveau_mc_engine {
28407 int (*init)(struct drm_device *dev);
28408 void (*takedown)(struct drm_device *dev);
28409-};
28410+} __no_const;
28411
28412 struct nouveau_timer_engine {
28413 int (*init)(struct drm_device *dev);
28414 void (*takedown)(struct drm_device *dev);
28415 uint64_t (*read)(struct drm_device *dev);
28416-};
28417+} __no_const;
28418
28419 struct nouveau_fb_engine {
28420 int num_tiles;
28421@@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28422 void (*put)(struct drm_device *, struct nouveau_mem **);
28423
28424 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28425-};
28426+} __no_const;
28427
28428 struct nouveau_engine {
28429 struct nouveau_instmem_engine instmem;
28430@@ -706,7 +706,7 @@ struct drm_nouveau_private {
28431 struct drm_global_reference mem_global_ref;
28432 struct ttm_bo_global_ref bo_global_ref;
28433 struct ttm_bo_device bdev;
28434- atomic_t validate_sequence;
28435+ atomic_unchecked_t validate_sequence;
28436 } ttm;
28437
28438 struct {
28439diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28440index 2f6daae..c9d7b9e 100644
28441--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28442+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28443@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28444 if (USE_REFCNT(dev))
28445 sequence = nvchan_rd32(chan, 0x48);
28446 else
28447- sequence = atomic_read(&chan->fence.last_sequence_irq);
28448+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28449
28450 if (chan->fence.sequence_ack == sequence)
28451 goto out;
28452@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28453 return ret;
28454 }
28455
28456- atomic_set(&chan->fence.last_sequence_irq, 0);
28457+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28458 return 0;
28459 }
28460
28461diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28462index 5f0bc57..eb9fac8 100644
28463--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28464+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28465@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28466 int trycnt = 0;
28467 int ret, i;
28468
28469- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28470+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28471 retry:
28472 if (++trycnt > 100000) {
28473 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28474diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28475index d8831ab..0ba8356 100644
28476--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28477+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28478@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28479 bool can_switch;
28480
28481 spin_lock(&dev->count_lock);
28482- can_switch = (dev->open_count == 0);
28483+ can_switch = (local_read(&dev->open_count) == 0);
28484 spin_unlock(&dev->count_lock);
28485 return can_switch;
28486 }
28487diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28488index dbdea8e..cd6eeeb 100644
28489--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28490+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28491@@ -554,7 +554,7 @@ static int
28492 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28493 u32 class, u32 mthd, u32 data)
28494 {
28495- atomic_set(&chan->fence.last_sequence_irq, data);
28496+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28497 return 0;
28498 }
28499
28500diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28501index bcac90b..53bfc76 100644
28502--- a/drivers/gpu/drm/r128/r128_cce.c
28503+++ b/drivers/gpu/drm/r128/r128_cce.c
28504@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28505
28506 /* GH: Simple idle check.
28507 */
28508- atomic_set(&dev_priv->idle_count, 0);
28509+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28510
28511 /* We don't support anything other than bus-mastering ring mode,
28512 * but the ring can be in either AGP or PCI space for the ring
28513diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28514index 930c71b..499aded 100644
28515--- a/drivers/gpu/drm/r128/r128_drv.h
28516+++ b/drivers/gpu/drm/r128/r128_drv.h
28517@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28518 int is_pci;
28519 unsigned long cce_buffers_offset;
28520
28521- atomic_t idle_count;
28522+ atomic_unchecked_t idle_count;
28523
28524 int page_flipping;
28525 int current_page;
28526 u32 crtc_offset;
28527 u32 crtc_offset_cntl;
28528
28529- atomic_t vbl_received;
28530+ atomic_unchecked_t vbl_received;
28531
28532 u32 color_fmt;
28533 unsigned int front_offset;
28534diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28535index 429d5a0..7e899ed 100644
28536--- a/drivers/gpu/drm/r128/r128_irq.c
28537+++ b/drivers/gpu/drm/r128/r128_irq.c
28538@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28539 if (crtc != 0)
28540 return 0;
28541
28542- return atomic_read(&dev_priv->vbl_received);
28543+ return atomic_read_unchecked(&dev_priv->vbl_received);
28544 }
28545
28546 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28547@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28548 /* VBLANK interrupt */
28549 if (status & R128_CRTC_VBLANK_INT) {
28550 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28551- atomic_inc(&dev_priv->vbl_received);
28552+ atomic_inc_unchecked(&dev_priv->vbl_received);
28553 drm_handle_vblank(dev, 0);
28554 return IRQ_HANDLED;
28555 }
28556diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28557index a9e33ce..09edd4b 100644
28558--- a/drivers/gpu/drm/r128/r128_state.c
28559+++ b/drivers/gpu/drm/r128/r128_state.c
28560@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28561
28562 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28563 {
28564- if (atomic_read(&dev_priv->idle_count) == 0)
28565+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28566 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28567 else
28568- atomic_set(&dev_priv->idle_count, 0);
28569+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28570 }
28571
28572 #endif
28573diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28574index 5a82b6b..9e69c73 100644
28575--- a/drivers/gpu/drm/radeon/mkregtable.c
28576+++ b/drivers/gpu/drm/radeon/mkregtable.c
28577@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28578 regex_t mask_rex;
28579 regmatch_t match[4];
28580 char buf[1024];
28581- size_t end;
28582+ long end;
28583 int len;
28584 int done = 0;
28585 int r;
28586 unsigned o;
28587 struct offset *offset;
28588 char last_reg_s[10];
28589- int last_reg;
28590+ unsigned long last_reg;
28591
28592 if (regcomp
28593 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28594diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28595index 8227e76..ce0b195 100644
28596--- a/drivers/gpu/drm/radeon/radeon.h
28597+++ b/drivers/gpu/drm/radeon/radeon.h
28598@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28599 */
28600 struct radeon_fence_driver {
28601 uint32_t scratch_reg;
28602- atomic_t seq;
28603+ atomic_unchecked_t seq;
28604 uint32_t last_seq;
28605 unsigned long last_jiffies;
28606 unsigned long last_timeout;
28607@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28608 int x2, int y2);
28609 void (*draw_auto)(struct radeon_device *rdev);
28610 void (*set_default_state)(struct radeon_device *rdev);
28611-};
28612+} __no_const;
28613
28614 struct r600_blit {
28615 struct mutex mutex;
28616@@ -954,7 +954,7 @@ struct radeon_asic {
28617 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28618 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28619 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28620-};
28621+} __no_const;
28622
28623 /*
28624 * Asic structures
28625diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28626index 9b39145..389b93b 100644
28627--- a/drivers/gpu/drm/radeon/radeon_device.c
28628+++ b/drivers/gpu/drm/radeon/radeon_device.c
28629@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28630 bool can_switch;
28631
28632 spin_lock(&dev->count_lock);
28633- can_switch = (dev->open_count == 0);
28634+ can_switch = (local_read(&dev->open_count) == 0);
28635 spin_unlock(&dev->count_lock);
28636 return can_switch;
28637 }
28638diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28639index a1b59ca..86f2d44 100644
28640--- a/drivers/gpu/drm/radeon/radeon_drv.h
28641+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28642@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28643
28644 /* SW interrupt */
28645 wait_queue_head_t swi_queue;
28646- atomic_t swi_emitted;
28647+ atomic_unchecked_t swi_emitted;
28648 int vblank_crtc;
28649 uint32_t irq_enable_reg;
28650 uint32_t r500_disp_irq_reg;
28651diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28652index 76ec0e9..6feb1a3 100644
28653--- a/drivers/gpu/drm/radeon/radeon_fence.c
28654+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28655@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28656 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28657 return 0;
28658 }
28659- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28660+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28661 if (!rdev->cp.ready)
28662 /* FIXME: cp is not running assume everythings is done right
28663 * away
28664@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28665 return r;
28666 }
28667 radeon_fence_write(rdev, 0);
28668- atomic_set(&rdev->fence_drv.seq, 0);
28669+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28670 INIT_LIST_HEAD(&rdev->fence_drv.created);
28671 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28672 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28673diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28674index 48b7cea..342236f 100644
28675--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28676+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28677@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28678 request = compat_alloc_user_space(sizeof(*request));
28679 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28680 || __put_user(req32.param, &request->param)
28681- || __put_user((void __user *)(unsigned long)req32.value,
28682+ || __put_user((unsigned long)req32.value,
28683 &request->value))
28684 return -EFAULT;
28685
28686diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28687index 00da384..32f972d 100644
28688--- a/drivers/gpu/drm/radeon/radeon_irq.c
28689+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28690@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28691 unsigned int ret;
28692 RING_LOCALS;
28693
28694- atomic_inc(&dev_priv->swi_emitted);
28695- ret = atomic_read(&dev_priv->swi_emitted);
28696+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28697+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28698
28699 BEGIN_RING(4);
28700 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28701@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28702 drm_radeon_private_t *dev_priv =
28703 (drm_radeon_private_t *) dev->dev_private;
28704
28705- atomic_set(&dev_priv->swi_emitted, 0);
28706+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28707 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28708
28709 dev->max_vblank_count = 0x001fffff;
28710diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28711index e8422ae..d22d4a8 100644
28712--- a/drivers/gpu/drm/radeon/radeon_state.c
28713+++ b/drivers/gpu/drm/radeon/radeon_state.c
28714@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28715 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28716 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28717
28718- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28719+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28720 sarea_priv->nbox * sizeof(depth_boxes[0])))
28721 return -EFAULT;
28722
28723@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28724 {
28725 drm_radeon_private_t *dev_priv = dev->dev_private;
28726 drm_radeon_getparam_t *param = data;
28727- int value;
28728+ int value = 0;
28729
28730 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28731
28732diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28733index 0b5468b..9c4b308 100644
28734--- a/drivers/gpu/drm/radeon/radeon_ttm.c
28735+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28736@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28737 }
28738 if (unlikely(ttm_vm_ops == NULL)) {
28739 ttm_vm_ops = vma->vm_ops;
28740- radeon_ttm_vm_ops = *ttm_vm_ops;
28741- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28742+ pax_open_kernel();
28743+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28744+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28745+ pax_close_kernel();
28746 }
28747 vma->vm_ops = &radeon_ttm_vm_ops;
28748 return 0;
28749diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28750index a9049ed..501f284 100644
28751--- a/drivers/gpu/drm/radeon/rs690.c
28752+++ b/drivers/gpu/drm/radeon/rs690.c
28753@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28754 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28755 rdev->pm.sideport_bandwidth.full)
28756 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28757- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28758+ read_delay_latency.full = dfixed_const(800 * 1000);
28759 read_delay_latency.full = dfixed_div(read_delay_latency,
28760 rdev->pm.igp_sideport_mclk);
28761+ a.full = dfixed_const(370);
28762+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28763 } else {
28764 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28765 rdev->pm.k8_bandwidth.full)
28766diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28767index 727e93d..1565650 100644
28768--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28769+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28770@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28771 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28772 struct shrink_control *sc)
28773 {
28774- static atomic_t start_pool = ATOMIC_INIT(0);
28775+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28776 unsigned i;
28777- unsigned pool_offset = atomic_add_return(1, &start_pool);
28778+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28779 struct ttm_page_pool *pool;
28780 int shrink_pages = sc->nr_to_scan;
28781
28782diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28783index 9cf87d9..2000b7d 100644
28784--- a/drivers/gpu/drm/via/via_drv.h
28785+++ b/drivers/gpu/drm/via/via_drv.h
28786@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28787 typedef uint32_t maskarray_t[5];
28788
28789 typedef struct drm_via_irq {
28790- atomic_t irq_received;
28791+ atomic_unchecked_t irq_received;
28792 uint32_t pending_mask;
28793 uint32_t enable_mask;
28794 wait_queue_head_t irq_queue;
28795@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28796 struct timeval last_vblank;
28797 int last_vblank_valid;
28798 unsigned usec_per_vblank;
28799- atomic_t vbl_received;
28800+ atomic_unchecked_t vbl_received;
28801 drm_via_state_t hc_state;
28802 char pci_buf[VIA_PCI_BUF_SIZE];
28803 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28804diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28805index d391f48..10c8ca3 100644
28806--- a/drivers/gpu/drm/via/via_irq.c
28807+++ b/drivers/gpu/drm/via/via_irq.c
28808@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28809 if (crtc != 0)
28810 return 0;
28811
28812- return atomic_read(&dev_priv->vbl_received);
28813+ return atomic_read_unchecked(&dev_priv->vbl_received);
28814 }
28815
28816 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28817@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28818
28819 status = VIA_READ(VIA_REG_INTERRUPT);
28820 if (status & VIA_IRQ_VBLANK_PENDING) {
28821- atomic_inc(&dev_priv->vbl_received);
28822- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28823+ atomic_inc_unchecked(&dev_priv->vbl_received);
28824+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28825 do_gettimeofday(&cur_vblank);
28826 if (dev_priv->last_vblank_valid) {
28827 dev_priv->usec_per_vblank =
28828@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28829 dev_priv->last_vblank = cur_vblank;
28830 dev_priv->last_vblank_valid = 1;
28831 }
28832- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28833+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28834 DRM_DEBUG("US per vblank is: %u\n",
28835 dev_priv->usec_per_vblank);
28836 }
28837@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28838
28839 for (i = 0; i < dev_priv->num_irqs; ++i) {
28840 if (status & cur_irq->pending_mask) {
28841- atomic_inc(&cur_irq->irq_received);
28842+ atomic_inc_unchecked(&cur_irq->irq_received);
28843 DRM_WAKEUP(&cur_irq->irq_queue);
28844 handled = 1;
28845 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28846@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28847 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28848 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28849 masks[irq][4]));
28850- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28851+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28852 } else {
28853 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28854 (((cur_irq_sequence =
28855- atomic_read(&cur_irq->irq_received)) -
28856+ atomic_read_unchecked(&cur_irq->irq_received)) -
28857 *sequence) <= (1 << 23)));
28858 }
28859 *sequence = cur_irq_sequence;
28860@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28861 }
28862
28863 for (i = 0; i < dev_priv->num_irqs; ++i) {
28864- atomic_set(&cur_irq->irq_received, 0);
28865+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28866 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28867 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28868 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28869@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28870 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28871 case VIA_IRQ_RELATIVE:
28872 irqwait->request.sequence +=
28873- atomic_read(&cur_irq->irq_received);
28874+ atomic_read_unchecked(&cur_irq->irq_received);
28875 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28876 case VIA_IRQ_ABSOLUTE:
28877 break;
28878diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28879index dc27970..f18b008 100644
28880--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28881+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28882@@ -260,7 +260,7 @@ struct vmw_private {
28883 * Fencing and IRQs.
28884 */
28885
28886- atomic_t marker_seq;
28887+ atomic_unchecked_t marker_seq;
28888 wait_queue_head_t fence_queue;
28889 wait_queue_head_t fifo_queue;
28890 int fence_queue_waiters; /* Protected by hw_mutex */
28891diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28892index a0c2f12..68ae6cb 100644
28893--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28894+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28895@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28896 (unsigned int) min,
28897 (unsigned int) fifo->capabilities);
28898
28899- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28900+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28901 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28902 vmw_marker_queue_init(&fifo->marker_queue);
28903 return vmw_fifo_send_fence(dev_priv, &dummy);
28904@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28905 if (reserveable)
28906 iowrite32(bytes, fifo_mem +
28907 SVGA_FIFO_RESERVED);
28908- return fifo_mem + (next_cmd >> 2);
28909+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28910 } else {
28911 need_bounce = true;
28912 }
28913@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28914
28915 fm = vmw_fifo_reserve(dev_priv, bytes);
28916 if (unlikely(fm == NULL)) {
28917- *seqno = atomic_read(&dev_priv->marker_seq);
28918+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28919 ret = -ENOMEM;
28920 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
28921 false, 3*HZ);
28922@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28923 }
28924
28925 do {
28926- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
28927+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
28928 } while (*seqno == 0);
28929
28930 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28931diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28932index cabc95f..14b3d77 100644
28933--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28934+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28935@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
28936 * emitted. Then the fence is stale and signaled.
28937 */
28938
28939- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
28940+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
28941 > VMW_FENCE_WRAP);
28942
28943 return ret;
28944@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
28945
28946 if (fifo_idle)
28947 down_read(&fifo_state->rwsem);
28948- signal_seq = atomic_read(&dev_priv->marker_seq);
28949+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
28950 ret = 0;
28951
28952 for (;;) {
28953diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28954index 8a8725c..afed796 100644
28955--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28956+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28957@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28958 while (!vmw_lag_lt(queue, us)) {
28959 spin_lock(&queue->lock);
28960 if (list_empty(&queue->head))
28961- seqno = atomic_read(&dev_priv->marker_seq);
28962+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28963 else {
28964 marker = list_first_entry(&queue->head,
28965 struct vmw_marker, head);
28966diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
28967index bb656d8..4169fca 100644
28968--- a/drivers/hid/hid-core.c
28969+++ b/drivers/hid/hid-core.c
28970@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
28971
28972 int hid_add_device(struct hid_device *hdev)
28973 {
28974- static atomic_t id = ATOMIC_INIT(0);
28975+ static atomic_unchecked_t id = ATOMIC_INIT(0);
28976 int ret;
28977
28978 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28979@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
28980 /* XXX hack, any other cleaner solution after the driver core
28981 * is converted to allow more than 20 bytes as the device name? */
28982 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28983- hdev->vendor, hdev->product, atomic_inc_return(&id));
28984+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28985
28986 hid_debug_register(hdev, dev_name(&hdev->dev));
28987 ret = device_add(&hdev->dev);
28988diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
28989index 4ef02b2..8a96831 100644
28990--- a/drivers/hid/usbhid/hiddev.c
28991+++ b/drivers/hid/usbhid/hiddev.c
28992@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
28993 break;
28994
28995 case HIDIOCAPPLICATION:
28996- if (arg < 0 || arg >= hid->maxapplication)
28997+ if (arg >= hid->maxapplication)
28998 break;
28999
29000 for (i = 0; i < hid->maxcollection; i++)
29001diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29002index 4065374..10ed7dc 100644
29003--- a/drivers/hv/channel.c
29004+++ b/drivers/hv/channel.c
29005@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29006 int ret = 0;
29007 int t;
29008
29009- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29010- atomic_inc(&vmbus_connection.next_gpadl_handle);
29011+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29012+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29013
29014 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29015 if (ret)
29016diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29017index 0fb100e..baf87e5 100644
29018--- a/drivers/hv/hv.c
29019+++ b/drivers/hv/hv.c
29020@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29021 u64 output_address = (output) ? virt_to_phys(output) : 0;
29022 u32 output_address_hi = output_address >> 32;
29023 u32 output_address_lo = output_address & 0xFFFFFFFF;
29024- void *hypercall_page = hv_context.hypercall_page;
29025+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29026
29027 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29028 "=a"(hv_status_lo) : "d" (control_hi),
29029diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29030index 0aee112..b72d21f 100644
29031--- a/drivers/hv/hyperv_vmbus.h
29032+++ b/drivers/hv/hyperv_vmbus.h
29033@@ -556,7 +556,7 @@ enum vmbus_connect_state {
29034 struct vmbus_connection {
29035 enum vmbus_connect_state conn_state;
29036
29037- atomic_t next_gpadl_handle;
29038+ atomic_unchecked_t next_gpadl_handle;
29039
29040 /*
29041 * Represents channel interrupts. Each bit position represents a
29042diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29043index d2d0a2a..90b8f4d 100644
29044--- a/drivers/hv/vmbus_drv.c
29045+++ b/drivers/hv/vmbus_drv.c
29046@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29047 {
29048 int ret = 0;
29049
29050- static atomic_t device_num = ATOMIC_INIT(0);
29051+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29052
29053 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29054- atomic_inc_return(&device_num));
29055+ atomic_inc_return_unchecked(&device_num));
29056
29057 child_device_obj->device.bus = &hv_bus;
29058 child_device_obj->device.parent = &hv_acpi_dev->dev;
29059diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29060index 66f6729..2d6de0a 100644
29061--- a/drivers/hwmon/acpi_power_meter.c
29062+++ b/drivers/hwmon/acpi_power_meter.c
29063@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29064 return res;
29065
29066 temp /= 1000;
29067- if (temp < 0)
29068- return -EINVAL;
29069
29070 mutex_lock(&resource->lock);
29071 resource->trip[attr->index - 7] = temp;
29072diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29073index 5357925..6cf0418 100644
29074--- a/drivers/hwmon/sht15.c
29075+++ b/drivers/hwmon/sht15.c
29076@@ -166,7 +166,7 @@ struct sht15_data {
29077 int supply_uV;
29078 bool supply_uV_valid;
29079 struct work_struct update_supply_work;
29080- atomic_t interrupt_handled;
29081+ atomic_unchecked_t interrupt_handled;
29082 };
29083
29084 /**
29085@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29086 return ret;
29087
29088 gpio_direction_input(data->pdata->gpio_data);
29089- atomic_set(&data->interrupt_handled, 0);
29090+ atomic_set_unchecked(&data->interrupt_handled, 0);
29091
29092 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29093 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29094 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29095 /* Only relevant if the interrupt hasn't occurred. */
29096- if (!atomic_read(&data->interrupt_handled))
29097+ if (!atomic_read_unchecked(&data->interrupt_handled))
29098 schedule_work(&data->read_work);
29099 }
29100 ret = wait_event_timeout(data->wait_queue,
29101@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29102
29103 /* First disable the interrupt */
29104 disable_irq_nosync(irq);
29105- atomic_inc(&data->interrupt_handled);
29106+ atomic_inc_unchecked(&data->interrupt_handled);
29107 /* Then schedule a reading work struct */
29108 if (data->state != SHT15_READING_NOTHING)
29109 schedule_work(&data->read_work);
29110@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29111 * If not, then start the interrupt again - care here as could
29112 * have gone low in meantime so verify it hasn't!
29113 */
29114- atomic_set(&data->interrupt_handled, 0);
29115+ atomic_set_unchecked(&data->interrupt_handled, 0);
29116 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29117 /* If still not occurred or another handler has been scheduled */
29118 if (gpio_get_value(data->pdata->gpio_data)
29119- || atomic_read(&data->interrupt_handled))
29120+ || atomic_read_unchecked(&data->interrupt_handled))
29121 return;
29122 }
29123
29124diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29125index 378fcb5..5e91fa8 100644
29126--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29127+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29128@@ -43,7 +43,7 @@
29129 extern struct i2c_adapter amd756_smbus;
29130
29131 static struct i2c_adapter *s4882_adapter;
29132-static struct i2c_algorithm *s4882_algo;
29133+static i2c_algorithm_no_const *s4882_algo;
29134
29135 /* Wrapper access functions for multiplexed SMBus */
29136 static DEFINE_MUTEX(amd756_lock);
29137diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29138index 29015eb..af2d8e9 100644
29139--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29140+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29141@@ -41,7 +41,7 @@
29142 extern struct i2c_adapter *nforce2_smbus;
29143
29144 static struct i2c_adapter *s4985_adapter;
29145-static struct i2c_algorithm *s4985_algo;
29146+static i2c_algorithm_no_const *s4985_algo;
29147
29148 /* Wrapper access functions for multiplexed SMBus */
29149 static DEFINE_MUTEX(nforce2_lock);
29150diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29151index d7a4833..7fae376 100644
29152--- a/drivers/i2c/i2c-mux.c
29153+++ b/drivers/i2c/i2c-mux.c
29154@@ -28,7 +28,7 @@
29155 /* multiplexer per channel data */
29156 struct i2c_mux_priv {
29157 struct i2c_adapter adap;
29158- struct i2c_algorithm algo;
29159+ i2c_algorithm_no_const algo;
29160
29161 struct i2c_adapter *parent;
29162 void *mux_dev; /* the mux chip/device */
29163diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29164index 57d00ca..0145194 100644
29165--- a/drivers/ide/aec62xx.c
29166+++ b/drivers/ide/aec62xx.c
29167@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29168 .cable_detect = atp86x_cable_detect,
29169 };
29170
29171-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29172+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29173 { /* 0: AEC6210 */
29174 .name = DRV_NAME,
29175 .init_chipset = init_chipset_aec62xx,
29176diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29177index 2c8016a..911a27c 100644
29178--- a/drivers/ide/alim15x3.c
29179+++ b/drivers/ide/alim15x3.c
29180@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29181 .dma_sff_read_status = ide_dma_sff_read_status,
29182 };
29183
29184-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29185+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29186 .name = DRV_NAME,
29187 .init_chipset = init_chipset_ali15x3,
29188 .init_hwif = init_hwif_ali15x3,
29189diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29190index 3747b25..56fc995 100644
29191--- a/drivers/ide/amd74xx.c
29192+++ b/drivers/ide/amd74xx.c
29193@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29194 .udma_mask = udma, \
29195 }
29196
29197-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29198+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29199 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29200 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29201 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29202diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29203index 15f0ead..cb43480 100644
29204--- a/drivers/ide/atiixp.c
29205+++ b/drivers/ide/atiixp.c
29206@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29207 .cable_detect = atiixp_cable_detect,
29208 };
29209
29210-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29211+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29212 { /* 0: IXP200/300/400/700 */
29213 .name = DRV_NAME,
29214 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29215diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29216index 5f80312..d1fc438 100644
29217--- a/drivers/ide/cmd64x.c
29218+++ b/drivers/ide/cmd64x.c
29219@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29220 .dma_sff_read_status = ide_dma_sff_read_status,
29221 };
29222
29223-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29224+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29225 { /* 0: CMD643 */
29226 .name = DRV_NAME,
29227 .init_chipset = init_chipset_cmd64x,
29228diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29229index 2c1e5f7..1444762 100644
29230--- a/drivers/ide/cs5520.c
29231+++ b/drivers/ide/cs5520.c
29232@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29233 .set_dma_mode = cs5520_set_dma_mode,
29234 };
29235
29236-static const struct ide_port_info cyrix_chipset __devinitdata = {
29237+static const struct ide_port_info cyrix_chipset __devinitconst = {
29238 .name = DRV_NAME,
29239 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29240 .port_ops = &cs5520_port_ops,
29241diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29242index 4dc4eb9..49b40ad 100644
29243--- a/drivers/ide/cs5530.c
29244+++ b/drivers/ide/cs5530.c
29245@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29246 .udma_filter = cs5530_udma_filter,
29247 };
29248
29249-static const struct ide_port_info cs5530_chipset __devinitdata = {
29250+static const struct ide_port_info cs5530_chipset __devinitconst = {
29251 .name = DRV_NAME,
29252 .init_chipset = init_chipset_cs5530,
29253 .init_hwif = init_hwif_cs5530,
29254diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29255index 5059faf..18d4c85 100644
29256--- a/drivers/ide/cs5535.c
29257+++ b/drivers/ide/cs5535.c
29258@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29259 .cable_detect = cs5535_cable_detect,
29260 };
29261
29262-static const struct ide_port_info cs5535_chipset __devinitdata = {
29263+static const struct ide_port_info cs5535_chipset __devinitconst = {
29264 .name = DRV_NAME,
29265 .port_ops = &cs5535_port_ops,
29266 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29267diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29268index 847553f..3ffb49d 100644
29269--- a/drivers/ide/cy82c693.c
29270+++ b/drivers/ide/cy82c693.c
29271@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29272 .set_dma_mode = cy82c693_set_dma_mode,
29273 };
29274
29275-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29276+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29277 .name = DRV_NAME,
29278 .init_iops = init_iops_cy82c693,
29279 .port_ops = &cy82c693_port_ops,
29280diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29281index 58c51cd..4aec3b8 100644
29282--- a/drivers/ide/hpt366.c
29283+++ b/drivers/ide/hpt366.c
29284@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29285 }
29286 };
29287
29288-static const struct hpt_info hpt36x __devinitdata = {
29289+static const struct hpt_info hpt36x __devinitconst = {
29290 .chip_name = "HPT36x",
29291 .chip_type = HPT36x,
29292 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29293@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29294 .timings = &hpt36x_timings
29295 };
29296
29297-static const struct hpt_info hpt370 __devinitdata = {
29298+static const struct hpt_info hpt370 __devinitconst = {
29299 .chip_name = "HPT370",
29300 .chip_type = HPT370,
29301 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29302@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29303 .timings = &hpt37x_timings
29304 };
29305
29306-static const struct hpt_info hpt370a __devinitdata = {
29307+static const struct hpt_info hpt370a __devinitconst = {
29308 .chip_name = "HPT370A",
29309 .chip_type = HPT370A,
29310 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29311@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29312 .timings = &hpt37x_timings
29313 };
29314
29315-static const struct hpt_info hpt374 __devinitdata = {
29316+static const struct hpt_info hpt374 __devinitconst = {
29317 .chip_name = "HPT374",
29318 .chip_type = HPT374,
29319 .udma_mask = ATA_UDMA5,
29320@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29321 .timings = &hpt37x_timings
29322 };
29323
29324-static const struct hpt_info hpt372 __devinitdata = {
29325+static const struct hpt_info hpt372 __devinitconst = {
29326 .chip_name = "HPT372",
29327 .chip_type = HPT372,
29328 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29329@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29330 .timings = &hpt37x_timings
29331 };
29332
29333-static const struct hpt_info hpt372a __devinitdata = {
29334+static const struct hpt_info hpt372a __devinitconst = {
29335 .chip_name = "HPT372A",
29336 .chip_type = HPT372A,
29337 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29338@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29339 .timings = &hpt37x_timings
29340 };
29341
29342-static const struct hpt_info hpt302 __devinitdata = {
29343+static const struct hpt_info hpt302 __devinitconst = {
29344 .chip_name = "HPT302",
29345 .chip_type = HPT302,
29346 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29347@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29348 .timings = &hpt37x_timings
29349 };
29350
29351-static const struct hpt_info hpt371 __devinitdata = {
29352+static const struct hpt_info hpt371 __devinitconst = {
29353 .chip_name = "HPT371",
29354 .chip_type = HPT371,
29355 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29356@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29357 .timings = &hpt37x_timings
29358 };
29359
29360-static const struct hpt_info hpt372n __devinitdata = {
29361+static const struct hpt_info hpt372n __devinitconst = {
29362 .chip_name = "HPT372N",
29363 .chip_type = HPT372N,
29364 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29365@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29366 .timings = &hpt37x_timings
29367 };
29368
29369-static const struct hpt_info hpt302n __devinitdata = {
29370+static const struct hpt_info hpt302n __devinitconst = {
29371 .chip_name = "HPT302N",
29372 .chip_type = HPT302N,
29373 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29374@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29375 .timings = &hpt37x_timings
29376 };
29377
29378-static const struct hpt_info hpt371n __devinitdata = {
29379+static const struct hpt_info hpt371n __devinitconst = {
29380 .chip_name = "HPT371N",
29381 .chip_type = HPT371N,
29382 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29383@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29384 .dma_sff_read_status = ide_dma_sff_read_status,
29385 };
29386
29387-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29388+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29389 { /* 0: HPT36x */
29390 .name = DRV_NAME,
29391 .init_chipset = init_chipset_hpt366,
29392diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29393index 8126824..55a2798 100644
29394--- a/drivers/ide/ide-cd.c
29395+++ b/drivers/ide/ide-cd.c
29396@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29397 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29398 if ((unsigned long)buf & alignment
29399 || blk_rq_bytes(rq) & q->dma_pad_mask
29400- || object_is_on_stack(buf))
29401+ || object_starts_on_stack(buf))
29402 drive->dma = 0;
29403 }
29404 }
29405diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29406index a743e68..1cfd674 100644
29407--- a/drivers/ide/ide-pci-generic.c
29408+++ b/drivers/ide/ide-pci-generic.c
29409@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29410 .udma_mask = ATA_UDMA6, \
29411 }
29412
29413-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29414+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29415 /* 0: Unknown */
29416 DECLARE_GENERIC_PCI_DEV(0),
29417
29418diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29419index 560e66d..d5dd180 100644
29420--- a/drivers/ide/it8172.c
29421+++ b/drivers/ide/it8172.c
29422@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29423 .set_dma_mode = it8172_set_dma_mode,
29424 };
29425
29426-static const struct ide_port_info it8172_port_info __devinitdata = {
29427+static const struct ide_port_info it8172_port_info __devinitconst = {
29428 .name = DRV_NAME,
29429 .port_ops = &it8172_port_ops,
29430 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29431diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29432index 46816ba..1847aeb 100644
29433--- a/drivers/ide/it8213.c
29434+++ b/drivers/ide/it8213.c
29435@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29436 .cable_detect = it8213_cable_detect,
29437 };
29438
29439-static const struct ide_port_info it8213_chipset __devinitdata = {
29440+static const struct ide_port_info it8213_chipset __devinitconst = {
29441 .name = DRV_NAME,
29442 .enablebits = { {0x41, 0x80, 0x80} },
29443 .port_ops = &it8213_port_ops,
29444diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29445index 2e3169f..c5611db 100644
29446--- a/drivers/ide/it821x.c
29447+++ b/drivers/ide/it821x.c
29448@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29449 .cable_detect = it821x_cable_detect,
29450 };
29451
29452-static const struct ide_port_info it821x_chipset __devinitdata = {
29453+static const struct ide_port_info it821x_chipset __devinitconst = {
29454 .name = DRV_NAME,
29455 .init_chipset = init_chipset_it821x,
29456 .init_hwif = init_hwif_it821x,
29457diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29458index 74c2c4a..efddd7d 100644
29459--- a/drivers/ide/jmicron.c
29460+++ b/drivers/ide/jmicron.c
29461@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29462 .cable_detect = jmicron_cable_detect,
29463 };
29464
29465-static const struct ide_port_info jmicron_chipset __devinitdata = {
29466+static const struct ide_port_info jmicron_chipset __devinitconst = {
29467 .name = DRV_NAME,
29468 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29469 .port_ops = &jmicron_port_ops,
29470diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29471index 95327a2..73f78d8 100644
29472--- a/drivers/ide/ns87415.c
29473+++ b/drivers/ide/ns87415.c
29474@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29475 .dma_sff_read_status = superio_dma_sff_read_status,
29476 };
29477
29478-static const struct ide_port_info ns87415_chipset __devinitdata = {
29479+static const struct ide_port_info ns87415_chipset __devinitconst = {
29480 .name = DRV_NAME,
29481 .init_hwif = init_hwif_ns87415,
29482 .tp_ops = &ns87415_tp_ops,
29483diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29484index 1a53a4c..39edc66 100644
29485--- a/drivers/ide/opti621.c
29486+++ b/drivers/ide/opti621.c
29487@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29488 .set_pio_mode = opti621_set_pio_mode,
29489 };
29490
29491-static const struct ide_port_info opti621_chipset __devinitdata = {
29492+static const struct ide_port_info opti621_chipset __devinitconst = {
29493 .name = DRV_NAME,
29494 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29495 .port_ops = &opti621_port_ops,
29496diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29497index 9546fe2..2e5ceb6 100644
29498--- a/drivers/ide/pdc202xx_new.c
29499+++ b/drivers/ide/pdc202xx_new.c
29500@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29501 .udma_mask = udma, \
29502 }
29503
29504-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29505+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29506 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29507 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29508 };
29509diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29510index 3a35ec6..5634510 100644
29511--- a/drivers/ide/pdc202xx_old.c
29512+++ b/drivers/ide/pdc202xx_old.c
29513@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29514 .max_sectors = sectors, \
29515 }
29516
29517-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29518+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29519 { /* 0: PDC20246 */
29520 .name = DRV_NAME,
29521 .init_chipset = init_chipset_pdc202xx,
29522diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29523index 1892e81..fe0fd60 100644
29524--- a/drivers/ide/piix.c
29525+++ b/drivers/ide/piix.c
29526@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29527 .udma_mask = udma, \
29528 }
29529
29530-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29531+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29532 /* 0: MPIIX */
29533 { /*
29534 * MPIIX actually has only a single IDE channel mapped to
29535diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29536index a6414a8..c04173e 100644
29537--- a/drivers/ide/rz1000.c
29538+++ b/drivers/ide/rz1000.c
29539@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29540 }
29541 }
29542
29543-static const struct ide_port_info rz1000_chipset __devinitdata = {
29544+static const struct ide_port_info rz1000_chipset __devinitconst = {
29545 .name = DRV_NAME,
29546 .host_flags = IDE_HFLAG_NO_DMA,
29547 };
29548diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29549index 356b9b5..d4758eb 100644
29550--- a/drivers/ide/sc1200.c
29551+++ b/drivers/ide/sc1200.c
29552@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29553 .dma_sff_read_status = ide_dma_sff_read_status,
29554 };
29555
29556-static const struct ide_port_info sc1200_chipset __devinitdata = {
29557+static const struct ide_port_info sc1200_chipset __devinitconst = {
29558 .name = DRV_NAME,
29559 .port_ops = &sc1200_port_ops,
29560 .dma_ops = &sc1200_dma_ops,
29561diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29562index b7f5b0c..9701038 100644
29563--- a/drivers/ide/scc_pata.c
29564+++ b/drivers/ide/scc_pata.c
29565@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29566 .dma_sff_read_status = scc_dma_sff_read_status,
29567 };
29568
29569-static const struct ide_port_info scc_chipset __devinitdata = {
29570+static const struct ide_port_info scc_chipset __devinitconst = {
29571 .name = "sccIDE",
29572 .init_iops = init_iops_scc,
29573 .init_dma = scc_init_dma,
29574diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29575index 35fb8da..24d72ef 100644
29576--- a/drivers/ide/serverworks.c
29577+++ b/drivers/ide/serverworks.c
29578@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29579 .cable_detect = svwks_cable_detect,
29580 };
29581
29582-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29583+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29584 { /* 0: OSB4 */
29585 .name = DRV_NAME,
29586 .init_chipset = init_chipset_svwks,
29587diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29588index ddeda44..46f7e30 100644
29589--- a/drivers/ide/siimage.c
29590+++ b/drivers/ide/siimage.c
29591@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29592 .udma_mask = ATA_UDMA6, \
29593 }
29594
29595-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29596+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29597 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29598 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29599 };
29600diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29601index 4a00225..09e61b4 100644
29602--- a/drivers/ide/sis5513.c
29603+++ b/drivers/ide/sis5513.c
29604@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29605 .cable_detect = sis_cable_detect,
29606 };
29607
29608-static const struct ide_port_info sis5513_chipset __devinitdata = {
29609+static const struct ide_port_info sis5513_chipset __devinitconst = {
29610 .name = DRV_NAME,
29611 .init_chipset = init_chipset_sis5513,
29612 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29613diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29614index f21dc2a..d051cd2 100644
29615--- a/drivers/ide/sl82c105.c
29616+++ b/drivers/ide/sl82c105.c
29617@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29618 .dma_sff_read_status = ide_dma_sff_read_status,
29619 };
29620
29621-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29622+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29623 .name = DRV_NAME,
29624 .init_chipset = init_chipset_sl82c105,
29625 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29626diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29627index 864ffe0..863a5e9 100644
29628--- a/drivers/ide/slc90e66.c
29629+++ b/drivers/ide/slc90e66.c
29630@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29631 .cable_detect = slc90e66_cable_detect,
29632 };
29633
29634-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29635+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29636 .name = DRV_NAME,
29637 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29638 .port_ops = &slc90e66_port_ops,
29639diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29640index 4799d5c..1794678 100644
29641--- a/drivers/ide/tc86c001.c
29642+++ b/drivers/ide/tc86c001.c
29643@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29644 .dma_sff_read_status = ide_dma_sff_read_status,
29645 };
29646
29647-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29648+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29649 .name = DRV_NAME,
29650 .init_hwif = init_hwif_tc86c001,
29651 .port_ops = &tc86c001_port_ops,
29652diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29653index 281c914..55ce1b8 100644
29654--- a/drivers/ide/triflex.c
29655+++ b/drivers/ide/triflex.c
29656@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29657 .set_dma_mode = triflex_set_mode,
29658 };
29659
29660-static const struct ide_port_info triflex_device __devinitdata = {
29661+static const struct ide_port_info triflex_device __devinitconst = {
29662 .name = DRV_NAME,
29663 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29664 .port_ops = &triflex_port_ops,
29665diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29666index 4b42ca0..e494a98 100644
29667--- a/drivers/ide/trm290.c
29668+++ b/drivers/ide/trm290.c
29669@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29670 .dma_check = trm290_dma_check,
29671 };
29672
29673-static const struct ide_port_info trm290_chipset __devinitdata = {
29674+static const struct ide_port_info trm290_chipset __devinitconst = {
29675 .name = DRV_NAME,
29676 .init_hwif = init_hwif_trm290,
29677 .tp_ops = &trm290_tp_ops,
29678diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29679index f46f49c..eb77678 100644
29680--- a/drivers/ide/via82cxxx.c
29681+++ b/drivers/ide/via82cxxx.c
29682@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29683 .cable_detect = via82cxxx_cable_detect,
29684 };
29685
29686-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29687+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29688 .name = DRV_NAME,
29689 .init_chipset = init_chipset_via82cxxx,
29690 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29691diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29692index eb0e2cc..14241c7 100644
29693--- a/drivers/ieee802154/fakehard.c
29694+++ b/drivers/ieee802154/fakehard.c
29695@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29696 phy->transmit_power = 0xbf;
29697
29698 dev->netdev_ops = &fake_ops;
29699- dev->ml_priv = &fake_mlme;
29700+ dev->ml_priv = (void *)&fake_mlme;
29701
29702 priv = netdev_priv(dev);
29703 priv->phy = phy;
29704diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29705index 8b72f39..55df4c8 100644
29706--- a/drivers/infiniband/core/cm.c
29707+++ b/drivers/infiniband/core/cm.c
29708@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29709
29710 struct cm_counter_group {
29711 struct kobject obj;
29712- atomic_long_t counter[CM_ATTR_COUNT];
29713+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29714 };
29715
29716 struct cm_counter_attribute {
29717@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29718 struct ib_mad_send_buf *msg = NULL;
29719 int ret;
29720
29721- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29722+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29723 counter[CM_REQ_COUNTER]);
29724
29725 /* Quick state check to discard duplicate REQs. */
29726@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29727 if (!cm_id_priv)
29728 return;
29729
29730- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29731+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29732 counter[CM_REP_COUNTER]);
29733 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29734 if (ret)
29735@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29736 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29737 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29738 spin_unlock_irq(&cm_id_priv->lock);
29739- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29740+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29741 counter[CM_RTU_COUNTER]);
29742 goto out;
29743 }
29744@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29745 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29746 dreq_msg->local_comm_id);
29747 if (!cm_id_priv) {
29748- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29749+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29750 counter[CM_DREQ_COUNTER]);
29751 cm_issue_drep(work->port, work->mad_recv_wc);
29752 return -EINVAL;
29753@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29754 case IB_CM_MRA_REP_RCVD:
29755 break;
29756 case IB_CM_TIMEWAIT:
29757- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29758+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29759 counter[CM_DREQ_COUNTER]);
29760 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29761 goto unlock;
29762@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29763 cm_free_msg(msg);
29764 goto deref;
29765 case IB_CM_DREQ_RCVD:
29766- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29767+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29768 counter[CM_DREQ_COUNTER]);
29769 goto unlock;
29770 default:
29771@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29772 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29773 cm_id_priv->msg, timeout)) {
29774 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29775- atomic_long_inc(&work->port->
29776+ atomic_long_inc_unchecked(&work->port->
29777 counter_group[CM_RECV_DUPLICATES].
29778 counter[CM_MRA_COUNTER]);
29779 goto out;
29780@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29781 break;
29782 case IB_CM_MRA_REQ_RCVD:
29783 case IB_CM_MRA_REP_RCVD:
29784- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29785+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29786 counter[CM_MRA_COUNTER]);
29787 /* fall through */
29788 default:
29789@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29790 case IB_CM_LAP_IDLE:
29791 break;
29792 case IB_CM_MRA_LAP_SENT:
29793- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29794+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29795 counter[CM_LAP_COUNTER]);
29796 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29797 goto unlock;
29798@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29799 cm_free_msg(msg);
29800 goto deref;
29801 case IB_CM_LAP_RCVD:
29802- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29803+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29804 counter[CM_LAP_COUNTER]);
29805 goto unlock;
29806 default:
29807@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29808 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29809 if (cur_cm_id_priv) {
29810 spin_unlock_irq(&cm.lock);
29811- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29812+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29813 counter[CM_SIDR_REQ_COUNTER]);
29814 goto out; /* Duplicate message. */
29815 }
29816@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29817 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29818 msg->retries = 1;
29819
29820- atomic_long_add(1 + msg->retries,
29821+ atomic_long_add_unchecked(1 + msg->retries,
29822 &port->counter_group[CM_XMIT].counter[attr_index]);
29823 if (msg->retries)
29824- atomic_long_add(msg->retries,
29825+ atomic_long_add_unchecked(msg->retries,
29826 &port->counter_group[CM_XMIT_RETRIES].
29827 counter[attr_index]);
29828
29829@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29830 }
29831
29832 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29833- atomic_long_inc(&port->counter_group[CM_RECV].
29834+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29835 counter[attr_id - CM_ATTR_ID_OFFSET]);
29836
29837 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29838@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29839 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29840
29841 return sprintf(buf, "%ld\n",
29842- atomic_long_read(&group->counter[cm_attr->index]));
29843+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29844 }
29845
29846 static const struct sysfs_ops cm_counter_ops = {
29847diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29848index 176c8f9..2627b62 100644
29849--- a/drivers/infiniband/core/fmr_pool.c
29850+++ b/drivers/infiniband/core/fmr_pool.c
29851@@ -98,8 +98,8 @@ struct ib_fmr_pool {
29852
29853 struct task_struct *thread;
29854
29855- atomic_t req_ser;
29856- atomic_t flush_ser;
29857+ atomic_unchecked_t req_ser;
29858+ atomic_unchecked_t flush_ser;
29859
29860 wait_queue_head_t force_wait;
29861 };
29862@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29863 struct ib_fmr_pool *pool = pool_ptr;
29864
29865 do {
29866- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29867+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29868 ib_fmr_batch_release(pool);
29869
29870- atomic_inc(&pool->flush_ser);
29871+ atomic_inc_unchecked(&pool->flush_ser);
29872 wake_up_interruptible(&pool->force_wait);
29873
29874 if (pool->flush_function)
29875@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29876 }
29877
29878 set_current_state(TASK_INTERRUPTIBLE);
29879- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29880+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29881 !kthread_should_stop())
29882 schedule();
29883 __set_current_state(TASK_RUNNING);
29884@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29885 pool->dirty_watermark = params->dirty_watermark;
29886 pool->dirty_len = 0;
29887 spin_lock_init(&pool->pool_lock);
29888- atomic_set(&pool->req_ser, 0);
29889- atomic_set(&pool->flush_ser, 0);
29890+ atomic_set_unchecked(&pool->req_ser, 0);
29891+ atomic_set_unchecked(&pool->flush_ser, 0);
29892 init_waitqueue_head(&pool->force_wait);
29893
29894 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29895@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29896 }
29897 spin_unlock_irq(&pool->pool_lock);
29898
29899- serial = atomic_inc_return(&pool->req_ser);
29900+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29901 wake_up_process(pool->thread);
29902
29903 if (wait_event_interruptible(pool->force_wait,
29904- atomic_read(&pool->flush_ser) - serial >= 0))
29905+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29906 return -EINTR;
29907
29908 return 0;
29909@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29910 } else {
29911 list_add_tail(&fmr->list, &pool->dirty_list);
29912 if (++pool->dirty_len >= pool->dirty_watermark) {
29913- atomic_inc(&pool->req_ser);
29914+ atomic_inc_unchecked(&pool->req_ser);
29915 wake_up_process(pool->thread);
29916 }
29917 }
29918diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
29919index 40c8353..946b0e4 100644
29920--- a/drivers/infiniband/hw/cxgb4/mem.c
29921+++ b/drivers/infiniband/hw/cxgb4/mem.c
29922@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29923 int err;
29924 struct fw_ri_tpte tpt;
29925 u32 stag_idx;
29926- static atomic_t key;
29927+ static atomic_unchecked_t key;
29928
29929 if (c4iw_fatal_error(rdev))
29930 return -EIO;
29931@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29932 &rdev->resource.tpt_fifo_lock);
29933 if (!stag_idx)
29934 return -ENOMEM;
29935- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29936+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29937 }
29938 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29939 __func__, stag_state, type, pdid, stag_idx);
29940diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
29941index 79b3dbc..96e5fcc 100644
29942--- a/drivers/infiniband/hw/ipath/ipath_rc.c
29943+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
29944@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29945 struct ib_atomic_eth *ateth;
29946 struct ipath_ack_entry *e;
29947 u64 vaddr;
29948- atomic64_t *maddr;
29949+ atomic64_unchecked_t *maddr;
29950 u64 sdata;
29951 u32 rkey;
29952 u8 next;
29953@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29954 IB_ACCESS_REMOTE_ATOMIC)))
29955 goto nack_acc_unlck;
29956 /* Perform atomic OP and save result. */
29957- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29958+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29959 sdata = be64_to_cpu(ateth->swap_data);
29960 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29961 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
29962- (u64) atomic64_add_return(sdata, maddr) - sdata :
29963+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29964 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29965 be64_to_cpu(ateth->compare_data),
29966 sdata);
29967diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
29968index 1f95bba..9530f87 100644
29969--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
29970+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
29971@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
29972 unsigned long flags;
29973 struct ib_wc wc;
29974 u64 sdata;
29975- atomic64_t *maddr;
29976+ atomic64_unchecked_t *maddr;
29977 enum ib_wc_status send_status;
29978
29979 /*
29980@@ -382,11 +382,11 @@ again:
29981 IB_ACCESS_REMOTE_ATOMIC)))
29982 goto acc_err;
29983 /* Perform atomic OP and save result. */
29984- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29985+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29986 sdata = wqe->wr.wr.atomic.compare_add;
29987 *(u64 *) sqp->s_sge.sge.vaddr =
29988 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
29989- (u64) atomic64_add_return(sdata, maddr) - sdata :
29990+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29991 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29992 sdata, wqe->wr.wr.atomic.swap);
29993 goto send_comp;
29994diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
29995index 5965b3d..16817fb 100644
29996--- a/drivers/infiniband/hw/nes/nes.c
29997+++ b/drivers/infiniband/hw/nes/nes.c
29998@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
29999 LIST_HEAD(nes_adapter_list);
30000 static LIST_HEAD(nes_dev_list);
30001
30002-atomic_t qps_destroyed;
30003+atomic_unchecked_t qps_destroyed;
30004
30005 static unsigned int ee_flsh_adapter;
30006 static unsigned int sysfs_nonidx_addr;
30007@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30008 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30009 struct nes_adapter *nesadapter = nesdev->nesadapter;
30010
30011- atomic_inc(&qps_destroyed);
30012+ atomic_inc_unchecked(&qps_destroyed);
30013
30014 /* Free the control structures */
30015
30016diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30017index 568b4f1..5ea3eff 100644
30018--- a/drivers/infiniband/hw/nes/nes.h
30019+++ b/drivers/infiniband/hw/nes/nes.h
30020@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30021 extern unsigned int wqm_quanta;
30022 extern struct list_head nes_adapter_list;
30023
30024-extern atomic_t cm_connects;
30025-extern atomic_t cm_accepts;
30026-extern atomic_t cm_disconnects;
30027-extern atomic_t cm_closes;
30028-extern atomic_t cm_connecteds;
30029-extern atomic_t cm_connect_reqs;
30030-extern atomic_t cm_rejects;
30031-extern atomic_t mod_qp_timouts;
30032-extern atomic_t qps_created;
30033-extern atomic_t qps_destroyed;
30034-extern atomic_t sw_qps_destroyed;
30035+extern atomic_unchecked_t cm_connects;
30036+extern atomic_unchecked_t cm_accepts;
30037+extern atomic_unchecked_t cm_disconnects;
30038+extern atomic_unchecked_t cm_closes;
30039+extern atomic_unchecked_t cm_connecteds;
30040+extern atomic_unchecked_t cm_connect_reqs;
30041+extern atomic_unchecked_t cm_rejects;
30042+extern atomic_unchecked_t mod_qp_timouts;
30043+extern atomic_unchecked_t qps_created;
30044+extern atomic_unchecked_t qps_destroyed;
30045+extern atomic_unchecked_t sw_qps_destroyed;
30046 extern u32 mh_detected;
30047 extern u32 mh_pauses_sent;
30048 extern u32 cm_packets_sent;
30049@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30050 extern u32 cm_packets_received;
30051 extern u32 cm_packets_dropped;
30052 extern u32 cm_packets_retrans;
30053-extern atomic_t cm_listens_created;
30054-extern atomic_t cm_listens_destroyed;
30055+extern atomic_unchecked_t cm_listens_created;
30056+extern atomic_unchecked_t cm_listens_destroyed;
30057 extern u32 cm_backlog_drops;
30058-extern atomic_t cm_loopbacks;
30059-extern atomic_t cm_nodes_created;
30060-extern atomic_t cm_nodes_destroyed;
30061-extern atomic_t cm_accel_dropped_pkts;
30062-extern atomic_t cm_resets_recvd;
30063-extern atomic_t pau_qps_created;
30064-extern atomic_t pau_qps_destroyed;
30065+extern atomic_unchecked_t cm_loopbacks;
30066+extern atomic_unchecked_t cm_nodes_created;
30067+extern atomic_unchecked_t cm_nodes_destroyed;
30068+extern atomic_unchecked_t cm_accel_dropped_pkts;
30069+extern atomic_unchecked_t cm_resets_recvd;
30070+extern atomic_unchecked_t pau_qps_created;
30071+extern atomic_unchecked_t pau_qps_destroyed;
30072
30073 extern u32 int_mod_timer_init;
30074 extern u32 int_mod_cq_depth_256;
30075diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30076index 0a52d72..0642f36 100644
30077--- a/drivers/infiniband/hw/nes/nes_cm.c
30078+++ b/drivers/infiniband/hw/nes/nes_cm.c
30079@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30080 u32 cm_packets_retrans;
30081 u32 cm_packets_created;
30082 u32 cm_packets_received;
30083-atomic_t cm_listens_created;
30084-atomic_t cm_listens_destroyed;
30085+atomic_unchecked_t cm_listens_created;
30086+atomic_unchecked_t cm_listens_destroyed;
30087 u32 cm_backlog_drops;
30088-atomic_t cm_loopbacks;
30089-atomic_t cm_nodes_created;
30090-atomic_t cm_nodes_destroyed;
30091-atomic_t cm_accel_dropped_pkts;
30092-atomic_t cm_resets_recvd;
30093+atomic_unchecked_t cm_loopbacks;
30094+atomic_unchecked_t cm_nodes_created;
30095+atomic_unchecked_t cm_nodes_destroyed;
30096+atomic_unchecked_t cm_accel_dropped_pkts;
30097+atomic_unchecked_t cm_resets_recvd;
30098
30099 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30100 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30101@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30102
30103 static struct nes_cm_core *g_cm_core;
30104
30105-atomic_t cm_connects;
30106-atomic_t cm_accepts;
30107-atomic_t cm_disconnects;
30108-atomic_t cm_closes;
30109-atomic_t cm_connecteds;
30110-atomic_t cm_connect_reqs;
30111-atomic_t cm_rejects;
30112+atomic_unchecked_t cm_connects;
30113+atomic_unchecked_t cm_accepts;
30114+atomic_unchecked_t cm_disconnects;
30115+atomic_unchecked_t cm_closes;
30116+atomic_unchecked_t cm_connecteds;
30117+atomic_unchecked_t cm_connect_reqs;
30118+atomic_unchecked_t cm_rejects;
30119
30120 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30121 {
30122@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30123 kfree(listener);
30124 listener = NULL;
30125 ret = 0;
30126- atomic_inc(&cm_listens_destroyed);
30127+ atomic_inc_unchecked(&cm_listens_destroyed);
30128 } else {
30129 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30130 }
30131@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30132 cm_node->rem_mac);
30133
30134 add_hte_node(cm_core, cm_node);
30135- atomic_inc(&cm_nodes_created);
30136+ atomic_inc_unchecked(&cm_nodes_created);
30137
30138 return cm_node;
30139 }
30140@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30141 }
30142
30143 atomic_dec(&cm_core->node_cnt);
30144- atomic_inc(&cm_nodes_destroyed);
30145+ atomic_inc_unchecked(&cm_nodes_destroyed);
30146 nesqp = cm_node->nesqp;
30147 if (nesqp) {
30148 nesqp->cm_node = NULL;
30149@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30150
30151 static void drop_packet(struct sk_buff *skb)
30152 {
30153- atomic_inc(&cm_accel_dropped_pkts);
30154+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30155 dev_kfree_skb_any(skb);
30156 }
30157
30158@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30159 {
30160
30161 int reset = 0; /* whether to send reset in case of err.. */
30162- atomic_inc(&cm_resets_recvd);
30163+ atomic_inc_unchecked(&cm_resets_recvd);
30164 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30165 " refcnt=%d\n", cm_node, cm_node->state,
30166 atomic_read(&cm_node->ref_count));
30167@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30168 rem_ref_cm_node(cm_node->cm_core, cm_node);
30169 return NULL;
30170 }
30171- atomic_inc(&cm_loopbacks);
30172+ atomic_inc_unchecked(&cm_loopbacks);
30173 loopbackremotenode->loopbackpartner = cm_node;
30174 loopbackremotenode->tcp_cntxt.rcv_wscale =
30175 NES_CM_DEFAULT_RCV_WND_SCALE;
30176@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30177 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30178 else {
30179 rem_ref_cm_node(cm_core, cm_node);
30180- atomic_inc(&cm_accel_dropped_pkts);
30181+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30182 dev_kfree_skb_any(skb);
30183 }
30184 break;
30185@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30186
30187 if ((cm_id) && (cm_id->event_handler)) {
30188 if (issue_disconn) {
30189- atomic_inc(&cm_disconnects);
30190+ atomic_inc_unchecked(&cm_disconnects);
30191 cm_event.event = IW_CM_EVENT_DISCONNECT;
30192 cm_event.status = disconn_status;
30193 cm_event.local_addr = cm_id->local_addr;
30194@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30195 }
30196
30197 if (issue_close) {
30198- atomic_inc(&cm_closes);
30199+ atomic_inc_unchecked(&cm_closes);
30200 nes_disconnect(nesqp, 1);
30201
30202 cm_id->provider_data = nesqp;
30203@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30204
30205 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30206 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30207- atomic_inc(&cm_accepts);
30208+ atomic_inc_unchecked(&cm_accepts);
30209
30210 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30211 netdev_refcnt_read(nesvnic->netdev));
30212@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30213 struct nes_cm_core *cm_core;
30214 u8 *start_buff;
30215
30216- atomic_inc(&cm_rejects);
30217+ atomic_inc_unchecked(&cm_rejects);
30218 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30219 loopback = cm_node->loopbackpartner;
30220 cm_core = cm_node->cm_core;
30221@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30222 ntohl(cm_id->local_addr.sin_addr.s_addr),
30223 ntohs(cm_id->local_addr.sin_port));
30224
30225- atomic_inc(&cm_connects);
30226+ atomic_inc_unchecked(&cm_connects);
30227 nesqp->active_conn = 1;
30228
30229 /* cache the cm_id in the qp */
30230@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30231 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30232 return err;
30233 }
30234- atomic_inc(&cm_listens_created);
30235+ atomic_inc_unchecked(&cm_listens_created);
30236 }
30237
30238 cm_id->add_ref(cm_id);
30239@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30240
30241 if (nesqp->destroyed)
30242 return;
30243- atomic_inc(&cm_connecteds);
30244+ atomic_inc_unchecked(&cm_connecteds);
30245 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30246 " local port 0x%04X. jiffies = %lu.\n",
30247 nesqp->hwqp.qp_id,
30248@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30249
30250 cm_id->add_ref(cm_id);
30251 ret = cm_id->event_handler(cm_id, &cm_event);
30252- atomic_inc(&cm_closes);
30253+ atomic_inc_unchecked(&cm_closes);
30254 cm_event.event = IW_CM_EVENT_CLOSE;
30255 cm_event.status = 0;
30256 cm_event.provider_data = cm_id->provider_data;
30257@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30258 return;
30259 cm_id = cm_node->cm_id;
30260
30261- atomic_inc(&cm_connect_reqs);
30262+ atomic_inc_unchecked(&cm_connect_reqs);
30263 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30264 cm_node, cm_id, jiffies);
30265
30266@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30267 return;
30268 cm_id = cm_node->cm_id;
30269
30270- atomic_inc(&cm_connect_reqs);
30271+ atomic_inc_unchecked(&cm_connect_reqs);
30272 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30273 cm_node, cm_id, jiffies);
30274
30275diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30276index b3b2a24..7bfaf1e 100644
30277--- a/drivers/infiniband/hw/nes/nes_mgt.c
30278+++ b/drivers/infiniband/hw/nes/nes_mgt.c
30279@@ -40,8 +40,8 @@
30280 #include "nes.h"
30281 #include "nes_mgt.h"
30282
30283-atomic_t pau_qps_created;
30284-atomic_t pau_qps_destroyed;
30285+atomic_unchecked_t pau_qps_created;
30286+atomic_unchecked_t pau_qps_destroyed;
30287
30288 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30289 {
30290@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30291 {
30292 struct sk_buff *skb;
30293 unsigned long flags;
30294- atomic_inc(&pau_qps_destroyed);
30295+ atomic_inc_unchecked(&pau_qps_destroyed);
30296
30297 /* Free packets that have not yet been forwarded */
30298 /* Lock is acquired by skb_dequeue when removing the skb */
30299@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30300 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30301 skb_queue_head_init(&nesqp->pau_list);
30302 spin_lock_init(&nesqp->pau_lock);
30303- atomic_inc(&pau_qps_created);
30304+ atomic_inc_unchecked(&pau_qps_created);
30305 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30306 }
30307
30308diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30309index c00d2f3..8834298 100644
30310--- a/drivers/infiniband/hw/nes/nes_nic.c
30311+++ b/drivers/infiniband/hw/nes/nes_nic.c
30312@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30313 target_stat_values[++index] = mh_detected;
30314 target_stat_values[++index] = mh_pauses_sent;
30315 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30316- target_stat_values[++index] = atomic_read(&cm_connects);
30317- target_stat_values[++index] = atomic_read(&cm_accepts);
30318- target_stat_values[++index] = atomic_read(&cm_disconnects);
30319- target_stat_values[++index] = atomic_read(&cm_connecteds);
30320- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30321- target_stat_values[++index] = atomic_read(&cm_rejects);
30322- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30323- target_stat_values[++index] = atomic_read(&qps_created);
30324- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30325- target_stat_values[++index] = atomic_read(&qps_destroyed);
30326- target_stat_values[++index] = atomic_read(&cm_closes);
30327+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30328+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30329+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30330+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30331+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30332+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30333+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30334+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30335+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30336+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30337+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30338 target_stat_values[++index] = cm_packets_sent;
30339 target_stat_values[++index] = cm_packets_bounced;
30340 target_stat_values[++index] = cm_packets_created;
30341 target_stat_values[++index] = cm_packets_received;
30342 target_stat_values[++index] = cm_packets_dropped;
30343 target_stat_values[++index] = cm_packets_retrans;
30344- target_stat_values[++index] = atomic_read(&cm_listens_created);
30345- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30346+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30347+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30348 target_stat_values[++index] = cm_backlog_drops;
30349- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30350- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30351- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30352- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30353- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30354+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30355+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30356+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30357+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30358+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30359 target_stat_values[++index] = nesadapter->free_4kpbl;
30360 target_stat_values[++index] = nesadapter->free_256pbl;
30361 target_stat_values[++index] = int_mod_timer_init;
30362 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30363 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30364 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30365- target_stat_values[++index] = atomic_read(&pau_qps_created);
30366- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30367+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30368+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30369 }
30370
30371 /**
30372diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30373index 5095bc4..41e8fff 100644
30374--- a/drivers/infiniband/hw/nes/nes_verbs.c
30375+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30376@@ -46,9 +46,9 @@
30377
30378 #include <rdma/ib_umem.h>
30379
30380-atomic_t mod_qp_timouts;
30381-atomic_t qps_created;
30382-atomic_t sw_qps_destroyed;
30383+atomic_unchecked_t mod_qp_timouts;
30384+atomic_unchecked_t qps_created;
30385+atomic_unchecked_t sw_qps_destroyed;
30386
30387 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30388
30389@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30390 if (init_attr->create_flags)
30391 return ERR_PTR(-EINVAL);
30392
30393- atomic_inc(&qps_created);
30394+ atomic_inc_unchecked(&qps_created);
30395 switch (init_attr->qp_type) {
30396 case IB_QPT_RC:
30397 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30398@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30399 struct iw_cm_event cm_event;
30400 int ret = 0;
30401
30402- atomic_inc(&sw_qps_destroyed);
30403+ atomic_inc_unchecked(&sw_qps_destroyed);
30404 nesqp->destroyed = 1;
30405
30406 /* Blow away the connection if it exists. */
30407diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30408index b881bdc..c2e360c 100644
30409--- a/drivers/infiniband/hw/qib/qib.h
30410+++ b/drivers/infiniband/hw/qib/qib.h
30411@@ -51,6 +51,7 @@
30412 #include <linux/completion.h>
30413 #include <linux/kref.h>
30414 #include <linux/sched.h>
30415+#include <linux/slab.h>
30416
30417 #include "qib_common.h"
30418 #include "qib_verbs.h"
30419diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30420index c351aa4..e6967c2 100644
30421--- a/drivers/input/gameport/gameport.c
30422+++ b/drivers/input/gameport/gameport.c
30423@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30424 */
30425 static void gameport_init_port(struct gameport *gameport)
30426 {
30427- static atomic_t gameport_no = ATOMIC_INIT(0);
30428+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30429
30430 __module_get(THIS_MODULE);
30431
30432 mutex_init(&gameport->drv_mutex);
30433 device_initialize(&gameport->dev);
30434 dev_set_name(&gameport->dev, "gameport%lu",
30435- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30436+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30437 gameport->dev.bus = &gameport_bus;
30438 gameport->dev.release = gameport_release_port;
30439 if (gameport->parent)
30440diff --git a/drivers/input/input.c b/drivers/input/input.c
30441index da38d97..2aa0b79 100644
30442--- a/drivers/input/input.c
30443+++ b/drivers/input/input.c
30444@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30445 */
30446 int input_register_device(struct input_dev *dev)
30447 {
30448- static atomic_t input_no = ATOMIC_INIT(0);
30449+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30450 struct input_handler *handler;
30451 const char *path;
30452 int error;
30453@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30454 dev->setkeycode = input_default_setkeycode;
30455
30456 dev_set_name(&dev->dev, "input%ld",
30457- (unsigned long) atomic_inc_return(&input_no) - 1);
30458+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30459
30460 error = device_add(&dev->dev);
30461 if (error)
30462diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30463index b8d8611..7a4a04b 100644
30464--- a/drivers/input/joystick/sidewinder.c
30465+++ b/drivers/input/joystick/sidewinder.c
30466@@ -30,6 +30,7 @@
30467 #include <linux/kernel.h>
30468 #include <linux/module.h>
30469 #include <linux/slab.h>
30470+#include <linux/sched.h>
30471 #include <linux/init.h>
30472 #include <linux/input.h>
30473 #include <linux/gameport.h>
30474diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30475index d728875..844c89b 100644
30476--- a/drivers/input/joystick/xpad.c
30477+++ b/drivers/input/joystick/xpad.c
30478@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30479
30480 static int xpad_led_probe(struct usb_xpad *xpad)
30481 {
30482- static atomic_t led_seq = ATOMIC_INIT(0);
30483+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30484 long led_no;
30485 struct xpad_led *led;
30486 struct led_classdev *led_cdev;
30487@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30488 if (!led)
30489 return -ENOMEM;
30490
30491- led_no = (long)atomic_inc_return(&led_seq) - 1;
30492+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30493
30494 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30495 led->xpad = xpad;
30496diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30497index 0110b5a..d3ad144 100644
30498--- a/drivers/input/mousedev.c
30499+++ b/drivers/input/mousedev.c
30500@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30501
30502 spin_unlock_irq(&client->packet_lock);
30503
30504- if (copy_to_user(buffer, data, count))
30505+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30506 return -EFAULT;
30507
30508 return count;
30509diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30510index ba70058..571d25d 100644
30511--- a/drivers/input/serio/serio.c
30512+++ b/drivers/input/serio/serio.c
30513@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30514 */
30515 static void serio_init_port(struct serio *serio)
30516 {
30517- static atomic_t serio_no = ATOMIC_INIT(0);
30518+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30519
30520 __module_get(THIS_MODULE);
30521
30522@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30523 mutex_init(&serio->drv_mutex);
30524 device_initialize(&serio->dev);
30525 dev_set_name(&serio->dev, "serio%ld",
30526- (long)atomic_inc_return(&serio_no) - 1);
30527+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30528 serio->dev.bus = &serio_bus;
30529 serio->dev.release = serio_release_port;
30530 serio->dev.groups = serio_device_attr_groups;
30531diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30532index e44933d..9ba484a 100644
30533--- a/drivers/isdn/capi/capi.c
30534+++ b/drivers/isdn/capi/capi.c
30535@@ -83,8 +83,8 @@ struct capiminor {
30536
30537 struct capi20_appl *ap;
30538 u32 ncci;
30539- atomic_t datahandle;
30540- atomic_t msgid;
30541+ atomic_unchecked_t datahandle;
30542+ atomic_unchecked_t msgid;
30543
30544 struct tty_port port;
30545 int ttyinstop;
30546@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30547 capimsg_setu16(s, 2, mp->ap->applid);
30548 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30549 capimsg_setu8 (s, 5, CAPI_RESP);
30550- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30551+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30552 capimsg_setu32(s, 8, mp->ncci);
30553 capimsg_setu16(s, 12, datahandle);
30554 }
30555@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30556 mp->outbytes -= len;
30557 spin_unlock_bh(&mp->outlock);
30558
30559- datahandle = atomic_inc_return(&mp->datahandle);
30560+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30561 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30562 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30563 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30564 capimsg_setu16(skb->data, 2, mp->ap->applid);
30565 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30566 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30567- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30568+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30569 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30570 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30571 capimsg_setu16(skb->data, 16, len); /* Data length */
30572diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30573index db621db..825ea1a 100644
30574--- a/drivers/isdn/gigaset/common.c
30575+++ b/drivers/isdn/gigaset/common.c
30576@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30577 cs->commands_pending = 0;
30578 cs->cur_at_seq = 0;
30579 cs->gotfwver = -1;
30580- cs->open_count = 0;
30581+ local_set(&cs->open_count, 0);
30582 cs->dev = NULL;
30583 cs->tty = NULL;
30584 cs->tty_dev = NULL;
30585diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30586index 212efaf..f187c6b 100644
30587--- a/drivers/isdn/gigaset/gigaset.h
30588+++ b/drivers/isdn/gigaset/gigaset.h
30589@@ -35,6 +35,7 @@
30590 #include <linux/tty_driver.h>
30591 #include <linux/list.h>
30592 #include <linux/atomic.h>
30593+#include <asm/local.h>
30594
30595 #define GIG_VERSION {0, 5, 0, 0}
30596 #define GIG_COMPAT {0, 4, 0, 0}
30597@@ -433,7 +434,7 @@ struct cardstate {
30598 spinlock_t cmdlock;
30599 unsigned curlen, cmdbytes;
30600
30601- unsigned open_count;
30602+ local_t open_count;
30603 struct tty_struct *tty;
30604 struct tasklet_struct if_wake_tasklet;
30605 unsigned control_state;
30606diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30607index ee0a549..a7c9798 100644
30608--- a/drivers/isdn/gigaset/interface.c
30609+++ b/drivers/isdn/gigaset/interface.c
30610@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30611 }
30612 tty->driver_data = cs;
30613
30614- ++cs->open_count;
30615-
30616- if (cs->open_count == 1) {
30617+ if (local_inc_return(&cs->open_count) == 1) {
30618 spin_lock_irqsave(&cs->lock, flags);
30619 cs->tty = tty;
30620 spin_unlock_irqrestore(&cs->lock, flags);
30621@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30622
30623 if (!cs->connected)
30624 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30625- else if (!cs->open_count)
30626+ else if (!local_read(&cs->open_count))
30627 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30628 else {
30629- if (!--cs->open_count) {
30630+ if (!local_dec_return(&cs->open_count)) {
30631 spin_lock_irqsave(&cs->lock, flags);
30632 cs->tty = NULL;
30633 spin_unlock_irqrestore(&cs->lock, flags);
30634@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30635 if (!cs->connected) {
30636 gig_dbg(DEBUG_IF, "not connected");
30637 retval = -ENODEV;
30638- } else if (!cs->open_count)
30639+ } else if (!local_read(&cs->open_count))
30640 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30641 else {
30642 retval = 0;
30643@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30644 retval = -ENODEV;
30645 goto done;
30646 }
30647- if (!cs->open_count) {
30648+ if (!local_read(&cs->open_count)) {
30649 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30650 retval = -ENODEV;
30651 goto done;
30652@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30653 if (!cs->connected) {
30654 gig_dbg(DEBUG_IF, "not connected");
30655 retval = -ENODEV;
30656- } else if (!cs->open_count)
30657+ } else if (!local_read(&cs->open_count))
30658 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30659 else if (cs->mstate != MS_LOCKED) {
30660 dev_warn(cs->dev, "can't write to unlocked device\n");
30661@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30662
30663 if (!cs->connected)
30664 gig_dbg(DEBUG_IF, "not connected");
30665- else if (!cs->open_count)
30666+ else if (!local_read(&cs->open_count))
30667 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30668 else if (cs->mstate != MS_LOCKED)
30669 dev_warn(cs->dev, "can't write to unlocked device\n");
30670@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30671
30672 if (!cs->connected)
30673 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30674- else if (!cs->open_count)
30675+ else if (!local_read(&cs->open_count))
30676 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30677 else
30678 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30679@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30680
30681 if (!cs->connected)
30682 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30683- else if (!cs->open_count)
30684+ else if (!local_read(&cs->open_count))
30685 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30686 else
30687 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30688@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30689 goto out;
30690 }
30691
30692- if (!cs->open_count) {
30693+ if (!local_read(&cs->open_count)) {
30694 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30695 goto out;
30696 }
30697diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30698index 2a57da59..e7a12ed 100644
30699--- a/drivers/isdn/hardware/avm/b1.c
30700+++ b/drivers/isdn/hardware/avm/b1.c
30701@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30702 }
30703 if (left) {
30704 if (t4file->user) {
30705- if (copy_from_user(buf, dp, left))
30706+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30707 return -EFAULT;
30708 } else {
30709 memcpy(buf, dp, left);
30710@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30711 }
30712 if (left) {
30713 if (config->user) {
30714- if (copy_from_user(buf, dp, left))
30715+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30716 return -EFAULT;
30717 } else {
30718 memcpy(buf, dp, left);
30719diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30720index 85784a7..a19ca98 100644
30721--- a/drivers/isdn/hardware/eicon/divasync.h
30722+++ b/drivers/isdn/hardware/eicon/divasync.h
30723@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30724 } diva_didd_add_adapter_t;
30725 typedef struct _diva_didd_remove_adapter {
30726 IDI_CALL p_request;
30727-} diva_didd_remove_adapter_t;
30728+} __no_const diva_didd_remove_adapter_t;
30729 typedef struct _diva_didd_read_adapter_array {
30730 void * buffer;
30731 dword length;
30732diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30733index a3bd163..8956575 100644
30734--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30735+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30736@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30737 typedef struct _diva_os_idi_adapter_interface {
30738 diva_init_card_proc_t cleanup_adapter_proc;
30739 diva_cmd_card_proc_t cmd_proc;
30740-} diva_os_idi_adapter_interface_t;
30741+} __no_const diva_os_idi_adapter_interface_t;
30742
30743 typedef struct _diva_os_xdi_adapter {
30744 struct list_head link;
30745diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30746index 1f355bb..43f1fea 100644
30747--- a/drivers/isdn/icn/icn.c
30748+++ b/drivers/isdn/icn/icn.c
30749@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30750 if (count > len)
30751 count = len;
30752 if (user) {
30753- if (copy_from_user(msg, buf, count))
30754+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30755 return -EFAULT;
30756 } else
30757 memcpy(msg, buf, count);
30758diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30759index b5fdcb7..5b6c59f 100644
30760--- a/drivers/lguest/core.c
30761+++ b/drivers/lguest/core.c
30762@@ -92,9 +92,17 @@ static __init int map_switcher(void)
30763 * it's worked so far. The end address needs +1 because __get_vm_area
30764 * allocates an extra guard page, so we need space for that.
30765 */
30766+
30767+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30768+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30769+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30770+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30771+#else
30772 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30773 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30774 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30775+#endif
30776+
30777 if (!switcher_vma) {
30778 err = -ENOMEM;
30779 printk("lguest: could not map switcher pages high\n");
30780@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30781 * Now the Switcher is mapped at the right address, we can't fail!
30782 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30783 */
30784- memcpy(switcher_vma->addr, start_switcher_text,
30785+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30786 end_switcher_text - start_switcher_text);
30787
30788 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30789diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30790index 65af42f..530c87a 100644
30791--- a/drivers/lguest/x86/core.c
30792+++ b/drivers/lguest/x86/core.c
30793@@ -59,7 +59,7 @@ static struct {
30794 /* Offset from where switcher.S was compiled to where we've copied it */
30795 static unsigned long switcher_offset(void)
30796 {
30797- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30798+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30799 }
30800
30801 /* This cpu's struct lguest_pages. */
30802@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30803 * These copies are pretty cheap, so we do them unconditionally: */
30804 /* Save the current Host top-level page directory.
30805 */
30806+
30807+#ifdef CONFIG_PAX_PER_CPU_PGD
30808+ pages->state.host_cr3 = read_cr3();
30809+#else
30810 pages->state.host_cr3 = __pa(current->mm->pgd);
30811+#endif
30812+
30813 /*
30814 * Set up the Guest's page tables to see this CPU's pages (and no
30815 * other CPU's pages).
30816@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30817 * compiled-in switcher code and the high-mapped copy we just made.
30818 */
30819 for (i = 0; i < IDT_ENTRIES; i++)
30820- default_idt_entries[i] += switcher_offset();
30821+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30822
30823 /*
30824 * Set up the Switcher's per-cpu areas.
30825@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30826 * it will be undisturbed when we switch. To change %cs and jump we
30827 * need this structure to feed to Intel's "lcall" instruction.
30828 */
30829- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30830+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30831 lguest_entry.segment = LGUEST_CS;
30832
30833 /*
30834diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30835index 40634b0..4f5855e 100644
30836--- a/drivers/lguest/x86/switcher_32.S
30837+++ b/drivers/lguest/x86/switcher_32.S
30838@@ -87,6 +87,7 @@
30839 #include <asm/page.h>
30840 #include <asm/segment.h>
30841 #include <asm/lguest.h>
30842+#include <asm/processor-flags.h>
30843
30844 // We mark the start of the code to copy
30845 // It's placed in .text tho it's never run here
30846@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30847 // Changes type when we load it: damn Intel!
30848 // For after we switch over our page tables
30849 // That entry will be read-only: we'd crash.
30850+
30851+#ifdef CONFIG_PAX_KERNEXEC
30852+ mov %cr0, %edx
30853+ xor $X86_CR0_WP, %edx
30854+ mov %edx, %cr0
30855+#endif
30856+
30857 movl $(GDT_ENTRY_TSS*8), %edx
30858 ltr %dx
30859
30860@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30861 // Let's clear it again for our return.
30862 // The GDT descriptor of the Host
30863 // Points to the table after two "size" bytes
30864- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30865+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30866 // Clear "used" from type field (byte 5, bit 2)
30867- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30868+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30869+
30870+#ifdef CONFIG_PAX_KERNEXEC
30871+ mov %cr0, %eax
30872+ xor $X86_CR0_WP, %eax
30873+ mov %eax, %cr0
30874+#endif
30875
30876 // Once our page table's switched, the Guest is live!
30877 // The Host fades as we run this final step.
30878@@ -295,13 +309,12 @@ deliver_to_host:
30879 // I consulted gcc, and it gave
30880 // These instructions, which I gladly credit:
30881 leal (%edx,%ebx,8), %eax
30882- movzwl (%eax),%edx
30883- movl 4(%eax), %eax
30884- xorw %ax, %ax
30885- orl %eax, %edx
30886+ movl 4(%eax), %edx
30887+ movw (%eax), %dx
30888 // Now the address of the handler's in %edx
30889 // We call it now: its "iret" drops us home.
30890- jmp *%edx
30891+ ljmp $__KERNEL_CS, $1f
30892+1: jmp *%edx
30893
30894 // Every interrupt can come to us here
30895 // But we must truly tell each apart.
30896diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30897index 4daf9e5..b8d1d0f 100644
30898--- a/drivers/macintosh/macio_asic.c
30899+++ b/drivers/macintosh/macio_asic.c
30900@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30901 * MacIO is matched against any Apple ID, it's probe() function
30902 * will then decide wether it applies or not
30903 */
30904-static const struct pci_device_id __devinitdata pci_ids [] = { {
30905+static const struct pci_device_id __devinitconst pci_ids [] = { {
30906 .vendor = PCI_VENDOR_ID_APPLE,
30907 .device = PCI_ANY_ID,
30908 .subvendor = PCI_ANY_ID,
30909diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
30910index 31c2dc2..a2de7a6 100644
30911--- a/drivers/md/dm-ioctl.c
30912+++ b/drivers/md/dm-ioctl.c
30913@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
30914 cmd == DM_LIST_VERSIONS_CMD)
30915 return 0;
30916
30917- if ((cmd == DM_DEV_CREATE_CMD)) {
30918+ if (cmd == DM_DEV_CREATE_CMD) {
30919 if (!*param->name) {
30920 DMWARN("name not supplied when creating device");
30921 return -EINVAL;
30922diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
30923index 9bfd057..01180bc 100644
30924--- a/drivers/md/dm-raid1.c
30925+++ b/drivers/md/dm-raid1.c
30926@@ -40,7 +40,7 @@ enum dm_raid1_error {
30927
30928 struct mirror {
30929 struct mirror_set *ms;
30930- atomic_t error_count;
30931+ atomic_unchecked_t error_count;
30932 unsigned long error_type;
30933 struct dm_dev *dev;
30934 sector_t offset;
30935@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
30936 struct mirror *m;
30937
30938 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30939- if (!atomic_read(&m->error_count))
30940+ if (!atomic_read_unchecked(&m->error_count))
30941 return m;
30942
30943 return NULL;
30944@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
30945 * simple way to tell if a device has encountered
30946 * errors.
30947 */
30948- atomic_inc(&m->error_count);
30949+ atomic_inc_unchecked(&m->error_count);
30950
30951 if (test_and_set_bit(error_type, &m->error_type))
30952 return;
30953@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
30954 struct mirror *m = get_default_mirror(ms);
30955
30956 do {
30957- if (likely(!atomic_read(&m->error_count)))
30958+ if (likely(!atomic_read_unchecked(&m->error_count)))
30959 return m;
30960
30961 if (m-- == ms->mirror)
30962@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
30963 {
30964 struct mirror *default_mirror = get_default_mirror(m->ms);
30965
30966- return !atomic_read(&default_mirror->error_count);
30967+ return !atomic_read_unchecked(&default_mirror->error_count);
30968 }
30969
30970 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30971@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
30972 */
30973 if (likely(region_in_sync(ms, region, 1)))
30974 m = choose_mirror(ms, bio->bi_sector);
30975- else if (m && atomic_read(&m->error_count))
30976+ else if (m && atomic_read_unchecked(&m->error_count))
30977 m = NULL;
30978
30979 if (likely(m))
30980@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
30981 }
30982
30983 ms->mirror[mirror].ms = ms;
30984- atomic_set(&(ms->mirror[mirror].error_count), 0);
30985+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30986 ms->mirror[mirror].error_type = 0;
30987 ms->mirror[mirror].offset = offset;
30988
30989@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
30990 */
30991 static char device_status_char(struct mirror *m)
30992 {
30993- if (!atomic_read(&(m->error_count)))
30994+ if (!atomic_read_unchecked(&(m->error_count)))
30995 return 'A';
30996
30997 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
30998diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
30999index 3d80cf0..b77cc47 100644
31000--- a/drivers/md/dm-stripe.c
31001+++ b/drivers/md/dm-stripe.c
31002@@ -20,7 +20,7 @@ struct stripe {
31003 struct dm_dev *dev;
31004 sector_t physical_start;
31005
31006- atomic_t error_count;
31007+ atomic_unchecked_t error_count;
31008 };
31009
31010 struct stripe_c {
31011@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31012 kfree(sc);
31013 return r;
31014 }
31015- atomic_set(&(sc->stripe[i].error_count), 0);
31016+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31017 }
31018
31019 ti->private = sc;
31020@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31021 DMEMIT("%d ", sc->stripes);
31022 for (i = 0; i < sc->stripes; i++) {
31023 DMEMIT("%s ", sc->stripe[i].dev->name);
31024- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31025+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31026 'D' : 'A';
31027 }
31028 buffer[i] = '\0';
31029@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31030 */
31031 for (i = 0; i < sc->stripes; i++)
31032 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31033- atomic_inc(&(sc->stripe[i].error_count));
31034- if (atomic_read(&(sc->stripe[i].error_count)) <
31035+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31036+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31037 DM_IO_ERROR_THRESHOLD)
31038 schedule_work(&sc->trigger_event);
31039 }
31040diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31041index 8e91321..fd17aef 100644
31042--- a/drivers/md/dm-table.c
31043+++ b/drivers/md/dm-table.c
31044@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31045 if (!dev_size)
31046 return 0;
31047
31048- if ((start >= dev_size) || (start + len > dev_size)) {
31049+ if ((start >= dev_size) || (len > dev_size - start)) {
31050 DMWARN("%s: %s too small for target: "
31051 "start=%llu, len=%llu, dev_size=%llu",
31052 dm_device_name(ti->table->md), bdevname(bdev, b),
31053diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31054index 59c4f04..4c7b661 100644
31055--- a/drivers/md/dm-thin-metadata.c
31056+++ b/drivers/md/dm-thin-metadata.c
31057@@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31058
31059 pmd->info.tm = tm;
31060 pmd->info.levels = 2;
31061- pmd->info.value_type.context = pmd->data_sm;
31062+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31063 pmd->info.value_type.size = sizeof(__le64);
31064 pmd->info.value_type.inc = data_block_inc;
31065 pmd->info.value_type.dec = data_block_dec;
31066@@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31067
31068 pmd->bl_info.tm = tm;
31069 pmd->bl_info.levels = 1;
31070- pmd->bl_info.value_type.context = pmd->data_sm;
31071+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31072 pmd->bl_info.value_type.size = sizeof(__le64);
31073 pmd->bl_info.value_type.inc = data_block_inc;
31074 pmd->bl_info.value_type.dec = data_block_dec;
31075diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31076index 4720f68..78d1df7 100644
31077--- a/drivers/md/dm.c
31078+++ b/drivers/md/dm.c
31079@@ -177,9 +177,9 @@ struct mapped_device {
31080 /*
31081 * Event handling.
31082 */
31083- atomic_t event_nr;
31084+ atomic_unchecked_t event_nr;
31085 wait_queue_head_t eventq;
31086- atomic_t uevent_seq;
31087+ atomic_unchecked_t uevent_seq;
31088 struct list_head uevent_list;
31089 spinlock_t uevent_lock; /* Protect access to uevent_list */
31090
31091@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31092 rwlock_init(&md->map_lock);
31093 atomic_set(&md->holders, 1);
31094 atomic_set(&md->open_count, 0);
31095- atomic_set(&md->event_nr, 0);
31096- atomic_set(&md->uevent_seq, 0);
31097+ atomic_set_unchecked(&md->event_nr, 0);
31098+ atomic_set_unchecked(&md->uevent_seq, 0);
31099 INIT_LIST_HEAD(&md->uevent_list);
31100 spin_lock_init(&md->uevent_lock);
31101
31102@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31103
31104 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31105
31106- atomic_inc(&md->event_nr);
31107+ atomic_inc_unchecked(&md->event_nr);
31108 wake_up(&md->eventq);
31109 }
31110
31111@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31112
31113 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31114 {
31115- return atomic_add_return(1, &md->uevent_seq);
31116+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31117 }
31118
31119 uint32_t dm_get_event_nr(struct mapped_device *md)
31120 {
31121- return atomic_read(&md->event_nr);
31122+ return atomic_read_unchecked(&md->event_nr);
31123 }
31124
31125 int dm_wait_event(struct mapped_device *md, int event_nr)
31126 {
31127 return wait_event_interruptible(md->eventq,
31128- (event_nr != atomic_read(&md->event_nr)));
31129+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31130 }
31131
31132 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31133diff --git a/drivers/md/md.c b/drivers/md/md.c
31134index f47f1f8..b7f559e 100644
31135--- a/drivers/md/md.c
31136+++ b/drivers/md/md.c
31137@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31138 * start build, activate spare
31139 */
31140 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31141-static atomic_t md_event_count;
31142+static atomic_unchecked_t md_event_count;
31143 void md_new_event(struct mddev *mddev)
31144 {
31145- atomic_inc(&md_event_count);
31146+ atomic_inc_unchecked(&md_event_count);
31147 wake_up(&md_event_waiters);
31148 }
31149 EXPORT_SYMBOL_GPL(md_new_event);
31150@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31151 */
31152 static void md_new_event_inintr(struct mddev *mddev)
31153 {
31154- atomic_inc(&md_event_count);
31155+ atomic_inc_unchecked(&md_event_count);
31156 wake_up(&md_event_waiters);
31157 }
31158
31159@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31160
31161 rdev->preferred_minor = 0xffff;
31162 rdev->data_offset = le64_to_cpu(sb->data_offset);
31163- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31164+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31165
31166 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31167 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31168@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31169 else
31170 sb->resync_offset = cpu_to_le64(0);
31171
31172- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31173+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31174
31175 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31176 sb->size = cpu_to_le64(mddev->dev_sectors);
31177@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31178 static ssize_t
31179 errors_show(struct md_rdev *rdev, char *page)
31180 {
31181- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31182+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31183 }
31184
31185 static ssize_t
31186@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31187 char *e;
31188 unsigned long n = simple_strtoul(buf, &e, 10);
31189 if (*buf && (*e == 0 || *e == '\n')) {
31190- atomic_set(&rdev->corrected_errors, n);
31191+ atomic_set_unchecked(&rdev->corrected_errors, n);
31192 return len;
31193 }
31194 return -EINVAL;
31195@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31196 rdev->sb_loaded = 0;
31197 rdev->bb_page = NULL;
31198 atomic_set(&rdev->nr_pending, 0);
31199- atomic_set(&rdev->read_errors, 0);
31200- atomic_set(&rdev->corrected_errors, 0);
31201+ atomic_set_unchecked(&rdev->read_errors, 0);
31202+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31203
31204 INIT_LIST_HEAD(&rdev->same_set);
31205 init_waitqueue_head(&rdev->blocked_wait);
31206@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31207
31208 spin_unlock(&pers_lock);
31209 seq_printf(seq, "\n");
31210- seq->poll_event = atomic_read(&md_event_count);
31211+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31212 return 0;
31213 }
31214 if (v == (void*)2) {
31215@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31216 chunk_kb ? "KB" : "B");
31217 if (bitmap->file) {
31218 seq_printf(seq, ", file: ");
31219- seq_path(seq, &bitmap->file->f_path, " \t\n");
31220+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31221 }
31222
31223 seq_printf(seq, "\n");
31224@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31225 return error;
31226
31227 seq = file->private_data;
31228- seq->poll_event = atomic_read(&md_event_count);
31229+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31230 return error;
31231 }
31232
31233@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31234 /* always allow read */
31235 mask = POLLIN | POLLRDNORM;
31236
31237- if (seq->poll_event != atomic_read(&md_event_count))
31238+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31239 mask |= POLLERR | POLLPRI;
31240 return mask;
31241 }
31242@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31243 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31244 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31245 (int)part_stat_read(&disk->part0, sectors[1]) -
31246- atomic_read(&disk->sync_io);
31247+ atomic_read_unchecked(&disk->sync_io);
31248 /* sync IO will cause sync_io to increase before the disk_stats
31249 * as sync_io is counted when a request starts, and
31250 * disk_stats is counted when it completes.
31251diff --git a/drivers/md/md.h b/drivers/md/md.h
31252index cf742d9..7c7c745 100644
31253--- a/drivers/md/md.h
31254+++ b/drivers/md/md.h
31255@@ -120,13 +120,13 @@ struct md_rdev {
31256 * only maintained for arrays that
31257 * support hot removal
31258 */
31259- atomic_t read_errors; /* number of consecutive read errors that
31260+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31261 * we have tried to ignore.
31262 */
31263 struct timespec last_read_error; /* monotonic time since our
31264 * last read error
31265 */
31266- atomic_t corrected_errors; /* number of corrected read errors,
31267+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31268 * for reporting to userspace and storing
31269 * in superblock.
31270 */
31271@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31272
31273 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31274 {
31275- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31276+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31277 }
31278
31279 struct md_personality
31280diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31281index 50ed53b..4f29d7d 100644
31282--- a/drivers/md/persistent-data/dm-space-map-checker.c
31283+++ b/drivers/md/persistent-data/dm-space-map-checker.c
31284@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31285 /*----------------------------------------------------------------*/
31286
31287 struct sm_checker {
31288- struct dm_space_map sm;
31289+ dm_space_map_no_const sm;
31290
31291 struct count_array old_counts;
31292 struct count_array counts;
31293diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31294index fc469ba..2d91555 100644
31295--- a/drivers/md/persistent-data/dm-space-map-disk.c
31296+++ b/drivers/md/persistent-data/dm-space-map-disk.c
31297@@ -23,7 +23,7 @@
31298 * Space map interface.
31299 */
31300 struct sm_disk {
31301- struct dm_space_map sm;
31302+ dm_space_map_no_const sm;
31303
31304 struct ll_disk ll;
31305 struct ll_disk old_ll;
31306diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31307index e89ae5e..062e4c2 100644
31308--- a/drivers/md/persistent-data/dm-space-map-metadata.c
31309+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31310@@ -43,7 +43,7 @@ struct block_op {
31311 };
31312
31313 struct sm_metadata {
31314- struct dm_space_map sm;
31315+ dm_space_map_no_const sm;
31316
31317 struct ll_disk ll;
31318 struct ll_disk old_ll;
31319diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31320index 1cbfc6b..56e1dbb 100644
31321--- a/drivers/md/persistent-data/dm-space-map.h
31322+++ b/drivers/md/persistent-data/dm-space-map.h
31323@@ -60,6 +60,7 @@ struct dm_space_map {
31324 int (*root_size)(struct dm_space_map *sm, size_t *result);
31325 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31326 };
31327+typedef struct dm_space_map __no_const dm_space_map_no_const;
31328
31329 /*----------------------------------------------------------------*/
31330
31331diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31332index 7d9e071..015b1d5 100644
31333--- a/drivers/md/raid1.c
31334+++ b/drivers/md/raid1.c
31335@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31336 if (r1_sync_page_io(rdev, sect, s,
31337 bio->bi_io_vec[idx].bv_page,
31338 READ) != 0)
31339- atomic_add(s, &rdev->corrected_errors);
31340+ atomic_add_unchecked(s, &rdev->corrected_errors);
31341 }
31342 sectors -= s;
31343 sect += s;
31344@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31345 test_bit(In_sync, &rdev->flags)) {
31346 if (r1_sync_page_io(rdev, sect, s,
31347 conf->tmppage, READ)) {
31348- atomic_add(s, &rdev->corrected_errors);
31349+ atomic_add_unchecked(s, &rdev->corrected_errors);
31350 printk(KERN_INFO
31351 "md/raid1:%s: read error corrected "
31352 "(%d sectors at %llu on %s)\n",
31353diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31354index 685ddf3..955b087 100644
31355--- a/drivers/md/raid10.c
31356+++ b/drivers/md/raid10.c
31357@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31358 /* The write handler will notice the lack of
31359 * R10BIO_Uptodate and record any errors etc
31360 */
31361- atomic_add(r10_bio->sectors,
31362+ atomic_add_unchecked(r10_bio->sectors,
31363 &conf->mirrors[d].rdev->corrected_errors);
31364
31365 /* for reconstruct, we always reschedule after a read.
31366@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31367 {
31368 struct timespec cur_time_mon;
31369 unsigned long hours_since_last;
31370- unsigned int read_errors = atomic_read(&rdev->read_errors);
31371+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31372
31373 ktime_get_ts(&cur_time_mon);
31374
31375@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31376 * overflowing the shift of read_errors by hours_since_last.
31377 */
31378 if (hours_since_last >= 8 * sizeof(read_errors))
31379- atomic_set(&rdev->read_errors, 0);
31380+ atomic_set_unchecked(&rdev->read_errors, 0);
31381 else
31382- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31383+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31384 }
31385
31386 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31387@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31388 return;
31389
31390 check_decay_read_errors(mddev, rdev);
31391- atomic_inc(&rdev->read_errors);
31392- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31393+ atomic_inc_unchecked(&rdev->read_errors);
31394+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31395 char b[BDEVNAME_SIZE];
31396 bdevname(rdev->bdev, b);
31397
31398@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31399 "md/raid10:%s: %s: Raid device exceeded "
31400 "read_error threshold [cur %d:max %d]\n",
31401 mdname(mddev), b,
31402- atomic_read(&rdev->read_errors), max_read_errors);
31403+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31404 printk(KERN_NOTICE
31405 "md/raid10:%s: %s: Failing raid device\n",
31406 mdname(mddev), b);
31407@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31408 (unsigned long long)(
31409 sect + rdev->data_offset),
31410 bdevname(rdev->bdev, b));
31411- atomic_add(s, &rdev->corrected_errors);
31412+ atomic_add_unchecked(s, &rdev->corrected_errors);
31413 }
31414
31415 rdev_dec_pending(rdev, mddev);
31416diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31417index 858fdbb..b2dac95 100644
31418--- a/drivers/md/raid5.c
31419+++ b/drivers/md/raid5.c
31420@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31421 (unsigned long long)(sh->sector
31422 + rdev->data_offset),
31423 bdevname(rdev->bdev, b));
31424- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31425+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31426 clear_bit(R5_ReadError, &sh->dev[i].flags);
31427 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31428 }
31429- if (atomic_read(&conf->disks[i].rdev->read_errors))
31430- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31431+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31432+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31433 } else {
31434 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31435 int retry = 0;
31436 rdev = conf->disks[i].rdev;
31437
31438 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31439- atomic_inc(&rdev->read_errors);
31440+ atomic_inc_unchecked(&rdev->read_errors);
31441 if (conf->mddev->degraded >= conf->max_degraded)
31442 printk_ratelimited(
31443 KERN_WARNING
31444@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31445 (unsigned long long)(sh->sector
31446 + rdev->data_offset),
31447 bdn);
31448- else if (atomic_read(&rdev->read_errors)
31449+ else if (atomic_read_unchecked(&rdev->read_errors)
31450 > conf->max_nr_stripes)
31451 printk(KERN_WARNING
31452 "md/raid:%s: Too many read errors, failing device %s.\n",
31453diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31454index ba9a643..e474ab5 100644
31455--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31456+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31457@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31458 .subvendor = _subvend, .subdevice = _subdev, \
31459 .driver_data = (unsigned long)&_driverdata }
31460
31461-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31462+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31463 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31464 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31465 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31466diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31467index a7d876f..8c21b61 100644
31468--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31469+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31470@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31471 union {
31472 dmx_ts_cb ts;
31473 dmx_section_cb sec;
31474- } cb;
31475+ } __no_const cb;
31476
31477 struct dvb_demux *demux;
31478 void *priv;
31479diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31480index f732877..d38c35a 100644
31481--- a/drivers/media/dvb/dvb-core/dvbdev.c
31482+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31483@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31484 const struct dvb_device *template, void *priv, int type)
31485 {
31486 struct dvb_device *dvbdev;
31487- struct file_operations *dvbdevfops;
31488+ file_operations_no_const *dvbdevfops;
31489 struct device *clsdev;
31490 int minor;
31491 int id;
31492diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31493index 9f2a02c..5920f88 100644
31494--- a/drivers/media/dvb/dvb-usb/cxusb.c
31495+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31496@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31497 struct dib0700_adapter_state {
31498 int (*set_param_save) (struct dvb_frontend *,
31499 struct dvb_frontend_parameters *);
31500-};
31501+} __no_const;
31502
31503 static int dib7070_set_param_override(struct dvb_frontend *fe,
31504 struct dvb_frontend_parameters *fep)
31505diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31506index f103ec1..5e8968b 100644
31507--- a/drivers/media/dvb/dvb-usb/dw2102.c
31508+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31509@@ -95,7 +95,7 @@ struct su3000_state {
31510
31511 struct s6x0_state {
31512 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31513-};
31514+} __no_const;
31515
31516 /* debug */
31517 static int dvb_usb_dw2102_debug;
31518diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31519index 404f63a..4796533 100644
31520--- a/drivers/media/dvb/frontends/dib3000.h
31521+++ b/drivers/media/dvb/frontends/dib3000.h
31522@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31523 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31524 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31525 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31526-};
31527+} __no_const;
31528
31529 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31530 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31531diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31532index 90bf573..e8463da 100644
31533--- a/drivers/media/dvb/frontends/ds3000.c
31534+++ b/drivers/media/dvb/frontends/ds3000.c
31535@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31536
31537 for (i = 0; i < 30 ; i++) {
31538 ds3000_read_status(fe, &status);
31539- if (status && FE_HAS_LOCK)
31540+ if (status & FE_HAS_LOCK)
31541 break;
31542
31543 msleep(10);
31544diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31545index 0564192..75b16f5 100644
31546--- a/drivers/media/dvb/ngene/ngene-cards.c
31547+++ b/drivers/media/dvb/ngene/ngene-cards.c
31548@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31549
31550 /****************************************************************************/
31551
31552-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31553+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31554 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31555 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31556 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31557diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31558index 16a089f..ab1667d 100644
31559--- a/drivers/media/radio/radio-cadet.c
31560+++ b/drivers/media/radio/radio-cadet.c
31561@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31562 unsigned char readbuf[RDS_BUFFER];
31563 int i = 0;
31564
31565+ if (count > RDS_BUFFER)
31566+ return -EFAULT;
31567 mutex_lock(&dev->lock);
31568 if (dev->rdsstat == 0) {
31569 dev->rdsstat = 1;
31570diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31571index 61287fc..8b08712 100644
31572--- a/drivers/media/rc/redrat3.c
31573+++ b/drivers/media/rc/redrat3.c
31574@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31575 return carrier;
31576 }
31577
31578-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31579+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31580 {
31581 struct redrat3_dev *rr3 = rcdev->priv;
31582 struct device *dev = rr3->dev;
31583diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31584index 9cde353..8c6a1c3 100644
31585--- a/drivers/media/video/au0828/au0828.h
31586+++ b/drivers/media/video/au0828/au0828.h
31587@@ -191,7 +191,7 @@ struct au0828_dev {
31588
31589 /* I2C */
31590 struct i2c_adapter i2c_adap;
31591- struct i2c_algorithm i2c_algo;
31592+ i2c_algorithm_no_const i2c_algo;
31593 struct i2c_client i2c_client;
31594 u32 i2c_rc;
31595
31596diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31597index 68d1240..46b32eb 100644
31598--- a/drivers/media/video/cx88/cx88-alsa.c
31599+++ b/drivers/media/video/cx88/cx88-alsa.c
31600@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31601 * Only boards with eeprom and byte 1 at eeprom=1 have it
31602 */
31603
31604-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31605+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31606 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31607 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31608 {0, }
31609diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31610index 305e6aa..0143317 100644
31611--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31612+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31613@@ -196,7 +196,7 @@ struct pvr2_hdw {
31614
31615 /* I2C stuff */
31616 struct i2c_adapter i2c_adap;
31617- struct i2c_algorithm i2c_algo;
31618+ i2c_algorithm_no_const i2c_algo;
31619 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31620 int i2c_cx25840_hack_state;
31621 int i2c_linked;
31622diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31623index a0895bf..b7ebb1b 100644
31624--- a/drivers/media/video/timblogiw.c
31625+++ b/drivers/media/video/timblogiw.c
31626@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31627
31628 /* Platform device functions */
31629
31630-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31631+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31632 .vidioc_querycap = timblogiw_querycap,
31633 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31634 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31635@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31636 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31637 };
31638
31639-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31640+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31641 .owner = THIS_MODULE,
31642 .open = timblogiw_open,
31643 .release = timblogiw_close,
31644diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31645index e9c6a60..daf6a33 100644
31646--- a/drivers/message/fusion/mptbase.c
31647+++ b/drivers/message/fusion/mptbase.c
31648@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31649 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31650 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31651
31652+#ifdef CONFIG_GRKERNSEC_HIDESYM
31653+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31654+#else
31655 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31656 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31657+#endif
31658+
31659 /*
31660 * Rounding UP to nearest 4-kB boundary here...
31661 */
31662diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31663index 9d95042..b808101 100644
31664--- a/drivers/message/fusion/mptsas.c
31665+++ b/drivers/message/fusion/mptsas.c
31666@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31667 return 0;
31668 }
31669
31670+static inline void
31671+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31672+{
31673+ if (phy_info->port_details) {
31674+ phy_info->port_details->rphy = rphy;
31675+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31676+ ioc->name, rphy));
31677+ }
31678+
31679+ if (rphy) {
31680+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31681+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31682+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31683+ ioc->name, rphy, rphy->dev.release));
31684+ }
31685+}
31686+
31687 /* no mutex */
31688 static void
31689 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31690@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31691 return NULL;
31692 }
31693
31694-static inline void
31695-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31696-{
31697- if (phy_info->port_details) {
31698- phy_info->port_details->rphy = rphy;
31699- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31700- ioc->name, rphy));
31701- }
31702-
31703- if (rphy) {
31704- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31705- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31706- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31707- ioc->name, rphy, rphy->dev.release));
31708- }
31709-}
31710-
31711 static inline struct sas_port *
31712 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31713 {
31714diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31715index 0c3ced7..1fe34ec 100644
31716--- a/drivers/message/fusion/mptscsih.c
31717+++ b/drivers/message/fusion/mptscsih.c
31718@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31719
31720 h = shost_priv(SChost);
31721
31722- if (h) {
31723- if (h->info_kbuf == NULL)
31724- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31725- return h->info_kbuf;
31726- h->info_kbuf[0] = '\0';
31727+ if (!h)
31728+ return NULL;
31729
31730- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31731- h->info_kbuf[size-1] = '\0';
31732- }
31733+ if (h->info_kbuf == NULL)
31734+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31735+ return h->info_kbuf;
31736+ h->info_kbuf[0] = '\0';
31737+
31738+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31739+ h->info_kbuf[size-1] = '\0';
31740
31741 return h->info_kbuf;
31742 }
31743diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31744index 07dbeaf..5533142 100644
31745--- a/drivers/message/i2o/i2o_proc.c
31746+++ b/drivers/message/i2o/i2o_proc.c
31747@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31748 "Array Controller Device"
31749 };
31750
31751-static char *chtostr(u8 * chars, int n)
31752-{
31753- char tmp[256];
31754- tmp[0] = 0;
31755- return strncat(tmp, (char *)chars, n);
31756-}
31757-
31758 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31759 char *group)
31760 {
31761@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31762
31763 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31764 seq_printf(seq, "%-#8x", ddm_table.module_id);
31765- seq_printf(seq, "%-29s",
31766- chtostr(ddm_table.module_name_version, 28));
31767+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31768 seq_printf(seq, "%9d ", ddm_table.data_size);
31769 seq_printf(seq, "%8d", ddm_table.code_size);
31770
31771@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31772
31773 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31774 seq_printf(seq, "%-#8x", dst->module_id);
31775- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31776- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31777+ seq_printf(seq, "%-.28s", dst->module_name_version);
31778+ seq_printf(seq, "%-.8s", dst->date);
31779 seq_printf(seq, "%8d ", dst->module_size);
31780 seq_printf(seq, "%8d ", dst->mpb_size);
31781 seq_printf(seq, "0x%04x", dst->module_flags);
31782@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31783 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31784 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31785 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31786- seq_printf(seq, "Vendor info : %s\n",
31787- chtostr((u8 *) (work32 + 2), 16));
31788- seq_printf(seq, "Product info : %s\n",
31789- chtostr((u8 *) (work32 + 6), 16));
31790- seq_printf(seq, "Description : %s\n",
31791- chtostr((u8 *) (work32 + 10), 16));
31792- seq_printf(seq, "Product rev. : %s\n",
31793- chtostr((u8 *) (work32 + 14), 8));
31794+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31795+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31796+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31797+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31798
31799 seq_printf(seq, "Serial number : ");
31800 print_serial_number(seq, (u8 *) (work32 + 16),
31801@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31802 }
31803
31804 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31805- seq_printf(seq, "Module name : %s\n",
31806- chtostr(result.module_name, 24));
31807- seq_printf(seq, "Module revision : %s\n",
31808- chtostr(result.module_rev, 8));
31809+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31810+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31811
31812 seq_printf(seq, "Serial number : ");
31813 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31814@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31815 return 0;
31816 }
31817
31818- seq_printf(seq, "Device name : %s\n",
31819- chtostr(result.device_name, 64));
31820- seq_printf(seq, "Service name : %s\n",
31821- chtostr(result.service_name, 64));
31822- seq_printf(seq, "Physical name : %s\n",
31823- chtostr(result.physical_location, 64));
31824- seq_printf(seq, "Instance number : %s\n",
31825- chtostr(result.instance_number, 4));
31826+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31827+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31828+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31829+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31830
31831 return 0;
31832 }
31833diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31834index a8c08f3..155fe3d 100644
31835--- a/drivers/message/i2o/iop.c
31836+++ b/drivers/message/i2o/iop.c
31837@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31838
31839 spin_lock_irqsave(&c->context_list_lock, flags);
31840
31841- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31842- atomic_inc(&c->context_list_counter);
31843+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31844+ atomic_inc_unchecked(&c->context_list_counter);
31845
31846- entry->context = atomic_read(&c->context_list_counter);
31847+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31848
31849 list_add(&entry->list, &c->context_list);
31850
31851@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31852
31853 #if BITS_PER_LONG == 64
31854 spin_lock_init(&c->context_list_lock);
31855- atomic_set(&c->context_list_counter, 0);
31856+ atomic_set_unchecked(&c->context_list_counter, 0);
31857 INIT_LIST_HEAD(&c->context_list);
31858 #endif
31859
31860diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31861index 7ce65f4..e66e9bc 100644
31862--- a/drivers/mfd/abx500-core.c
31863+++ b/drivers/mfd/abx500-core.c
31864@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31865
31866 struct abx500_device_entry {
31867 struct list_head list;
31868- struct abx500_ops ops;
31869+ abx500_ops_no_const ops;
31870 struct device *dev;
31871 };
31872
31873diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31874index 5c2a06a..8fa077c 100644
31875--- a/drivers/mfd/janz-cmodio.c
31876+++ b/drivers/mfd/janz-cmodio.c
31877@@ -13,6 +13,7 @@
31878
31879 #include <linux/kernel.h>
31880 #include <linux/module.h>
31881+#include <linux/slab.h>
31882 #include <linux/init.h>
31883 #include <linux/pci.h>
31884 #include <linux/interrupt.h>
31885diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31886index 29d12a7..f900ba4 100644
31887--- a/drivers/misc/lis3lv02d/lis3lv02d.c
31888+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31889@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31890 * the lid is closed. This leads to interrupts as soon as a little move
31891 * is done.
31892 */
31893- atomic_inc(&lis3->count);
31894+ atomic_inc_unchecked(&lis3->count);
31895
31896 wake_up_interruptible(&lis3->misc_wait);
31897 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31898@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31899 if (lis3->pm_dev)
31900 pm_runtime_get_sync(lis3->pm_dev);
31901
31902- atomic_set(&lis3->count, 0);
31903+ atomic_set_unchecked(&lis3->count, 0);
31904 return 0;
31905 }
31906
31907@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
31908 add_wait_queue(&lis3->misc_wait, &wait);
31909 while (true) {
31910 set_current_state(TASK_INTERRUPTIBLE);
31911- data = atomic_xchg(&lis3->count, 0);
31912+ data = atomic_xchg_unchecked(&lis3->count, 0);
31913 if (data)
31914 break;
31915
31916@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31917 struct lis3lv02d, miscdev);
31918
31919 poll_wait(file, &lis3->misc_wait, wait);
31920- if (atomic_read(&lis3->count))
31921+ if (atomic_read_unchecked(&lis3->count))
31922 return POLLIN | POLLRDNORM;
31923 return 0;
31924 }
31925diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
31926index 2b1482a..5d33616 100644
31927--- a/drivers/misc/lis3lv02d/lis3lv02d.h
31928+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
31929@@ -266,7 +266,7 @@ struct lis3lv02d {
31930 struct input_polled_dev *idev; /* input device */
31931 struct platform_device *pdev; /* platform device */
31932 struct regulator_bulk_data regulators[2];
31933- atomic_t count; /* interrupt count after last read */
31934+ atomic_unchecked_t count; /* interrupt count after last read */
31935 union axis_conversion ac; /* hw -> logical axis */
31936 int mapped_btns[3];
31937
31938diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
31939index 2f30bad..c4c13d0 100644
31940--- a/drivers/misc/sgi-gru/gruhandles.c
31941+++ b/drivers/misc/sgi-gru/gruhandles.c
31942@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31943 unsigned long nsec;
31944
31945 nsec = CLKS2NSEC(clks);
31946- atomic_long_inc(&mcs_op_statistics[op].count);
31947- atomic_long_add(nsec, &mcs_op_statistics[op].total);
31948+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31949+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
31950 if (mcs_op_statistics[op].max < nsec)
31951 mcs_op_statistics[op].max = nsec;
31952 }
31953diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
31954index 7768b87..f8aac38 100644
31955--- a/drivers/misc/sgi-gru/gruprocfs.c
31956+++ b/drivers/misc/sgi-gru/gruprocfs.c
31957@@ -32,9 +32,9 @@
31958
31959 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31960
31961-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31962+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31963 {
31964- unsigned long val = atomic_long_read(v);
31965+ unsigned long val = atomic_long_read_unchecked(v);
31966
31967 seq_printf(s, "%16lu %s\n", val, id);
31968 }
31969@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
31970
31971 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
31972 for (op = 0; op < mcsop_last; op++) {
31973- count = atomic_long_read(&mcs_op_statistics[op].count);
31974- total = atomic_long_read(&mcs_op_statistics[op].total);
31975+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31976+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31977 max = mcs_op_statistics[op].max;
31978 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31979 count ? total / count : 0, max);
31980diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
31981index 5c3ce24..4915ccb 100644
31982--- a/drivers/misc/sgi-gru/grutables.h
31983+++ b/drivers/misc/sgi-gru/grutables.h
31984@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
31985 * GRU statistics.
31986 */
31987 struct gru_stats_s {
31988- atomic_long_t vdata_alloc;
31989- atomic_long_t vdata_free;
31990- atomic_long_t gts_alloc;
31991- atomic_long_t gts_free;
31992- atomic_long_t gms_alloc;
31993- atomic_long_t gms_free;
31994- atomic_long_t gts_double_allocate;
31995- atomic_long_t assign_context;
31996- atomic_long_t assign_context_failed;
31997- atomic_long_t free_context;
31998- atomic_long_t load_user_context;
31999- atomic_long_t load_kernel_context;
32000- atomic_long_t lock_kernel_context;
32001- atomic_long_t unlock_kernel_context;
32002- atomic_long_t steal_user_context;
32003- atomic_long_t steal_kernel_context;
32004- atomic_long_t steal_context_failed;
32005- atomic_long_t nopfn;
32006- atomic_long_t asid_new;
32007- atomic_long_t asid_next;
32008- atomic_long_t asid_wrap;
32009- atomic_long_t asid_reuse;
32010- atomic_long_t intr;
32011- atomic_long_t intr_cbr;
32012- atomic_long_t intr_tfh;
32013- atomic_long_t intr_spurious;
32014- atomic_long_t intr_mm_lock_failed;
32015- atomic_long_t call_os;
32016- atomic_long_t call_os_wait_queue;
32017- atomic_long_t user_flush_tlb;
32018- atomic_long_t user_unload_context;
32019- atomic_long_t user_exception;
32020- atomic_long_t set_context_option;
32021- atomic_long_t check_context_retarget_intr;
32022- atomic_long_t check_context_unload;
32023- atomic_long_t tlb_dropin;
32024- atomic_long_t tlb_preload_page;
32025- atomic_long_t tlb_dropin_fail_no_asid;
32026- atomic_long_t tlb_dropin_fail_upm;
32027- atomic_long_t tlb_dropin_fail_invalid;
32028- atomic_long_t tlb_dropin_fail_range_active;
32029- atomic_long_t tlb_dropin_fail_idle;
32030- atomic_long_t tlb_dropin_fail_fmm;
32031- atomic_long_t tlb_dropin_fail_no_exception;
32032- atomic_long_t tfh_stale_on_fault;
32033- atomic_long_t mmu_invalidate_range;
32034- atomic_long_t mmu_invalidate_page;
32035- atomic_long_t flush_tlb;
32036- atomic_long_t flush_tlb_gru;
32037- atomic_long_t flush_tlb_gru_tgh;
32038- atomic_long_t flush_tlb_gru_zero_asid;
32039+ atomic_long_unchecked_t vdata_alloc;
32040+ atomic_long_unchecked_t vdata_free;
32041+ atomic_long_unchecked_t gts_alloc;
32042+ atomic_long_unchecked_t gts_free;
32043+ atomic_long_unchecked_t gms_alloc;
32044+ atomic_long_unchecked_t gms_free;
32045+ atomic_long_unchecked_t gts_double_allocate;
32046+ atomic_long_unchecked_t assign_context;
32047+ atomic_long_unchecked_t assign_context_failed;
32048+ atomic_long_unchecked_t free_context;
32049+ atomic_long_unchecked_t load_user_context;
32050+ atomic_long_unchecked_t load_kernel_context;
32051+ atomic_long_unchecked_t lock_kernel_context;
32052+ atomic_long_unchecked_t unlock_kernel_context;
32053+ atomic_long_unchecked_t steal_user_context;
32054+ atomic_long_unchecked_t steal_kernel_context;
32055+ atomic_long_unchecked_t steal_context_failed;
32056+ atomic_long_unchecked_t nopfn;
32057+ atomic_long_unchecked_t asid_new;
32058+ atomic_long_unchecked_t asid_next;
32059+ atomic_long_unchecked_t asid_wrap;
32060+ atomic_long_unchecked_t asid_reuse;
32061+ atomic_long_unchecked_t intr;
32062+ atomic_long_unchecked_t intr_cbr;
32063+ atomic_long_unchecked_t intr_tfh;
32064+ atomic_long_unchecked_t intr_spurious;
32065+ atomic_long_unchecked_t intr_mm_lock_failed;
32066+ atomic_long_unchecked_t call_os;
32067+ atomic_long_unchecked_t call_os_wait_queue;
32068+ atomic_long_unchecked_t user_flush_tlb;
32069+ atomic_long_unchecked_t user_unload_context;
32070+ atomic_long_unchecked_t user_exception;
32071+ atomic_long_unchecked_t set_context_option;
32072+ atomic_long_unchecked_t check_context_retarget_intr;
32073+ atomic_long_unchecked_t check_context_unload;
32074+ atomic_long_unchecked_t tlb_dropin;
32075+ atomic_long_unchecked_t tlb_preload_page;
32076+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32077+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32078+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32079+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32080+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32081+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32082+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32083+ atomic_long_unchecked_t tfh_stale_on_fault;
32084+ atomic_long_unchecked_t mmu_invalidate_range;
32085+ atomic_long_unchecked_t mmu_invalidate_page;
32086+ atomic_long_unchecked_t flush_tlb;
32087+ atomic_long_unchecked_t flush_tlb_gru;
32088+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32089+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32090
32091- atomic_long_t copy_gpa;
32092- atomic_long_t read_gpa;
32093+ atomic_long_unchecked_t copy_gpa;
32094+ atomic_long_unchecked_t read_gpa;
32095
32096- atomic_long_t mesq_receive;
32097- atomic_long_t mesq_receive_none;
32098- atomic_long_t mesq_send;
32099- atomic_long_t mesq_send_failed;
32100- atomic_long_t mesq_noop;
32101- atomic_long_t mesq_send_unexpected_error;
32102- atomic_long_t mesq_send_lb_overflow;
32103- atomic_long_t mesq_send_qlimit_reached;
32104- atomic_long_t mesq_send_amo_nacked;
32105- atomic_long_t mesq_send_put_nacked;
32106- atomic_long_t mesq_page_overflow;
32107- atomic_long_t mesq_qf_locked;
32108- atomic_long_t mesq_qf_noop_not_full;
32109- atomic_long_t mesq_qf_switch_head_failed;
32110- atomic_long_t mesq_qf_unexpected_error;
32111- atomic_long_t mesq_noop_unexpected_error;
32112- atomic_long_t mesq_noop_lb_overflow;
32113- atomic_long_t mesq_noop_qlimit_reached;
32114- atomic_long_t mesq_noop_amo_nacked;
32115- atomic_long_t mesq_noop_put_nacked;
32116- atomic_long_t mesq_noop_page_overflow;
32117+ atomic_long_unchecked_t mesq_receive;
32118+ atomic_long_unchecked_t mesq_receive_none;
32119+ atomic_long_unchecked_t mesq_send;
32120+ atomic_long_unchecked_t mesq_send_failed;
32121+ atomic_long_unchecked_t mesq_noop;
32122+ atomic_long_unchecked_t mesq_send_unexpected_error;
32123+ atomic_long_unchecked_t mesq_send_lb_overflow;
32124+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32125+ atomic_long_unchecked_t mesq_send_amo_nacked;
32126+ atomic_long_unchecked_t mesq_send_put_nacked;
32127+ atomic_long_unchecked_t mesq_page_overflow;
32128+ atomic_long_unchecked_t mesq_qf_locked;
32129+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32130+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32131+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32132+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32133+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32134+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32135+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32136+ atomic_long_unchecked_t mesq_noop_put_nacked;
32137+ atomic_long_unchecked_t mesq_noop_page_overflow;
32138
32139 };
32140
32141@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32142 tghop_invalidate, mcsop_last};
32143
32144 struct mcs_op_statistic {
32145- atomic_long_t count;
32146- atomic_long_t total;
32147+ atomic_long_unchecked_t count;
32148+ atomic_long_unchecked_t total;
32149 unsigned long max;
32150 };
32151
32152@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32153
32154 #define STAT(id) do { \
32155 if (gru_options & OPT_STATS) \
32156- atomic_long_inc(&gru_stats.id); \
32157+ atomic_long_inc_unchecked(&gru_stats.id); \
32158 } while (0)
32159
32160 #ifdef CONFIG_SGI_GRU_DEBUG
32161diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32162index 851b2f2..a4ec097 100644
32163--- a/drivers/misc/sgi-xp/xp.h
32164+++ b/drivers/misc/sgi-xp/xp.h
32165@@ -289,7 +289,7 @@ struct xpc_interface {
32166 xpc_notify_func, void *);
32167 void (*received) (short, int, void *);
32168 enum xp_retval (*partid_to_nasids) (short, void *);
32169-};
32170+} __no_const;
32171
32172 extern struct xpc_interface xpc_interface;
32173
32174diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32175index b94d5f7..7f494c5 100644
32176--- a/drivers/misc/sgi-xp/xpc.h
32177+++ b/drivers/misc/sgi-xp/xpc.h
32178@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32179 void (*received_payload) (struct xpc_channel *, void *);
32180 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32181 };
32182+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32183
32184 /* struct xpc_partition act_state values (for XPC HB) */
32185
32186@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32187 /* found in xpc_main.c */
32188 extern struct device *xpc_part;
32189 extern struct device *xpc_chan;
32190-extern struct xpc_arch_operations xpc_arch_ops;
32191+extern xpc_arch_operations_no_const xpc_arch_ops;
32192 extern int xpc_disengage_timelimit;
32193 extern int xpc_disengage_timedout;
32194 extern int xpc_activate_IRQ_rcvd;
32195diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32196index 8d082b4..aa749ae 100644
32197--- a/drivers/misc/sgi-xp/xpc_main.c
32198+++ b/drivers/misc/sgi-xp/xpc_main.c
32199@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32200 .notifier_call = xpc_system_die,
32201 };
32202
32203-struct xpc_arch_operations xpc_arch_ops;
32204+xpc_arch_operations_no_const xpc_arch_ops;
32205
32206 /*
32207 * Timer function to enforce the timelimit on the partition disengage.
32208diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32209index 6878a94..fe5c5f1 100644
32210--- a/drivers/mmc/host/sdhci-pci.c
32211+++ b/drivers/mmc/host/sdhci-pci.c
32212@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32213 .probe = via_probe,
32214 };
32215
32216-static const struct pci_device_id pci_ids[] __devinitdata = {
32217+static const struct pci_device_id pci_ids[] __devinitconst = {
32218 {
32219 .vendor = PCI_VENDOR_ID_RICOH,
32220 .device = PCI_DEVICE_ID_RICOH_R5C822,
32221diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32222index e9fad91..0a7a16a 100644
32223--- a/drivers/mtd/devices/doc2000.c
32224+++ b/drivers/mtd/devices/doc2000.c
32225@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32226
32227 /* The ECC will not be calculated correctly if less than 512 is written */
32228 /* DBB-
32229- if (len != 0x200 && eccbuf)
32230+ if (len != 0x200)
32231 printk(KERN_WARNING
32232 "ECC needs a full sector write (adr: %lx size %lx)\n",
32233 (long) to, (long) len);
32234diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32235index a3f7a27..234016e 100644
32236--- a/drivers/mtd/devices/doc2001.c
32237+++ b/drivers/mtd/devices/doc2001.c
32238@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32239 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32240
32241 /* Don't allow read past end of device */
32242- if (from >= this->totlen)
32243+ if (from >= this->totlen || !len)
32244 return -EINVAL;
32245
32246 /* Don't allow a single read to cross a 512-byte block boundary */
32247diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32248index 3984d48..28aa897 100644
32249--- a/drivers/mtd/nand/denali.c
32250+++ b/drivers/mtd/nand/denali.c
32251@@ -26,6 +26,7 @@
32252 #include <linux/pci.h>
32253 #include <linux/mtd/mtd.h>
32254 #include <linux/module.h>
32255+#include <linux/slab.h>
32256
32257 #include "denali.h"
32258
32259diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32260index ac40925..483b753 100644
32261--- a/drivers/mtd/nftlmount.c
32262+++ b/drivers/mtd/nftlmount.c
32263@@ -24,6 +24,7 @@
32264 #include <asm/errno.h>
32265 #include <linux/delay.h>
32266 #include <linux/slab.h>
32267+#include <linux/sched.h>
32268 #include <linux/mtd/mtd.h>
32269 #include <linux/mtd/nand.h>
32270 #include <linux/mtd/nftl.h>
32271diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32272index 6c3fb5a..c542a81 100644
32273--- a/drivers/mtd/ubi/build.c
32274+++ b/drivers/mtd/ubi/build.c
32275@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32276 static int __init bytes_str_to_int(const char *str)
32277 {
32278 char *endp;
32279- unsigned long result;
32280+ unsigned long result, scale = 1;
32281
32282 result = simple_strtoul(str, &endp, 0);
32283 if (str == endp || result >= INT_MAX) {
32284@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32285
32286 switch (*endp) {
32287 case 'G':
32288- result *= 1024;
32289+ scale *= 1024;
32290 case 'M':
32291- result *= 1024;
32292+ scale *= 1024;
32293 case 'K':
32294- result *= 1024;
32295+ scale *= 1024;
32296 if (endp[1] == 'i' && endp[2] == 'B')
32297 endp += 2;
32298 case '\0':
32299@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32300 return -EINVAL;
32301 }
32302
32303- return result;
32304+ if ((intoverflow_t)result*scale >= INT_MAX) {
32305+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32306+ str);
32307+ return -EINVAL;
32308+ }
32309+
32310+ return result*scale;
32311 }
32312
32313 /**
32314diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32315index 1feae59..c2a61d2 100644
32316--- a/drivers/net/ethernet/atheros/atlx/atl2.c
32317+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32318@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32319 */
32320
32321 #define ATL2_PARAM(X, desc) \
32322- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32323+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32324 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32325 MODULE_PARM_DESC(X, desc);
32326 #else
32327diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32328index 9a517c2..a50cfcb 100644
32329--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32330+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32331@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32332
32333 int (*wait_comp)(struct bnx2x *bp,
32334 struct bnx2x_rx_mode_ramrod_params *p);
32335-};
32336+} __no_const;
32337
32338 /********************** Set multicast group ***********************************/
32339
32340diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32341index 94b4bd0..73c02de 100644
32342--- a/drivers/net/ethernet/broadcom/tg3.h
32343+++ b/drivers/net/ethernet/broadcom/tg3.h
32344@@ -134,6 +134,7 @@
32345 #define CHIPREV_ID_5750_A0 0x4000
32346 #define CHIPREV_ID_5750_A1 0x4001
32347 #define CHIPREV_ID_5750_A3 0x4003
32348+#define CHIPREV_ID_5750_C1 0x4201
32349 #define CHIPREV_ID_5750_C2 0x4202
32350 #define CHIPREV_ID_5752_A0_HW 0x5000
32351 #define CHIPREV_ID_5752_A0 0x6000
32352diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32353index c5f5479..2e8c260 100644
32354--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32355+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32356@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32357 */
32358 struct l2t_skb_cb {
32359 arp_failure_handler_func arp_failure_handler;
32360-};
32361+} __no_const;
32362
32363 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32364
32365diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32366index 871bcaa..4043505 100644
32367--- a/drivers/net/ethernet/dec/tulip/de4x5.c
32368+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32369@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32370 for (i=0; i<ETH_ALEN; i++) {
32371 tmp.addr[i] = dev->dev_addr[i];
32372 }
32373- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32374+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32375 break;
32376
32377 case DE4X5_SET_HWADDR: /* Set the hardware address */
32378@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32379 spin_lock_irqsave(&lp->lock, flags);
32380 memcpy(&statbuf, &lp->pktStats, ioc->len);
32381 spin_unlock_irqrestore(&lp->lock, flags);
32382- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32383+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32384 return -EFAULT;
32385 break;
32386 }
32387diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32388index 14d5b61..1398636 100644
32389--- a/drivers/net/ethernet/dec/tulip/eeprom.c
32390+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32391@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32392 {NULL}};
32393
32394
32395-static const char *block_name[] __devinitdata = {
32396+static const char *block_name[] __devinitconst = {
32397 "21140 non-MII",
32398 "21140 MII PHY",
32399 "21142 Serial PHY",
32400diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32401index 4d01219..b58d26d 100644
32402--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32403+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32404@@ -236,7 +236,7 @@ struct pci_id_info {
32405 int drv_flags; /* Driver use, intended as capability flags. */
32406 };
32407
32408-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32409+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32410 { /* Sometime a Level-One switch card. */
32411 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32412 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32413diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32414index dcd7f7a..ecb7fb3 100644
32415--- a/drivers/net/ethernet/dlink/sundance.c
32416+++ b/drivers/net/ethernet/dlink/sundance.c
32417@@ -218,7 +218,7 @@ enum {
32418 struct pci_id_info {
32419 const char *name;
32420 };
32421-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32422+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32423 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32424 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32425 {"D-Link DFE-580TX 4 port Server Adapter"},
32426diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32427index bf266a0..e024af7 100644
32428--- a/drivers/net/ethernet/emulex/benet/be_main.c
32429+++ b/drivers/net/ethernet/emulex/benet/be_main.c
32430@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32431
32432 if (wrapped)
32433 newacc += 65536;
32434- ACCESS_ONCE(*acc) = newacc;
32435+ ACCESS_ONCE_RW(*acc) = newacc;
32436 }
32437
32438 void be_parse_stats(struct be_adapter *adapter)
32439diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32440index 61d2bdd..7f1154a 100644
32441--- a/drivers/net/ethernet/fealnx.c
32442+++ b/drivers/net/ethernet/fealnx.c
32443@@ -150,7 +150,7 @@ struct chip_info {
32444 int flags;
32445 };
32446
32447-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32448+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32449 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32450 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32451 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32452diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32453index e1159e5..e18684d 100644
32454--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32455+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32456@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32457 {
32458 struct e1000_hw *hw = &adapter->hw;
32459 struct e1000_mac_info *mac = &hw->mac;
32460- struct e1000_mac_operations *func = &mac->ops;
32461+ e1000_mac_operations_no_const *func = &mac->ops;
32462
32463 /* Set media type */
32464 switch (adapter->pdev->device) {
32465diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32466index a3e65fd..f451444 100644
32467--- a/drivers/net/ethernet/intel/e1000e/82571.c
32468+++ b/drivers/net/ethernet/intel/e1000e/82571.c
32469@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32470 {
32471 struct e1000_hw *hw = &adapter->hw;
32472 struct e1000_mac_info *mac = &hw->mac;
32473- struct e1000_mac_operations *func = &mac->ops;
32474+ e1000_mac_operations_no_const *func = &mac->ops;
32475 u32 swsm = 0;
32476 u32 swsm2 = 0;
32477 bool force_clear_smbi = false;
32478diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32479index 2967039..ca8c40c 100644
32480--- a/drivers/net/ethernet/intel/e1000e/hw.h
32481+++ b/drivers/net/ethernet/intel/e1000e/hw.h
32482@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32483 void (*write_vfta)(struct e1000_hw *, u32, u32);
32484 s32 (*read_mac_addr)(struct e1000_hw *);
32485 };
32486+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32487
32488 /*
32489 * When to use various PHY register access functions:
32490@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32491 void (*power_up)(struct e1000_hw *);
32492 void (*power_down)(struct e1000_hw *);
32493 };
32494+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32495
32496 /* Function pointers for the NVM. */
32497 struct e1000_nvm_operations {
32498@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32499 s32 (*validate)(struct e1000_hw *);
32500 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32501 };
32502+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32503
32504 struct e1000_mac_info {
32505- struct e1000_mac_operations ops;
32506+ e1000_mac_operations_no_const ops;
32507 u8 addr[ETH_ALEN];
32508 u8 perm_addr[ETH_ALEN];
32509
32510@@ -872,7 +875,7 @@ struct e1000_mac_info {
32511 };
32512
32513 struct e1000_phy_info {
32514- struct e1000_phy_operations ops;
32515+ e1000_phy_operations_no_const ops;
32516
32517 enum e1000_phy_type type;
32518
32519@@ -906,7 +909,7 @@ struct e1000_phy_info {
32520 };
32521
32522 struct e1000_nvm_info {
32523- struct e1000_nvm_operations ops;
32524+ e1000_nvm_operations_no_const ops;
32525
32526 enum e1000_nvm_type type;
32527 enum e1000_nvm_override override;
32528diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32529index 4519a13..f97fcd0 100644
32530--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32531+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32532@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32533 s32 (*read_mac_addr)(struct e1000_hw *);
32534 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32535 };
32536+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32537
32538 struct e1000_phy_operations {
32539 s32 (*acquire)(struct e1000_hw *);
32540@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32541 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32542 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32543 };
32544+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32545
32546 struct e1000_nvm_operations {
32547 s32 (*acquire)(struct e1000_hw *);
32548@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32549 s32 (*update)(struct e1000_hw *);
32550 s32 (*validate)(struct e1000_hw *);
32551 };
32552+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32553
32554 struct e1000_info {
32555 s32 (*get_invariants)(struct e1000_hw *);
32556@@ -350,7 +353,7 @@ struct e1000_info {
32557 extern const struct e1000_info e1000_82575_info;
32558
32559 struct e1000_mac_info {
32560- struct e1000_mac_operations ops;
32561+ e1000_mac_operations_no_const ops;
32562
32563 u8 addr[6];
32564 u8 perm_addr[6];
32565@@ -388,7 +391,7 @@ struct e1000_mac_info {
32566 };
32567
32568 struct e1000_phy_info {
32569- struct e1000_phy_operations ops;
32570+ e1000_phy_operations_no_const ops;
32571
32572 enum e1000_phy_type type;
32573
32574@@ -423,7 +426,7 @@ struct e1000_phy_info {
32575 };
32576
32577 struct e1000_nvm_info {
32578- struct e1000_nvm_operations ops;
32579+ e1000_nvm_operations_no_const ops;
32580 enum e1000_nvm_type type;
32581 enum e1000_nvm_override override;
32582
32583@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32584 s32 (*check_for_ack)(struct e1000_hw *, u16);
32585 s32 (*check_for_rst)(struct e1000_hw *, u16);
32586 };
32587+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32588
32589 struct e1000_mbx_stats {
32590 u32 msgs_tx;
32591@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32592 };
32593
32594 struct e1000_mbx_info {
32595- struct e1000_mbx_operations ops;
32596+ e1000_mbx_operations_no_const ops;
32597 struct e1000_mbx_stats stats;
32598 u32 timeout;
32599 u32 usec_delay;
32600diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32601index d7ed58f..64cde36 100644
32602--- a/drivers/net/ethernet/intel/igbvf/vf.h
32603+++ b/drivers/net/ethernet/intel/igbvf/vf.h
32604@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32605 s32 (*read_mac_addr)(struct e1000_hw *);
32606 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32607 };
32608+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32609
32610 struct e1000_mac_info {
32611- struct e1000_mac_operations ops;
32612+ e1000_mac_operations_no_const ops;
32613 u8 addr[6];
32614 u8 perm_addr[6];
32615
32616@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32617 s32 (*check_for_ack)(struct e1000_hw *);
32618 s32 (*check_for_rst)(struct e1000_hw *);
32619 };
32620+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32621
32622 struct e1000_mbx_stats {
32623 u32 msgs_tx;
32624@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32625 };
32626
32627 struct e1000_mbx_info {
32628- struct e1000_mbx_operations ops;
32629+ e1000_mbx_operations_no_const ops;
32630 struct e1000_mbx_stats stats;
32631 u32 timeout;
32632 u32 usec_delay;
32633diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32634index 6c5cca8..de8ef63 100644
32635--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32636+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32637@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32638 s32 (*update_checksum)(struct ixgbe_hw *);
32639 u16 (*calc_checksum)(struct ixgbe_hw *);
32640 };
32641+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32642
32643 struct ixgbe_mac_operations {
32644 s32 (*init_hw)(struct ixgbe_hw *);
32645@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32646 /* Manageability interface */
32647 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32648 };
32649+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32650
32651 struct ixgbe_phy_operations {
32652 s32 (*identify)(struct ixgbe_hw *);
32653@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32654 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32655 s32 (*check_overtemp)(struct ixgbe_hw *);
32656 };
32657+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32658
32659 struct ixgbe_eeprom_info {
32660- struct ixgbe_eeprom_operations ops;
32661+ ixgbe_eeprom_operations_no_const ops;
32662 enum ixgbe_eeprom_type type;
32663 u32 semaphore_delay;
32664 u16 word_size;
32665@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32666
32667 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32668 struct ixgbe_mac_info {
32669- struct ixgbe_mac_operations ops;
32670+ ixgbe_mac_operations_no_const ops;
32671 enum ixgbe_mac_type type;
32672 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32673 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32674@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32675 };
32676
32677 struct ixgbe_phy_info {
32678- struct ixgbe_phy_operations ops;
32679+ ixgbe_phy_operations_no_const ops;
32680 struct mdio_if_info mdio;
32681 enum ixgbe_phy_type type;
32682 u32 id;
32683@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32684 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32685 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32686 };
32687+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32688
32689 struct ixgbe_mbx_stats {
32690 u32 msgs_tx;
32691@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32692 };
32693
32694 struct ixgbe_mbx_info {
32695- struct ixgbe_mbx_operations ops;
32696+ ixgbe_mbx_operations_no_const ops;
32697 struct ixgbe_mbx_stats stats;
32698 u32 timeout;
32699 u32 usec_delay;
32700diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32701index 10306b4..28df758 100644
32702--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32703+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32704@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32705 s32 (*clear_vfta)(struct ixgbe_hw *);
32706 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32707 };
32708+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32709
32710 enum ixgbe_mac_type {
32711 ixgbe_mac_unknown = 0,
32712@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32713 };
32714
32715 struct ixgbe_mac_info {
32716- struct ixgbe_mac_operations ops;
32717+ ixgbe_mac_operations_no_const ops;
32718 u8 addr[6];
32719 u8 perm_addr[6];
32720
32721@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32722 s32 (*check_for_ack)(struct ixgbe_hw *);
32723 s32 (*check_for_rst)(struct ixgbe_hw *);
32724 };
32725+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32726
32727 struct ixgbe_mbx_stats {
32728 u32 msgs_tx;
32729@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32730 };
32731
32732 struct ixgbe_mbx_info {
32733- struct ixgbe_mbx_operations ops;
32734+ ixgbe_mbx_operations_no_const ops;
32735 struct ixgbe_mbx_stats stats;
32736 u32 timeout;
32737 u32 udelay;
32738diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32739index 94bbc85..78c12e6 100644
32740--- a/drivers/net/ethernet/mellanox/mlx4/main.c
32741+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32742@@ -40,6 +40,7 @@
32743 #include <linux/dma-mapping.h>
32744 #include <linux/slab.h>
32745 #include <linux/io-mapping.h>
32746+#include <linux/sched.h>
32747
32748 #include <linux/mlx4/device.h>
32749 #include <linux/mlx4/doorbell.h>
32750diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32751index 5046a64..71ca936 100644
32752--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32753+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32754@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32755 void (*link_down)(struct __vxge_hw_device *devh);
32756 void (*crit_err)(struct __vxge_hw_device *devh,
32757 enum vxge_hw_event type, u64 ext_data);
32758-};
32759+} __no_const;
32760
32761 /*
32762 * struct __vxge_hw_blockpool_entry - Block private data structure
32763diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32764index 4a518a3..936b334 100644
32765--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32766+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32767@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32768 struct vxge_hw_mempool_dma *dma_object,
32769 u32 index,
32770 u32 is_last);
32771-};
32772+} __no_const;
32773
32774 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32775 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32776diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32777index c8f47f1..5da9840 100644
32778--- a/drivers/net/ethernet/realtek/r8169.c
32779+++ b/drivers/net/ethernet/realtek/r8169.c
32780@@ -698,17 +698,17 @@ struct rtl8169_private {
32781 struct mdio_ops {
32782 void (*write)(void __iomem *, int, int);
32783 int (*read)(void __iomem *, int);
32784- } mdio_ops;
32785+ } __no_const mdio_ops;
32786
32787 struct pll_power_ops {
32788 void (*down)(struct rtl8169_private *);
32789 void (*up)(struct rtl8169_private *);
32790- } pll_power_ops;
32791+ } __no_const pll_power_ops;
32792
32793 struct jumbo_ops {
32794 void (*enable)(struct rtl8169_private *);
32795 void (*disable)(struct rtl8169_private *);
32796- } jumbo_ops;
32797+ } __no_const jumbo_ops;
32798
32799 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32800 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32801diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32802index 1b4658c..a30dabb 100644
32803--- a/drivers/net/ethernet/sis/sis190.c
32804+++ b/drivers/net/ethernet/sis/sis190.c
32805@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32806 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32807 struct net_device *dev)
32808 {
32809- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32810+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32811 struct sis190_private *tp = netdev_priv(dev);
32812 struct pci_dev *isa_bridge;
32813 u8 reg, tmp8;
32814diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32815index edfa15d..002bfa9 100644
32816--- a/drivers/net/ppp/ppp_generic.c
32817+++ b/drivers/net/ppp/ppp_generic.c
32818@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32819 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32820 struct ppp_stats stats;
32821 struct ppp_comp_stats cstats;
32822- char *vers;
32823
32824 switch (cmd) {
32825 case SIOCGPPPSTATS:
32826@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32827 break;
32828
32829 case SIOCGPPPVER:
32830- vers = PPP_VERSION;
32831- if (copy_to_user(addr, vers, strlen(vers) + 1))
32832+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32833 break;
32834 err = 0;
32835 break;
32836diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32837index 515f122..41dd273 100644
32838--- a/drivers/net/tokenring/abyss.c
32839+++ b/drivers/net/tokenring/abyss.c
32840@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32841
32842 static int __init abyss_init (void)
32843 {
32844- abyss_netdev_ops = tms380tr_netdev_ops;
32845+ pax_open_kernel();
32846+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32847
32848- abyss_netdev_ops.ndo_open = abyss_open;
32849- abyss_netdev_ops.ndo_stop = abyss_close;
32850+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32851+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32852+ pax_close_kernel();
32853
32854 return pci_register_driver(&abyss_driver);
32855 }
32856diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32857index 6153cfd..cf69c1c 100644
32858--- a/drivers/net/tokenring/madgemc.c
32859+++ b/drivers/net/tokenring/madgemc.c
32860@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32861
32862 static int __init madgemc_init (void)
32863 {
32864- madgemc_netdev_ops = tms380tr_netdev_ops;
32865- madgemc_netdev_ops.ndo_open = madgemc_open;
32866- madgemc_netdev_ops.ndo_stop = madgemc_close;
32867+ pax_open_kernel();
32868+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32869+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32870+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32871+ pax_close_kernel();
32872
32873 return mca_register_driver (&madgemc_driver);
32874 }
32875diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32876index 8d362e6..f91cc52 100644
32877--- a/drivers/net/tokenring/proteon.c
32878+++ b/drivers/net/tokenring/proteon.c
32879@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32880 struct platform_device *pdev;
32881 int i, num = 0, err = 0;
32882
32883- proteon_netdev_ops = tms380tr_netdev_ops;
32884- proteon_netdev_ops.ndo_open = proteon_open;
32885- proteon_netdev_ops.ndo_stop = tms380tr_close;
32886+ pax_open_kernel();
32887+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32888+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32889+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32890+ pax_close_kernel();
32891
32892 err = platform_driver_register(&proteon_driver);
32893 if (err)
32894diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32895index 46db5c5..37c1536 100644
32896--- a/drivers/net/tokenring/skisa.c
32897+++ b/drivers/net/tokenring/skisa.c
32898@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32899 struct platform_device *pdev;
32900 int i, num = 0, err = 0;
32901
32902- sk_isa_netdev_ops = tms380tr_netdev_ops;
32903- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32904- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32905+ pax_open_kernel();
32906+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32907+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32908+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32909+ pax_close_kernel();
32910
32911 err = platform_driver_register(&sk_isa_driver);
32912 if (err)
32913diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
32914index 304fe78..db112fa 100644
32915--- a/drivers/net/usb/hso.c
32916+++ b/drivers/net/usb/hso.c
32917@@ -71,7 +71,7 @@
32918 #include <asm/byteorder.h>
32919 #include <linux/serial_core.h>
32920 #include <linux/serial.h>
32921-
32922+#include <asm/local.h>
32923
32924 #define MOD_AUTHOR "Option Wireless"
32925 #define MOD_DESCRIPTION "USB High Speed Option driver"
32926@@ -257,7 +257,7 @@ struct hso_serial {
32927
32928 /* from usb_serial_port */
32929 struct tty_struct *tty;
32930- int open_count;
32931+ local_t open_count;
32932 spinlock_t serial_lock;
32933
32934 int (*write_data) (struct hso_serial *serial);
32935@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
32936 struct urb *urb;
32937
32938 urb = serial->rx_urb[0];
32939- if (serial->open_count > 0) {
32940+ if (local_read(&serial->open_count) > 0) {
32941 count = put_rxbuf_data(urb, serial);
32942 if (count == -1)
32943 return;
32944@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
32945 DUMP1(urb->transfer_buffer, urb->actual_length);
32946
32947 /* Anyone listening? */
32948- if (serial->open_count == 0)
32949+ if (local_read(&serial->open_count) == 0)
32950 return;
32951
32952 if (status == 0) {
32953@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
32954 spin_unlock_irq(&serial->serial_lock);
32955
32956 /* check for port already opened, if not set the termios */
32957- serial->open_count++;
32958- if (serial->open_count == 1) {
32959+ if (local_inc_return(&serial->open_count) == 1) {
32960 serial->rx_state = RX_IDLE;
32961 /* Force default termio settings */
32962 _hso_serial_set_termios(tty, NULL);
32963@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
32964 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32965 if (result) {
32966 hso_stop_serial_device(serial->parent);
32967- serial->open_count--;
32968+ local_dec(&serial->open_count);
32969 kref_put(&serial->parent->ref, hso_serial_ref_free);
32970 }
32971 } else {
32972@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
32973
32974 /* reset the rts and dtr */
32975 /* do the actual close */
32976- serial->open_count--;
32977+ local_dec(&serial->open_count);
32978
32979- if (serial->open_count <= 0) {
32980- serial->open_count = 0;
32981+ if (local_read(&serial->open_count) <= 0) {
32982+ local_set(&serial->open_count, 0);
32983 spin_lock_irq(&serial->serial_lock);
32984 if (serial->tty == tty) {
32985 serial->tty->driver_data = NULL;
32986@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
32987
32988 /* the actual setup */
32989 spin_lock_irqsave(&serial->serial_lock, flags);
32990- if (serial->open_count)
32991+ if (local_read(&serial->open_count))
32992 _hso_serial_set_termios(tty, old);
32993 else
32994 tty->termios = old;
32995@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
32996 D1("Pending read interrupt on port %d\n", i);
32997 spin_lock(&serial->serial_lock);
32998 if (serial->rx_state == RX_IDLE &&
32999- serial->open_count > 0) {
33000+ local_read(&serial->open_count) > 0) {
33001 /* Setup and send a ctrl req read on
33002 * port i */
33003 if (!serial->rx_urb_filled[0]) {
33004@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33005 /* Start all serial ports */
33006 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33007 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33008- if (dev2ser(serial_table[i])->open_count) {
33009+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33010 result =
33011 hso_start_serial_device(serial_table[i], GFP_NOIO);
33012 hso_kick_transmit(dev2ser(serial_table[i]));
33013diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33014index e662cbc..8d4a102 100644
33015--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33016+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33017@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33018 * Return with error code if any of the queue indices
33019 * is out of range
33020 */
33021- if (p->ring_index[i] < 0 ||
33022- p->ring_index[i] >= adapter->num_rx_queues)
33023+ if (p->ring_index[i] >= adapter->num_rx_queues)
33024 return -EINVAL;
33025 }
33026
33027diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33028index 0f9ee46..e2d6e65 100644
33029--- a/drivers/net/wireless/ath/ath.h
33030+++ b/drivers/net/wireless/ath/ath.h
33031@@ -119,6 +119,7 @@ struct ath_ops {
33032 void (*write_flush) (void *);
33033 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33034 };
33035+typedef struct ath_ops __no_const ath_ops_no_const;
33036
33037 struct ath_common;
33038 struct ath_bus_ops;
33039diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33040index b592016..fe47870 100644
33041--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33042+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33043@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33044 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33045 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33046
33047- ACCESS_ONCE(ads->ds_link) = i->link;
33048- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33049+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
33050+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33051
33052 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33053 ctl6 = SM(i->keytype, AR_EncrType);
33054@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33055
33056 if ((i->is_first || i->is_last) &&
33057 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33058- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33059+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33060 | set11nTries(i->rates, 1)
33061 | set11nTries(i->rates, 2)
33062 | set11nTries(i->rates, 3)
33063 | (i->dur_update ? AR_DurUpdateEna : 0)
33064 | SM(0, AR_BurstDur);
33065
33066- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33067+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33068 | set11nRate(i->rates, 1)
33069 | set11nRate(i->rates, 2)
33070 | set11nRate(i->rates, 3);
33071 } else {
33072- ACCESS_ONCE(ads->ds_ctl2) = 0;
33073- ACCESS_ONCE(ads->ds_ctl3) = 0;
33074+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33075+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33076 }
33077
33078 if (!i->is_first) {
33079- ACCESS_ONCE(ads->ds_ctl0) = 0;
33080- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33081- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33082+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33083+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33084+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33085 return;
33086 }
33087
33088@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33089 break;
33090 }
33091
33092- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33093+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33094 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33095 | SM(i->txpower, AR_XmitPower)
33096 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33097@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33098 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33099 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33100
33101- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33102- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33103+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33104+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33105
33106 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33107 return;
33108
33109- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33110+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33111 | set11nPktDurRTSCTS(i->rates, 1);
33112
33113- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33114+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33115 | set11nPktDurRTSCTS(i->rates, 3);
33116
33117- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33118+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33119 | set11nRateFlags(i->rates, 1)
33120 | set11nRateFlags(i->rates, 2)
33121 | set11nRateFlags(i->rates, 3)
33122diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33123index f5ae3c6..7936af3 100644
33124--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33125+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33126@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33127 (i->qcu << AR_TxQcuNum_S) | 0x17;
33128
33129 checksum += val;
33130- ACCESS_ONCE(ads->info) = val;
33131+ ACCESS_ONCE_RW(ads->info) = val;
33132
33133 checksum += i->link;
33134- ACCESS_ONCE(ads->link) = i->link;
33135+ ACCESS_ONCE_RW(ads->link) = i->link;
33136
33137 checksum += i->buf_addr[0];
33138- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33139+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33140 checksum += i->buf_addr[1];
33141- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33142+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33143 checksum += i->buf_addr[2];
33144- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33145+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33146 checksum += i->buf_addr[3];
33147- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33148+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33149
33150 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33151- ACCESS_ONCE(ads->ctl3) = val;
33152+ ACCESS_ONCE_RW(ads->ctl3) = val;
33153 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33154- ACCESS_ONCE(ads->ctl5) = val;
33155+ ACCESS_ONCE_RW(ads->ctl5) = val;
33156 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33157- ACCESS_ONCE(ads->ctl7) = val;
33158+ ACCESS_ONCE_RW(ads->ctl7) = val;
33159 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33160- ACCESS_ONCE(ads->ctl9) = val;
33161+ ACCESS_ONCE_RW(ads->ctl9) = val;
33162
33163 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33164- ACCESS_ONCE(ads->ctl10) = checksum;
33165+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
33166
33167 if (i->is_first || i->is_last) {
33168- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33169+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33170 | set11nTries(i->rates, 1)
33171 | set11nTries(i->rates, 2)
33172 | set11nTries(i->rates, 3)
33173 | (i->dur_update ? AR_DurUpdateEna : 0)
33174 | SM(0, AR_BurstDur);
33175
33176- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33177+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33178 | set11nRate(i->rates, 1)
33179 | set11nRate(i->rates, 2)
33180 | set11nRate(i->rates, 3);
33181 } else {
33182- ACCESS_ONCE(ads->ctl13) = 0;
33183- ACCESS_ONCE(ads->ctl14) = 0;
33184+ ACCESS_ONCE_RW(ads->ctl13) = 0;
33185+ ACCESS_ONCE_RW(ads->ctl14) = 0;
33186 }
33187
33188 ads->ctl20 = 0;
33189@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33190
33191 ctl17 = SM(i->keytype, AR_EncrType);
33192 if (!i->is_first) {
33193- ACCESS_ONCE(ads->ctl11) = 0;
33194- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33195- ACCESS_ONCE(ads->ctl15) = 0;
33196- ACCESS_ONCE(ads->ctl16) = 0;
33197- ACCESS_ONCE(ads->ctl17) = ctl17;
33198- ACCESS_ONCE(ads->ctl18) = 0;
33199- ACCESS_ONCE(ads->ctl19) = 0;
33200+ ACCESS_ONCE_RW(ads->ctl11) = 0;
33201+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33202+ ACCESS_ONCE_RW(ads->ctl15) = 0;
33203+ ACCESS_ONCE_RW(ads->ctl16) = 0;
33204+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33205+ ACCESS_ONCE_RW(ads->ctl18) = 0;
33206+ ACCESS_ONCE_RW(ads->ctl19) = 0;
33207 return;
33208 }
33209
33210- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33211+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33212 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33213 | SM(i->txpower, AR_XmitPower)
33214 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33215@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33216 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33217 ctl12 |= SM(val, AR_PAPRDChainMask);
33218
33219- ACCESS_ONCE(ads->ctl12) = ctl12;
33220- ACCESS_ONCE(ads->ctl17) = ctl17;
33221+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33222+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33223
33224- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33225+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33226 | set11nPktDurRTSCTS(i->rates, 1);
33227
33228- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33229+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33230 | set11nPktDurRTSCTS(i->rates, 3);
33231
33232- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33233+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33234 | set11nRateFlags(i->rates, 1)
33235 | set11nRateFlags(i->rates, 2)
33236 | set11nRateFlags(i->rates, 3)
33237 | SM(i->rtscts_rate, AR_RTSCTSRate);
33238
33239- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33240+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33241 }
33242
33243 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33244diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33245index f389b3c..7359e18 100644
33246--- a/drivers/net/wireless/ath/ath9k/hw.h
33247+++ b/drivers/net/wireless/ath/ath9k/hw.h
33248@@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33249
33250 /* ANI */
33251 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33252-};
33253+} __no_const;
33254
33255 /**
33256 * struct ath_hw_ops - callbacks used by hardware code and driver code
33257@@ -635,7 +635,7 @@ struct ath_hw_ops {
33258 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33259 struct ath_hw_antcomb_conf *antconf);
33260
33261-};
33262+} __no_const;
33263
33264 struct ath_nf_limits {
33265 s16 max;
33266@@ -655,7 +655,7 @@ enum ath_cal_list {
33267 #define AH_FASTCC 0x4
33268
33269 struct ath_hw {
33270- struct ath_ops reg_ops;
33271+ ath_ops_no_const reg_ops;
33272
33273 struct ieee80211_hw *hw;
33274 struct ath_common common;
33275diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33276index bea8524..c677c06 100644
33277--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33278+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33279@@ -547,7 +547,7 @@ struct phy_func_ptr {
33280 void (*carrsuppr)(struct brcms_phy *);
33281 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33282 void (*detach)(struct brcms_phy *);
33283-};
33284+} __no_const;
33285
33286 struct brcms_phy {
33287 struct brcms_phy_pub pubpi_ro;
33288diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33289index 05f2ad1..ae00eea 100644
33290--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33291+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33292@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33293 */
33294 if (iwl3945_mod_params.disable_hw_scan) {
33295 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33296- iwl3945_hw_ops.hw_scan = NULL;
33297+ pax_open_kernel();
33298+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33299+ pax_close_kernel();
33300 }
33301
33302 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33303diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33304index 69a77e2..552b42c 100644
33305--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33306+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33307@@ -71,8 +71,8 @@ do { \
33308 } while (0)
33309
33310 #else
33311-#define IWL_DEBUG(m, level, fmt, args...)
33312-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33313+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33314+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33315 #define iwl_print_hex_dump(m, level, p, len)
33316 #endif /* CONFIG_IWLWIFI_DEBUG */
33317
33318diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33319index 523ad55..f8c5dc5 100644
33320--- a/drivers/net/wireless/mac80211_hwsim.c
33321+++ b/drivers/net/wireless/mac80211_hwsim.c
33322@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33323 return -EINVAL;
33324
33325 if (fake_hw_scan) {
33326- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33327- mac80211_hwsim_ops.sw_scan_start = NULL;
33328- mac80211_hwsim_ops.sw_scan_complete = NULL;
33329+ pax_open_kernel();
33330+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33331+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33332+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33333+ pax_close_kernel();
33334 }
33335
33336 spin_lock_init(&hwsim_radio_lock);
33337diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33338index 30f138b..c904585 100644
33339--- a/drivers/net/wireless/mwifiex/main.h
33340+++ b/drivers/net/wireless/mwifiex/main.h
33341@@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33342 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33343 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33344 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33345-};
33346+} __no_const;
33347
33348 struct mwifiex_adapter {
33349 u8 iface_type;
33350diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33351index 0c13840..a5c3ed6 100644
33352--- a/drivers/net/wireless/rndis_wlan.c
33353+++ b/drivers/net/wireless/rndis_wlan.c
33354@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33355
33356 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33357
33358- if (rts_threshold < 0 || rts_threshold > 2347)
33359+ if (rts_threshold > 2347)
33360 rts_threshold = 2347;
33361
33362 tmp = cpu_to_le32(rts_threshold);
33363diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33364index a77f1bb..c608b2b 100644
33365--- a/drivers/net/wireless/wl1251/wl1251.h
33366+++ b/drivers/net/wireless/wl1251/wl1251.h
33367@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33368 void (*reset)(struct wl1251 *wl);
33369 void (*enable_irq)(struct wl1251 *wl);
33370 void (*disable_irq)(struct wl1251 *wl);
33371-};
33372+} __no_const;
33373
33374 struct wl1251 {
33375 struct ieee80211_hw *hw;
33376diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33377index f34b5b2..b5abb9f 100644
33378--- a/drivers/oprofile/buffer_sync.c
33379+++ b/drivers/oprofile/buffer_sync.c
33380@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33381 if (cookie == NO_COOKIE)
33382 offset = pc;
33383 if (cookie == INVALID_COOKIE) {
33384- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33385+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33386 offset = pc;
33387 }
33388 if (cookie != last_cookie) {
33389@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33390 /* add userspace sample */
33391
33392 if (!mm) {
33393- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33394+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33395 return 0;
33396 }
33397
33398 cookie = lookup_dcookie(mm, s->eip, &offset);
33399
33400 if (cookie == INVALID_COOKIE) {
33401- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33402+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33403 return 0;
33404 }
33405
33406@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33407 /* ignore backtraces if failed to add a sample */
33408 if (state == sb_bt_start) {
33409 state = sb_bt_ignore;
33410- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33411+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33412 }
33413 }
33414 release_mm(mm);
33415diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33416index c0cc4e7..44d4e54 100644
33417--- a/drivers/oprofile/event_buffer.c
33418+++ b/drivers/oprofile/event_buffer.c
33419@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33420 }
33421
33422 if (buffer_pos == buffer_size) {
33423- atomic_inc(&oprofile_stats.event_lost_overflow);
33424+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33425 return;
33426 }
33427
33428diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33429index f8c752e..28bf4fc 100644
33430--- a/drivers/oprofile/oprof.c
33431+++ b/drivers/oprofile/oprof.c
33432@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33433 if (oprofile_ops.switch_events())
33434 return;
33435
33436- atomic_inc(&oprofile_stats.multiplex_counter);
33437+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33438 start_switch_worker();
33439 }
33440
33441diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33442index 917d28e..d62d981 100644
33443--- a/drivers/oprofile/oprofile_stats.c
33444+++ b/drivers/oprofile/oprofile_stats.c
33445@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33446 cpu_buf->sample_invalid_eip = 0;
33447 }
33448
33449- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33450- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33451- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33452- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33453- atomic_set(&oprofile_stats.multiplex_counter, 0);
33454+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33455+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33456+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33457+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33458+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33459 }
33460
33461
33462diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33463index 38b6fc0..b5cbfce 100644
33464--- a/drivers/oprofile/oprofile_stats.h
33465+++ b/drivers/oprofile/oprofile_stats.h
33466@@ -13,11 +13,11 @@
33467 #include <linux/atomic.h>
33468
33469 struct oprofile_stat_struct {
33470- atomic_t sample_lost_no_mm;
33471- atomic_t sample_lost_no_mapping;
33472- atomic_t bt_lost_no_mapping;
33473- atomic_t event_lost_overflow;
33474- atomic_t multiplex_counter;
33475+ atomic_unchecked_t sample_lost_no_mm;
33476+ atomic_unchecked_t sample_lost_no_mapping;
33477+ atomic_unchecked_t bt_lost_no_mapping;
33478+ atomic_unchecked_t event_lost_overflow;
33479+ atomic_unchecked_t multiplex_counter;
33480 };
33481
33482 extern struct oprofile_stat_struct oprofile_stats;
33483diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33484index 2f0aa0f..90fab02 100644
33485--- a/drivers/oprofile/oprofilefs.c
33486+++ b/drivers/oprofile/oprofilefs.c
33487@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33488
33489
33490 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33491- char const *name, atomic_t *val)
33492+ char const *name, atomic_unchecked_t *val)
33493 {
33494 return __oprofilefs_create_file(sb, root, name,
33495 &atomic_ro_fops, 0444, val);
33496diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33497index 3f56bc0..707d642 100644
33498--- a/drivers/parport/procfs.c
33499+++ b/drivers/parport/procfs.c
33500@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33501
33502 *ppos += len;
33503
33504- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33505+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33506 }
33507
33508 #ifdef CONFIG_PARPORT_1284
33509@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33510
33511 *ppos += len;
33512
33513- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33514+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33515 }
33516 #endif /* IEEE1284.3 support. */
33517
33518diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33519index 9fff878..ad0ad53 100644
33520--- a/drivers/pci/hotplug/cpci_hotplug.h
33521+++ b/drivers/pci/hotplug/cpci_hotplug.h
33522@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33523 int (*hardware_test) (struct slot* slot, u32 value);
33524 u8 (*get_power) (struct slot* slot);
33525 int (*set_power) (struct slot* slot, int value);
33526-};
33527+} __no_const;
33528
33529 struct cpci_hp_controller {
33530 unsigned int irq;
33531diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33532index 76ba8a1..20ca857 100644
33533--- a/drivers/pci/hotplug/cpqphp_nvram.c
33534+++ b/drivers/pci/hotplug/cpqphp_nvram.c
33535@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33536
33537 void compaq_nvram_init (void __iomem *rom_start)
33538 {
33539+
33540+#ifndef CONFIG_PAX_KERNEXEC
33541 if (rom_start) {
33542 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33543 }
33544+#endif
33545+
33546 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33547
33548 /* initialize our int15 lock */
33549diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33550index cbfbab1..6a9fced 100644
33551--- a/drivers/pci/pcie/aspm.c
33552+++ b/drivers/pci/pcie/aspm.c
33553@@ -27,9 +27,9 @@
33554 #define MODULE_PARAM_PREFIX "pcie_aspm."
33555
33556 /* Note: those are not register definitions */
33557-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33558-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33559-#define ASPM_STATE_L1 (4) /* L1 state */
33560+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33561+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33562+#define ASPM_STATE_L1 (4U) /* L1 state */
33563 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33564 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33565
33566diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33567index 04e74f4..a960176 100644
33568--- a/drivers/pci/probe.c
33569+++ b/drivers/pci/probe.c
33570@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33571 u32 l, sz, mask;
33572 u16 orig_cmd;
33573
33574- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33575+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33576
33577 if (!dev->mmio_always_on) {
33578 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33579diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33580index 27911b5..5b6db88 100644
33581--- a/drivers/pci/proc.c
33582+++ b/drivers/pci/proc.c
33583@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33584 static int __init pci_proc_init(void)
33585 {
33586 struct pci_dev *dev = NULL;
33587+
33588+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33589+#ifdef CONFIG_GRKERNSEC_PROC_USER
33590+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33591+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33592+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33593+#endif
33594+#else
33595 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33596+#endif
33597 proc_create("devices", 0, proc_bus_pci_dir,
33598 &proc_bus_pci_dev_operations);
33599 proc_initialized = 1;
33600diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33601index 7b82868..b9344c9 100644
33602--- a/drivers/platform/x86/thinkpad_acpi.c
33603+++ b/drivers/platform/x86/thinkpad_acpi.c
33604@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33605 return 0;
33606 }
33607
33608-void static hotkey_mask_warn_incomplete_mask(void)
33609+static void hotkey_mask_warn_incomplete_mask(void)
33610 {
33611 /* log only what the user can fix... */
33612 const u32 wantedmask = hotkey_driver_mask &
33613@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33614 }
33615 }
33616
33617-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33618- struct tp_nvram_state *newn,
33619- const u32 event_mask)
33620-{
33621-
33622 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33623 do { \
33624 if ((event_mask & (1 << __scancode)) && \
33625@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33626 tpacpi_hotkey_send_key(__scancode); \
33627 } while (0)
33628
33629- void issue_volchange(const unsigned int oldvol,
33630- const unsigned int newvol)
33631- {
33632- unsigned int i = oldvol;
33633+static void issue_volchange(const unsigned int oldvol,
33634+ const unsigned int newvol,
33635+ const u32 event_mask)
33636+{
33637+ unsigned int i = oldvol;
33638
33639- while (i > newvol) {
33640- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33641- i--;
33642- }
33643- while (i < newvol) {
33644- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33645- i++;
33646- }
33647+ while (i > newvol) {
33648+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33649+ i--;
33650 }
33651+ while (i < newvol) {
33652+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33653+ i++;
33654+ }
33655+}
33656
33657- void issue_brightnesschange(const unsigned int oldbrt,
33658- const unsigned int newbrt)
33659- {
33660- unsigned int i = oldbrt;
33661+static void issue_brightnesschange(const unsigned int oldbrt,
33662+ const unsigned int newbrt,
33663+ const u32 event_mask)
33664+{
33665+ unsigned int i = oldbrt;
33666
33667- while (i > newbrt) {
33668- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33669- i--;
33670- }
33671- while (i < newbrt) {
33672- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33673- i++;
33674- }
33675+ while (i > newbrt) {
33676+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33677+ i--;
33678+ }
33679+ while (i < newbrt) {
33680+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33681+ i++;
33682 }
33683+}
33684
33685+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33686+ struct tp_nvram_state *newn,
33687+ const u32 event_mask)
33688+{
33689 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33690 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33691 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33692@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33693 oldn->volume_level != newn->volume_level) {
33694 /* recently muted, or repeated mute keypress, or
33695 * multiple presses ending in mute */
33696- issue_volchange(oldn->volume_level, newn->volume_level);
33697+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33698 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33699 }
33700 } else {
33701@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33702 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33703 }
33704 if (oldn->volume_level != newn->volume_level) {
33705- issue_volchange(oldn->volume_level, newn->volume_level);
33706+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33707 } else if (oldn->volume_toggle != newn->volume_toggle) {
33708 /* repeated vol up/down keypress at end of scale ? */
33709 if (newn->volume_level == 0)
33710@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33711 /* handle brightness */
33712 if (oldn->brightness_level != newn->brightness_level) {
33713 issue_brightnesschange(oldn->brightness_level,
33714- newn->brightness_level);
33715+ newn->brightness_level,
33716+ event_mask);
33717 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33718 /* repeated key presses that didn't change state */
33719 if (newn->brightness_level == 0)
33720@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33721 && !tp_features.bright_unkfw)
33722 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33723 }
33724+}
33725
33726 #undef TPACPI_COMPARE_KEY
33727 #undef TPACPI_MAY_SEND_KEY
33728-}
33729
33730 /*
33731 * Polling driver
33732diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33733index b859d16..5cc6b1a 100644
33734--- a/drivers/pnp/pnpbios/bioscalls.c
33735+++ b/drivers/pnp/pnpbios/bioscalls.c
33736@@ -59,7 +59,7 @@ do { \
33737 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33738 } while(0)
33739
33740-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33741+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33742 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33743
33744 /*
33745@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33746
33747 cpu = get_cpu();
33748 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33749+
33750+ pax_open_kernel();
33751 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33752+ pax_close_kernel();
33753
33754 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33755 spin_lock_irqsave(&pnp_bios_lock, flags);
33756@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33757 :"memory");
33758 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33759
33760+ pax_open_kernel();
33761 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33762+ pax_close_kernel();
33763+
33764 put_cpu();
33765
33766 /* If we get here and this is set then the PnP BIOS faulted on us. */
33767@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33768 return status;
33769 }
33770
33771-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33772+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33773 {
33774 int i;
33775
33776@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33777 pnp_bios_callpoint.offset = header->fields.pm16offset;
33778 pnp_bios_callpoint.segment = PNP_CS16;
33779
33780+ pax_open_kernel();
33781+
33782 for_each_possible_cpu(i) {
33783 struct desc_struct *gdt = get_cpu_gdt_table(i);
33784 if (!gdt)
33785@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33786 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33787 (unsigned long)__va(header->fields.pm16dseg));
33788 }
33789+
33790+ pax_close_kernel();
33791 }
33792diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33793index b0ecacb..7c9da2e 100644
33794--- a/drivers/pnp/resource.c
33795+++ b/drivers/pnp/resource.c
33796@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33797 return 1;
33798
33799 /* check if the resource is valid */
33800- if (*irq < 0 || *irq > 15)
33801+ if (*irq > 15)
33802 return 0;
33803
33804 /* check if the resource is reserved */
33805@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33806 return 1;
33807
33808 /* check if the resource is valid */
33809- if (*dma < 0 || *dma == 4 || *dma > 7)
33810+ if (*dma == 4 || *dma > 7)
33811 return 0;
33812
33813 /* check if the resource is reserved */
33814diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33815index bb16f5b..c751eef 100644
33816--- a/drivers/power/bq27x00_battery.c
33817+++ b/drivers/power/bq27x00_battery.c
33818@@ -67,7 +67,7 @@
33819 struct bq27x00_device_info;
33820 struct bq27x00_access_methods {
33821 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33822-};
33823+} __no_const;
33824
33825 enum bq27x00_chip { BQ27000, BQ27500 };
33826
33827diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33828index 33f5d9a..d957d3f 100644
33829--- a/drivers/regulator/max8660.c
33830+++ b/drivers/regulator/max8660.c
33831@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33832 max8660->shadow_regs[MAX8660_OVER1] = 5;
33833 } else {
33834 /* Otherwise devices can be toggled via software */
33835- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33836- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33837+ pax_open_kernel();
33838+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33839+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33840+ pax_close_kernel();
33841 }
33842
33843 /*
33844diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33845index 023d17d..74ef35b 100644
33846--- a/drivers/regulator/mc13892-regulator.c
33847+++ b/drivers/regulator/mc13892-regulator.c
33848@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33849 }
33850 mc13xxx_unlock(mc13892);
33851
33852- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33853+ pax_open_kernel();
33854+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33855 = mc13892_vcam_set_mode;
33856- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33857+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33858 = mc13892_vcam_get_mode;
33859+ pax_close_kernel();
33860 for (i = 0; i < pdata->num_regulators; i++) {
33861 init_data = &pdata->regulators[i];
33862 priv->regulators[i] = regulator_register(
33863diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33864index cace6d3..f623fda 100644
33865--- a/drivers/rtc/rtc-dev.c
33866+++ b/drivers/rtc/rtc-dev.c
33867@@ -14,6 +14,7 @@
33868 #include <linux/module.h>
33869 #include <linux/rtc.h>
33870 #include <linux/sched.h>
33871+#include <linux/grsecurity.h>
33872 #include "rtc-core.h"
33873
33874 static dev_t rtc_devt;
33875@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33876 if (copy_from_user(&tm, uarg, sizeof(tm)))
33877 return -EFAULT;
33878
33879+ gr_log_timechange();
33880+
33881 return rtc_set_time(rtc, &tm);
33882
33883 case RTC_PIE_ON:
33884diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33885index ffb5878..e6d785c 100644
33886--- a/drivers/scsi/aacraid/aacraid.h
33887+++ b/drivers/scsi/aacraid/aacraid.h
33888@@ -492,7 +492,7 @@ struct adapter_ops
33889 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33890 /* Administrative operations */
33891 int (*adapter_comm)(struct aac_dev * dev, int comm);
33892-};
33893+} __no_const;
33894
33895 /*
33896 * Define which interrupt handler needs to be installed
33897diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33898index 705e13e..91c873c 100644
33899--- a/drivers/scsi/aacraid/linit.c
33900+++ b/drivers/scsi/aacraid/linit.c
33901@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33902 #elif defined(__devinitconst)
33903 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33904 #else
33905-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33906+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33907 #endif
33908 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33909 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33910diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
33911index d5ff142..49c0ebb 100644
33912--- a/drivers/scsi/aic94xx/aic94xx_init.c
33913+++ b/drivers/scsi/aic94xx/aic94xx_init.c
33914@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
33915 .lldd_control_phy = asd_control_phy,
33916 };
33917
33918-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33919+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33920 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33921 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33922 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33923diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
33924index a796de9..1ef20e1 100644
33925--- a/drivers/scsi/bfa/bfa.h
33926+++ b/drivers/scsi/bfa/bfa.h
33927@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33928 u32 *end);
33929 int cpe_vec_q0;
33930 int rme_vec_q0;
33931-};
33932+} __no_const;
33933 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33934
33935 struct bfa_faa_cbfn_s {
33936diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
33937index e07bd47..cd1bbbb 100644
33938--- a/drivers/scsi/bfa/bfa_fcpim.c
33939+++ b/drivers/scsi/bfa/bfa_fcpim.c
33940@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
33941
33942 bfa_iotag_attach(fcp);
33943
33944- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
33945+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
33946 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
33947 (fcp->num_itns * sizeof(struct bfa_itn_s));
33948 memset(fcp->itn_arr, 0,
33949@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33950 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33951 {
33952 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33953- struct bfa_itn_s *itn;
33954+ bfa_itn_s_no_const *itn;
33955
33956 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33957 itn->isr = isr;
33958diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
33959index 1080bcb..a3b39e3 100644
33960--- a/drivers/scsi/bfa/bfa_fcpim.h
33961+++ b/drivers/scsi/bfa/bfa_fcpim.h
33962@@ -37,6 +37,7 @@ struct bfa_iotag_s {
33963 struct bfa_itn_s {
33964 bfa_isr_func_t isr;
33965 };
33966+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33967
33968 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33969 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33970@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33971 struct list_head iotag_tio_free_q; /* free IO resources */
33972 struct list_head iotag_unused_q; /* unused IO resources*/
33973 struct bfa_iotag_s *iotag_arr;
33974- struct bfa_itn_s *itn_arr;
33975+ bfa_itn_s_no_const *itn_arr;
33976 int num_ioim_reqs;
33977 int num_fwtio_reqs;
33978 int num_itns;
33979diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
33980index 546d46b..642fa5b 100644
33981--- a/drivers/scsi/bfa/bfa_ioc.h
33982+++ b/drivers/scsi/bfa/bfa_ioc.h
33983@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33984 bfa_ioc_disable_cbfn_t disable_cbfn;
33985 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33986 bfa_ioc_reset_cbfn_t reset_cbfn;
33987-};
33988+} __no_const;
33989
33990 /*
33991 * IOC event notification mechanism.
33992@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33993 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33994 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33995 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33996-};
33997+} __no_const;
33998
33999 /*
34000 * Queue element to wait for room in request queue. FIFO order is
34001diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34002index 351dc0b..951dc32 100644
34003--- a/drivers/scsi/hosts.c
34004+++ b/drivers/scsi/hosts.c
34005@@ -42,7 +42,7 @@
34006 #include "scsi_logging.h"
34007
34008
34009-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34010+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34011
34012
34013 static void scsi_host_cls_release(struct device *dev)
34014@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34015 * subtract one because we increment first then return, but we need to
34016 * know what the next host number was before increment
34017 */
34018- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34019+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34020 shost->dma_channel = 0xff;
34021
34022 /* These three are default values which can be overridden */
34023diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34024index 865d452..e9b7fa7 100644
34025--- a/drivers/scsi/hpsa.c
34026+++ b/drivers/scsi/hpsa.c
34027@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34028 u32 a;
34029
34030 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34031- return h->access.command_completed(h);
34032+ return h->access->command_completed(h);
34033
34034 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34035 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34036@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34037 while (!list_empty(&h->reqQ)) {
34038 c = list_entry(h->reqQ.next, struct CommandList, list);
34039 /* can't do anything if fifo is full */
34040- if ((h->access.fifo_full(h))) {
34041+ if ((h->access->fifo_full(h))) {
34042 dev_warn(&h->pdev->dev, "fifo full\n");
34043 break;
34044 }
34045@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34046 h->Qdepth--;
34047
34048 /* Tell the controller execute command */
34049- h->access.submit_command(h, c);
34050+ h->access->submit_command(h, c);
34051
34052 /* Put job onto the completed Q */
34053 addQ(&h->cmpQ, c);
34054@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34055
34056 static inline unsigned long get_next_completion(struct ctlr_info *h)
34057 {
34058- return h->access.command_completed(h);
34059+ return h->access->command_completed(h);
34060 }
34061
34062 static inline bool interrupt_pending(struct ctlr_info *h)
34063 {
34064- return h->access.intr_pending(h);
34065+ return h->access->intr_pending(h);
34066 }
34067
34068 static inline long interrupt_not_for_us(struct ctlr_info *h)
34069 {
34070- return (h->access.intr_pending(h) == 0) ||
34071+ return (h->access->intr_pending(h) == 0) ||
34072 (h->interrupts_enabled == 0);
34073 }
34074
34075@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34076 if (prod_index < 0)
34077 return -ENODEV;
34078 h->product_name = products[prod_index].product_name;
34079- h->access = *(products[prod_index].access);
34080+ h->access = products[prod_index].access;
34081
34082 if (hpsa_board_disabled(h->pdev)) {
34083 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34084@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34085
34086 assert_spin_locked(&lockup_detector_lock);
34087 remove_ctlr_from_lockup_detector_list(h);
34088- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34089+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34090 spin_lock_irqsave(&h->lock, flags);
34091 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34092 spin_unlock_irqrestore(&h->lock, flags);
34093@@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34094 }
34095
34096 /* make sure the board interrupts are off */
34097- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34098+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34099
34100 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34101 goto clean2;
34102@@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34103 * fake ones to scoop up any residual completions.
34104 */
34105 spin_lock_irqsave(&h->lock, flags);
34106- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34107+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34108 spin_unlock_irqrestore(&h->lock, flags);
34109 free_irq(h->intr[h->intr_mode], h);
34110 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34111@@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34112 dev_info(&h->pdev->dev, "Board READY.\n");
34113 dev_info(&h->pdev->dev,
34114 "Waiting for stale completions to drain.\n");
34115- h->access.set_intr_mask(h, HPSA_INTR_ON);
34116+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34117 msleep(10000);
34118- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34119+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34120
34121 rc = controller_reset_failed(h->cfgtable);
34122 if (rc)
34123@@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34124 }
34125
34126 /* Turn the interrupts on so we can service requests */
34127- h->access.set_intr_mask(h, HPSA_INTR_ON);
34128+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34129
34130 hpsa_hba_inquiry(h);
34131 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34132@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34133 * To write all data in the battery backed cache to disks
34134 */
34135 hpsa_flush_cache(h);
34136- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34137+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34138 free_irq(h->intr[h->intr_mode], h);
34139 #ifdef CONFIG_PCI_MSI
34140 if (h->msix_vector)
34141@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34142 return;
34143 }
34144 /* Change the access methods to the performant access methods */
34145- h->access = SA5_performant_access;
34146+ h->access = &SA5_performant_access;
34147 h->transMethod = CFGTBL_Trans_Performant;
34148 }
34149
34150diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34151index 91edafb..a9b88ec 100644
34152--- a/drivers/scsi/hpsa.h
34153+++ b/drivers/scsi/hpsa.h
34154@@ -73,7 +73,7 @@ struct ctlr_info {
34155 unsigned int msix_vector;
34156 unsigned int msi_vector;
34157 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34158- struct access_method access;
34159+ struct access_method *access;
34160
34161 /* queue and queue Info */
34162 struct list_head reqQ;
34163diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34164index f2df059..a3a9930 100644
34165--- a/drivers/scsi/ips.h
34166+++ b/drivers/scsi/ips.h
34167@@ -1027,7 +1027,7 @@ typedef struct {
34168 int (*intr)(struct ips_ha *);
34169 void (*enableint)(struct ips_ha *);
34170 uint32_t (*statupd)(struct ips_ha *);
34171-} ips_hw_func_t;
34172+} __no_const ips_hw_func_t;
34173
34174 typedef struct ips_ha {
34175 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34176diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34177index 9de9db2..1e09660 100644
34178--- a/drivers/scsi/libfc/fc_exch.c
34179+++ b/drivers/scsi/libfc/fc_exch.c
34180@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34181 * all together if not used XXX
34182 */
34183 struct {
34184- atomic_t no_free_exch;
34185- atomic_t no_free_exch_xid;
34186- atomic_t xid_not_found;
34187- atomic_t xid_busy;
34188- atomic_t seq_not_found;
34189- atomic_t non_bls_resp;
34190+ atomic_unchecked_t no_free_exch;
34191+ atomic_unchecked_t no_free_exch_xid;
34192+ atomic_unchecked_t xid_not_found;
34193+ atomic_unchecked_t xid_busy;
34194+ atomic_unchecked_t seq_not_found;
34195+ atomic_unchecked_t non_bls_resp;
34196 } stats;
34197 };
34198
34199@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34200 /* allocate memory for exchange */
34201 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34202 if (!ep) {
34203- atomic_inc(&mp->stats.no_free_exch);
34204+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34205 goto out;
34206 }
34207 memset(ep, 0, sizeof(*ep));
34208@@ -780,7 +780,7 @@ out:
34209 return ep;
34210 err:
34211 spin_unlock_bh(&pool->lock);
34212- atomic_inc(&mp->stats.no_free_exch_xid);
34213+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34214 mempool_free(ep, mp->ep_pool);
34215 return NULL;
34216 }
34217@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34218 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34219 ep = fc_exch_find(mp, xid);
34220 if (!ep) {
34221- atomic_inc(&mp->stats.xid_not_found);
34222+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34223 reject = FC_RJT_OX_ID;
34224 goto out;
34225 }
34226@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34227 ep = fc_exch_find(mp, xid);
34228 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34229 if (ep) {
34230- atomic_inc(&mp->stats.xid_busy);
34231+ atomic_inc_unchecked(&mp->stats.xid_busy);
34232 reject = FC_RJT_RX_ID;
34233 goto rel;
34234 }
34235@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34236 }
34237 xid = ep->xid; /* get our XID */
34238 } else if (!ep) {
34239- atomic_inc(&mp->stats.xid_not_found);
34240+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34241 reject = FC_RJT_RX_ID; /* XID not found */
34242 goto out;
34243 }
34244@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34245 } else {
34246 sp = &ep->seq;
34247 if (sp->id != fh->fh_seq_id) {
34248- atomic_inc(&mp->stats.seq_not_found);
34249+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34250 if (f_ctl & FC_FC_END_SEQ) {
34251 /*
34252 * Update sequence_id based on incoming last
34253@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34254
34255 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34256 if (!ep) {
34257- atomic_inc(&mp->stats.xid_not_found);
34258+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34259 goto out;
34260 }
34261 if (ep->esb_stat & ESB_ST_COMPLETE) {
34262- atomic_inc(&mp->stats.xid_not_found);
34263+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34264 goto rel;
34265 }
34266 if (ep->rxid == FC_XID_UNKNOWN)
34267 ep->rxid = ntohs(fh->fh_rx_id);
34268 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34269- atomic_inc(&mp->stats.xid_not_found);
34270+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34271 goto rel;
34272 }
34273 if (ep->did != ntoh24(fh->fh_s_id) &&
34274 ep->did != FC_FID_FLOGI) {
34275- atomic_inc(&mp->stats.xid_not_found);
34276+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34277 goto rel;
34278 }
34279 sof = fr_sof(fp);
34280@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34281 sp->ssb_stat |= SSB_ST_RESP;
34282 sp->id = fh->fh_seq_id;
34283 } else if (sp->id != fh->fh_seq_id) {
34284- atomic_inc(&mp->stats.seq_not_found);
34285+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34286 goto rel;
34287 }
34288
34289@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34290 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34291
34292 if (!sp)
34293- atomic_inc(&mp->stats.xid_not_found);
34294+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34295 else
34296- atomic_inc(&mp->stats.non_bls_resp);
34297+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34298
34299 fc_frame_free(fp);
34300 }
34301diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34302index db9238f..4378ed2 100644
34303--- a/drivers/scsi/libsas/sas_ata.c
34304+++ b/drivers/scsi/libsas/sas_ata.c
34305@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34306 .postreset = ata_std_postreset,
34307 .error_handler = ata_std_error_handler,
34308 .post_internal_cmd = sas_ata_post_internal,
34309- .qc_defer = ata_std_qc_defer,
34310+ .qc_defer = ata_std_qc_defer,
34311 .qc_prep = ata_noop_qc_prep,
34312 .qc_issue = sas_ata_qc_issue,
34313 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34314diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34315index bb4c8e0..f33d849 100644
34316--- a/drivers/scsi/lpfc/lpfc.h
34317+++ b/drivers/scsi/lpfc/lpfc.h
34318@@ -425,7 +425,7 @@ struct lpfc_vport {
34319 struct dentry *debug_nodelist;
34320 struct dentry *vport_debugfs_root;
34321 struct lpfc_debugfs_trc *disc_trc;
34322- atomic_t disc_trc_cnt;
34323+ atomic_unchecked_t disc_trc_cnt;
34324 #endif
34325 uint8_t stat_data_enabled;
34326 uint8_t stat_data_blocked;
34327@@ -835,8 +835,8 @@ struct lpfc_hba {
34328 struct timer_list fabric_block_timer;
34329 unsigned long bit_flags;
34330 #define FABRIC_COMANDS_BLOCKED 0
34331- atomic_t num_rsrc_err;
34332- atomic_t num_cmd_success;
34333+ atomic_unchecked_t num_rsrc_err;
34334+ atomic_unchecked_t num_cmd_success;
34335 unsigned long last_rsrc_error_time;
34336 unsigned long last_ramp_down_time;
34337 unsigned long last_ramp_up_time;
34338@@ -866,7 +866,7 @@ struct lpfc_hba {
34339
34340 struct dentry *debug_slow_ring_trc;
34341 struct lpfc_debugfs_trc *slow_ring_trc;
34342- atomic_t slow_ring_trc_cnt;
34343+ atomic_unchecked_t slow_ring_trc_cnt;
34344 /* iDiag debugfs sub-directory */
34345 struct dentry *idiag_root;
34346 struct dentry *idiag_pci_cfg;
34347diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34348index 2838259..a07cfb5 100644
34349--- a/drivers/scsi/lpfc/lpfc_debugfs.c
34350+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34351@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34352
34353 #include <linux/debugfs.h>
34354
34355-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34356+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34357 static unsigned long lpfc_debugfs_start_time = 0L;
34358
34359 /* iDiag */
34360@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34361 lpfc_debugfs_enable = 0;
34362
34363 len = 0;
34364- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34365+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34366 (lpfc_debugfs_max_disc_trc - 1);
34367 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34368 dtp = vport->disc_trc + i;
34369@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34370 lpfc_debugfs_enable = 0;
34371
34372 len = 0;
34373- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34374+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34375 (lpfc_debugfs_max_slow_ring_trc - 1);
34376 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34377 dtp = phba->slow_ring_trc + i;
34378@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34379 !vport || !vport->disc_trc)
34380 return;
34381
34382- index = atomic_inc_return(&vport->disc_trc_cnt) &
34383+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34384 (lpfc_debugfs_max_disc_trc - 1);
34385 dtp = vport->disc_trc + index;
34386 dtp->fmt = fmt;
34387 dtp->data1 = data1;
34388 dtp->data2 = data2;
34389 dtp->data3 = data3;
34390- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34391+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34392 dtp->jif = jiffies;
34393 #endif
34394 return;
34395@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34396 !phba || !phba->slow_ring_trc)
34397 return;
34398
34399- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34400+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34401 (lpfc_debugfs_max_slow_ring_trc - 1);
34402 dtp = phba->slow_ring_trc + index;
34403 dtp->fmt = fmt;
34404 dtp->data1 = data1;
34405 dtp->data2 = data2;
34406 dtp->data3 = data3;
34407- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34408+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34409 dtp->jif = jiffies;
34410 #endif
34411 return;
34412@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34413 "slow_ring buffer\n");
34414 goto debug_failed;
34415 }
34416- atomic_set(&phba->slow_ring_trc_cnt, 0);
34417+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34418 memset(phba->slow_ring_trc, 0,
34419 (sizeof(struct lpfc_debugfs_trc) *
34420 lpfc_debugfs_max_slow_ring_trc));
34421@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34422 "buffer\n");
34423 goto debug_failed;
34424 }
34425- atomic_set(&vport->disc_trc_cnt, 0);
34426+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34427
34428 snprintf(name, sizeof(name), "discovery_trace");
34429 vport->debug_disc_trc =
34430diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34431index 55bc4fc..a2a109c 100644
34432--- a/drivers/scsi/lpfc/lpfc_init.c
34433+++ b/drivers/scsi/lpfc/lpfc_init.c
34434@@ -10027,8 +10027,10 @@ lpfc_init(void)
34435 printk(LPFC_COPYRIGHT "\n");
34436
34437 if (lpfc_enable_npiv) {
34438- lpfc_transport_functions.vport_create = lpfc_vport_create;
34439- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34440+ pax_open_kernel();
34441+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34442+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34443+ pax_close_kernel();
34444 }
34445 lpfc_transport_template =
34446 fc_attach_transport(&lpfc_transport_functions);
34447diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34448index 2e1e54e..1af0a0d 100644
34449--- a/drivers/scsi/lpfc/lpfc_scsi.c
34450+++ b/drivers/scsi/lpfc/lpfc_scsi.c
34451@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34452 uint32_t evt_posted;
34453
34454 spin_lock_irqsave(&phba->hbalock, flags);
34455- atomic_inc(&phba->num_rsrc_err);
34456+ atomic_inc_unchecked(&phba->num_rsrc_err);
34457 phba->last_rsrc_error_time = jiffies;
34458
34459 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34460@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34461 unsigned long flags;
34462 struct lpfc_hba *phba = vport->phba;
34463 uint32_t evt_posted;
34464- atomic_inc(&phba->num_cmd_success);
34465+ atomic_inc_unchecked(&phba->num_cmd_success);
34466
34467 if (vport->cfg_lun_queue_depth <= queue_depth)
34468 return;
34469@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34470 unsigned long num_rsrc_err, num_cmd_success;
34471 int i;
34472
34473- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34474- num_cmd_success = atomic_read(&phba->num_cmd_success);
34475+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34476+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34477
34478 vports = lpfc_create_vport_work_array(phba);
34479 if (vports != NULL)
34480@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34481 }
34482 }
34483 lpfc_destroy_vport_work_array(phba, vports);
34484- atomic_set(&phba->num_rsrc_err, 0);
34485- atomic_set(&phba->num_cmd_success, 0);
34486+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34487+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34488 }
34489
34490 /**
34491@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34492 }
34493 }
34494 lpfc_destroy_vport_work_array(phba, vports);
34495- atomic_set(&phba->num_rsrc_err, 0);
34496- atomic_set(&phba->num_cmd_success, 0);
34497+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34498+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34499 }
34500
34501 /**
34502diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34503index 5163edb..7b142bc 100644
34504--- a/drivers/scsi/pmcraid.c
34505+++ b/drivers/scsi/pmcraid.c
34506@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34507 res->scsi_dev = scsi_dev;
34508 scsi_dev->hostdata = res;
34509 res->change_detected = 0;
34510- atomic_set(&res->read_failures, 0);
34511- atomic_set(&res->write_failures, 0);
34512+ atomic_set_unchecked(&res->read_failures, 0);
34513+ atomic_set_unchecked(&res->write_failures, 0);
34514 rc = 0;
34515 }
34516 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34517@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34518
34519 /* If this was a SCSI read/write command keep count of errors */
34520 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34521- atomic_inc(&res->read_failures);
34522+ atomic_inc_unchecked(&res->read_failures);
34523 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34524- atomic_inc(&res->write_failures);
34525+ atomic_inc_unchecked(&res->write_failures);
34526
34527 if (!RES_IS_GSCSI(res->cfg_entry) &&
34528 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34529@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34530 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34531 * hrrq_id assigned here in queuecommand
34532 */
34533- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34534+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34535 pinstance->num_hrrq;
34536 cmd->cmd_done = pmcraid_io_done;
34537
34538@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34539 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34540 * hrrq_id assigned here in queuecommand
34541 */
34542- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34543+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34544 pinstance->num_hrrq;
34545
34546 if (request_size) {
34547@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34548
34549 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34550 /* add resources only after host is added into system */
34551- if (!atomic_read(&pinstance->expose_resources))
34552+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34553 return;
34554
34555 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34556@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34557 init_waitqueue_head(&pinstance->reset_wait_q);
34558
34559 atomic_set(&pinstance->outstanding_cmds, 0);
34560- atomic_set(&pinstance->last_message_id, 0);
34561- atomic_set(&pinstance->expose_resources, 0);
34562+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34563+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34564
34565 INIT_LIST_HEAD(&pinstance->free_res_q);
34566 INIT_LIST_HEAD(&pinstance->used_res_q);
34567@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34568 /* Schedule worker thread to handle CCN and take care of adding and
34569 * removing devices to OS
34570 */
34571- atomic_set(&pinstance->expose_resources, 1);
34572+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34573 schedule_work(&pinstance->worker_q);
34574 return rc;
34575
34576diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34577index ca496c7..9c791d5 100644
34578--- a/drivers/scsi/pmcraid.h
34579+++ b/drivers/scsi/pmcraid.h
34580@@ -748,7 +748,7 @@ struct pmcraid_instance {
34581 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34582
34583 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34584- atomic_t last_message_id;
34585+ atomic_unchecked_t last_message_id;
34586
34587 /* configuration table */
34588 struct pmcraid_config_table *cfg_table;
34589@@ -777,7 +777,7 @@ struct pmcraid_instance {
34590 atomic_t outstanding_cmds;
34591
34592 /* should add/delete resources to mid-layer now ?*/
34593- atomic_t expose_resources;
34594+ atomic_unchecked_t expose_resources;
34595
34596
34597
34598@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34599 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34600 };
34601 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34602- atomic_t read_failures; /* count of failed READ commands */
34603- atomic_t write_failures; /* count of failed WRITE commands */
34604+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34605+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34606
34607 /* To indicate add/delete/modify during CCN */
34608 u8 change_detected;
34609diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34610index fcf052c..a8025a4 100644
34611--- a/drivers/scsi/qla2xxx/qla_def.h
34612+++ b/drivers/scsi/qla2xxx/qla_def.h
34613@@ -2244,7 +2244,7 @@ struct isp_operations {
34614 int (*get_flash_version) (struct scsi_qla_host *, void *);
34615 int (*start_scsi) (srb_t *);
34616 int (*abort_isp) (struct scsi_qla_host *);
34617-};
34618+} __no_const;
34619
34620 /* MSI-X Support *************************************************************/
34621
34622diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34623index fd5edc6..4906148 100644
34624--- a/drivers/scsi/qla4xxx/ql4_def.h
34625+++ b/drivers/scsi/qla4xxx/ql4_def.h
34626@@ -258,7 +258,7 @@ struct ddb_entry {
34627 * (4000 only) */
34628 atomic_t relogin_timer; /* Max Time to wait for
34629 * relogin to complete */
34630- atomic_t relogin_retry_count; /* Num of times relogin has been
34631+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34632 * retried */
34633 uint32_t default_time2wait; /* Default Min time between
34634 * relogins (+aens) */
34635diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34636index 4169c8b..a8b896b 100644
34637--- a/drivers/scsi/qla4xxx/ql4_os.c
34638+++ b/drivers/scsi/qla4xxx/ql4_os.c
34639@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34640 */
34641 if (!iscsi_is_session_online(cls_sess)) {
34642 /* Reset retry relogin timer */
34643- atomic_inc(&ddb_entry->relogin_retry_count);
34644+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34645 DEBUG2(ql4_printk(KERN_INFO, ha,
34646 "%s: index[%d] relogin timed out-retrying"
34647 " relogin (%d), retry (%d)\n", __func__,
34648 ddb_entry->fw_ddb_index,
34649- atomic_read(&ddb_entry->relogin_retry_count),
34650+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34651 ddb_entry->default_time2wait + 4));
34652 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34653 atomic_set(&ddb_entry->retry_relogin_timer,
34654@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34655
34656 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34657 atomic_set(&ddb_entry->relogin_timer, 0);
34658- atomic_set(&ddb_entry->relogin_retry_count, 0);
34659+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34660
34661 ddb_entry->default_relogin_timeout =
34662 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34663diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34664index 2aeb2e9..46e3925 100644
34665--- a/drivers/scsi/scsi.c
34666+++ b/drivers/scsi/scsi.c
34667@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34668 unsigned long timeout;
34669 int rtn = 0;
34670
34671- atomic_inc(&cmd->device->iorequest_cnt);
34672+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34673
34674 /* check if the device is still usable */
34675 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34676diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34677index f85cfa6..a57c9e8 100644
34678--- a/drivers/scsi/scsi_lib.c
34679+++ b/drivers/scsi/scsi_lib.c
34680@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34681 shost = sdev->host;
34682 scsi_init_cmd_errh(cmd);
34683 cmd->result = DID_NO_CONNECT << 16;
34684- atomic_inc(&cmd->device->iorequest_cnt);
34685+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34686
34687 /*
34688 * SCSI request completion path will do scsi_device_unbusy(),
34689@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34690
34691 INIT_LIST_HEAD(&cmd->eh_entry);
34692
34693- atomic_inc(&cmd->device->iodone_cnt);
34694+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34695 if (cmd->result)
34696- atomic_inc(&cmd->device->ioerr_cnt);
34697+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34698
34699 disposition = scsi_decide_disposition(cmd);
34700 if (disposition != SUCCESS &&
34701diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34702index 04c2a27..9d8bd66 100644
34703--- a/drivers/scsi/scsi_sysfs.c
34704+++ b/drivers/scsi/scsi_sysfs.c
34705@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34706 char *buf) \
34707 { \
34708 struct scsi_device *sdev = to_scsi_device(dev); \
34709- unsigned long long count = atomic_read(&sdev->field); \
34710+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34711 return snprintf(buf, 20, "0x%llx\n", count); \
34712 } \
34713 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34714diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34715index 84a1fdf..693b0d6 100644
34716--- a/drivers/scsi/scsi_tgt_lib.c
34717+++ b/drivers/scsi/scsi_tgt_lib.c
34718@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34719 int err;
34720
34721 dprintk("%lx %u\n", uaddr, len);
34722- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34723+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34724 if (err) {
34725 /*
34726 * TODO: need to fixup sg_tablesize, max_segment_size,
34727diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34728index 1b21491..1b7f60e 100644
34729--- a/drivers/scsi/scsi_transport_fc.c
34730+++ b/drivers/scsi/scsi_transport_fc.c
34731@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34732 * Netlink Infrastructure
34733 */
34734
34735-static atomic_t fc_event_seq;
34736+static atomic_unchecked_t fc_event_seq;
34737
34738 /**
34739 * fc_get_event_number - Obtain the next sequential FC event number
34740@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34741 u32
34742 fc_get_event_number(void)
34743 {
34744- return atomic_add_return(1, &fc_event_seq);
34745+ return atomic_add_return_unchecked(1, &fc_event_seq);
34746 }
34747 EXPORT_SYMBOL(fc_get_event_number);
34748
34749@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34750 {
34751 int error;
34752
34753- atomic_set(&fc_event_seq, 0);
34754+ atomic_set_unchecked(&fc_event_seq, 0);
34755
34756 error = transport_class_register(&fc_host_class);
34757 if (error)
34758@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34759 char *cp;
34760
34761 *val = simple_strtoul(buf, &cp, 0);
34762- if ((*cp && (*cp != '\n')) || (*val < 0))
34763+ if (*cp && (*cp != '\n'))
34764 return -EINVAL;
34765 /*
34766 * Check for overflow; dev_loss_tmo is u32
34767diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34768index 96029e6..4d77fa0 100644
34769--- a/drivers/scsi/scsi_transport_iscsi.c
34770+++ b/drivers/scsi/scsi_transport_iscsi.c
34771@@ -79,7 +79,7 @@ struct iscsi_internal {
34772 struct transport_container session_cont;
34773 };
34774
34775-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34776+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34777 static struct workqueue_struct *iscsi_eh_timer_workq;
34778
34779 static DEFINE_IDA(iscsi_sess_ida);
34780@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34781 int err;
34782
34783 ihost = shost->shost_data;
34784- session->sid = atomic_add_return(1, &iscsi_session_nr);
34785+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34786
34787 if (target_id == ISCSI_MAX_TARGET) {
34788 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34789@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34790 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34791 ISCSI_TRANSPORT_VERSION);
34792
34793- atomic_set(&iscsi_session_nr, 0);
34794+ atomic_set_unchecked(&iscsi_session_nr, 0);
34795
34796 err = class_register(&iscsi_transport_class);
34797 if (err)
34798diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34799index 21a045e..ec89e03 100644
34800--- a/drivers/scsi/scsi_transport_srp.c
34801+++ b/drivers/scsi/scsi_transport_srp.c
34802@@ -33,7 +33,7 @@
34803 #include "scsi_transport_srp_internal.h"
34804
34805 struct srp_host_attrs {
34806- atomic_t next_port_id;
34807+ atomic_unchecked_t next_port_id;
34808 };
34809 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34810
34811@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34812 struct Scsi_Host *shost = dev_to_shost(dev);
34813 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34814
34815- atomic_set(&srp_host->next_port_id, 0);
34816+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34817 return 0;
34818 }
34819
34820@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34821 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34822 rport->roles = ids->roles;
34823
34824- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34825+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34826 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34827
34828 transport_setup_device(&rport->dev);
34829diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34830index 441a1c5..07cece7 100644
34831--- a/drivers/scsi/sg.c
34832+++ b/drivers/scsi/sg.c
34833@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34834 sdp->disk->disk_name,
34835 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34836 NULL,
34837- (char *)arg);
34838+ (char __user *)arg);
34839 case BLKTRACESTART:
34840 return blk_trace_startstop(sdp->device->request_queue, 1);
34841 case BLKTRACESTOP:
34842@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34843 const struct file_operations * fops;
34844 };
34845
34846-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34847+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34848 {"allow_dio", &adio_fops},
34849 {"debug", &debug_fops},
34850 {"def_reserved_size", &dressz_fops},
34851@@ -2327,7 +2327,7 @@ sg_proc_init(void)
34852 {
34853 int k, mask;
34854 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34855- struct sg_proc_leaf * leaf;
34856+ const struct sg_proc_leaf * leaf;
34857
34858 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34859 if (!sg_proc_sgp)
34860diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34861index f64250e..1ee3049 100644
34862--- a/drivers/spi/spi-dw-pci.c
34863+++ b/drivers/spi/spi-dw-pci.c
34864@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34865 #define spi_resume NULL
34866 #endif
34867
34868-static const struct pci_device_id pci_ids[] __devinitdata = {
34869+static const struct pci_device_id pci_ids[] __devinitconst = {
34870 /* Intel MID platform SPI controller 0 */
34871 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34872 {},
34873diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34874index 77eae99..b7cdcc9 100644
34875--- a/drivers/spi/spi.c
34876+++ b/drivers/spi/spi.c
34877@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34878 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34879
34880 /* portable code must never pass more than 32 bytes */
34881-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34882+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34883
34884 static u8 *buf;
34885
34886diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34887index 436fe97..4082570 100644
34888--- a/drivers/staging/gma500/power.c
34889+++ b/drivers/staging/gma500/power.c
34890@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34891 ret = gma_resume_pci(dev->pdev);
34892 if (ret == 0) {
34893 /* FIXME: we want to defer this for Medfield/Oaktrail */
34894- gma_resume_display(dev);
34895+ gma_resume_display(dev->pdev);
34896 psb_irq_preinstall(dev);
34897 psb_irq_postinstall(dev);
34898 pm_runtime_get(&dev->pdev->dev);
34899diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34900index bafccb3..e3ac78d 100644
34901--- a/drivers/staging/hv/rndis_filter.c
34902+++ b/drivers/staging/hv/rndis_filter.c
34903@@ -42,7 +42,7 @@ struct rndis_device {
34904
34905 enum rndis_device_state state;
34906 bool link_state;
34907- atomic_t new_req_id;
34908+ atomic_unchecked_t new_req_id;
34909
34910 spinlock_t request_lock;
34911 struct list_head req_list;
34912@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34913 * template
34914 */
34915 set = &rndis_msg->msg.set_req;
34916- set->req_id = atomic_inc_return(&dev->new_req_id);
34917+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34918
34919 /* Add to the request list */
34920 spin_lock_irqsave(&dev->request_lock, flags);
34921@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34922
34923 /* Setup the rndis set */
34924 halt = &request->request_msg.msg.halt_req;
34925- halt->req_id = atomic_inc_return(&dev->new_req_id);
34926+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34927
34928 /* Ignore return since this msg is optional. */
34929 rndis_filter_send_request(dev, request);
34930diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
34931index 9e8f010..af9efb5 100644
34932--- a/drivers/staging/iio/buffer_generic.h
34933+++ b/drivers/staging/iio/buffer_generic.h
34934@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
34935
34936 int (*is_enabled)(struct iio_buffer *buffer);
34937 int (*enable)(struct iio_buffer *buffer);
34938-};
34939+} __no_const;
34940
34941 /**
34942 * struct iio_buffer_setup_ops - buffer setup related callbacks
34943diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
34944index 8b307b4..a97ac91 100644
34945--- a/drivers/staging/octeon/ethernet-rx.c
34946+++ b/drivers/staging/octeon/ethernet-rx.c
34947@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34948 /* Increment RX stats for virtual ports */
34949 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34950 #ifdef CONFIG_64BIT
34951- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34952- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34953+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34954+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34955 #else
34956- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34957- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34958+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34959+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34960 #endif
34961 }
34962 netif_receive_skb(skb);
34963@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34964 dev->name);
34965 */
34966 #ifdef CONFIG_64BIT
34967- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34968+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34969 #else
34970- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34971+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34972 #endif
34973 dev_kfree_skb_irq(skb);
34974 }
34975diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
34976index 076f866..2308070 100644
34977--- a/drivers/staging/octeon/ethernet.c
34978+++ b/drivers/staging/octeon/ethernet.c
34979@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
34980 * since the RX tasklet also increments it.
34981 */
34982 #ifdef CONFIG_64BIT
34983- atomic64_add(rx_status.dropped_packets,
34984- (atomic64_t *)&priv->stats.rx_dropped);
34985+ atomic64_add_unchecked(rx_status.dropped_packets,
34986+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34987 #else
34988- atomic_add(rx_status.dropped_packets,
34989- (atomic_t *)&priv->stats.rx_dropped);
34990+ atomic_add_unchecked(rx_status.dropped_packets,
34991+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34992 #endif
34993 }
34994
34995diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
34996index 7a19555..466456d 100644
34997--- a/drivers/staging/pohmelfs/inode.c
34998+++ b/drivers/staging/pohmelfs/inode.c
34999@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35000 mutex_init(&psb->mcache_lock);
35001 psb->mcache_root = RB_ROOT;
35002 psb->mcache_timeout = msecs_to_jiffies(5000);
35003- atomic_long_set(&psb->mcache_gen, 0);
35004+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35005
35006 psb->trans_max_pages = 100;
35007
35008@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35009 INIT_LIST_HEAD(&psb->crypto_ready_list);
35010 INIT_LIST_HEAD(&psb->crypto_active_list);
35011
35012- atomic_set(&psb->trans_gen, 1);
35013+ atomic_set_unchecked(&psb->trans_gen, 1);
35014 atomic_long_set(&psb->total_inodes, 0);
35015
35016 mutex_init(&psb->state_lock);
35017diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35018index e22665c..a2a9390 100644
35019--- a/drivers/staging/pohmelfs/mcache.c
35020+++ b/drivers/staging/pohmelfs/mcache.c
35021@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35022 m->data = data;
35023 m->start = start;
35024 m->size = size;
35025- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35026+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35027
35028 mutex_lock(&psb->mcache_lock);
35029 err = pohmelfs_mcache_insert(psb, m);
35030diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35031index 985b6b7..7699e05 100644
35032--- a/drivers/staging/pohmelfs/netfs.h
35033+++ b/drivers/staging/pohmelfs/netfs.h
35034@@ -571,14 +571,14 @@ struct pohmelfs_config;
35035 struct pohmelfs_sb {
35036 struct rb_root mcache_root;
35037 struct mutex mcache_lock;
35038- atomic_long_t mcache_gen;
35039+ atomic_long_unchecked_t mcache_gen;
35040 unsigned long mcache_timeout;
35041
35042 unsigned int idx;
35043
35044 unsigned int trans_retries;
35045
35046- atomic_t trans_gen;
35047+ atomic_unchecked_t trans_gen;
35048
35049 unsigned int crypto_attached_size;
35050 unsigned int crypto_align_size;
35051diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35052index 06c1a74..866eebc 100644
35053--- a/drivers/staging/pohmelfs/trans.c
35054+++ b/drivers/staging/pohmelfs/trans.c
35055@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35056 int err;
35057 struct netfs_cmd *cmd = t->iovec.iov_base;
35058
35059- t->gen = atomic_inc_return(&psb->trans_gen);
35060+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35061
35062 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35063 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35064diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35065index 86308a0..feaa925 100644
35066--- a/drivers/staging/rtl8712/rtl871x_io.h
35067+++ b/drivers/staging/rtl8712/rtl871x_io.h
35068@@ -108,7 +108,7 @@ struct _io_ops {
35069 u8 *pmem);
35070 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35071 u8 *pmem);
35072-};
35073+} __no_const;
35074
35075 struct io_req {
35076 struct list_head list;
35077diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35078index c7b5e8b..783d6cb 100644
35079--- a/drivers/staging/sbe-2t3e3/netdev.c
35080+++ b/drivers/staging/sbe-2t3e3/netdev.c
35081@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35082 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35083
35084 if (rlen)
35085- if (copy_to_user(data, &resp, rlen))
35086+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35087 return -EFAULT;
35088
35089 return 0;
35090diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35091index be21617..0954e45 100644
35092--- a/drivers/staging/usbip/usbip_common.h
35093+++ b/drivers/staging/usbip/usbip_common.h
35094@@ -289,7 +289,7 @@ struct usbip_device {
35095 void (*shutdown)(struct usbip_device *);
35096 void (*reset)(struct usbip_device *);
35097 void (*unusable)(struct usbip_device *);
35098- } eh_ops;
35099+ } __no_const eh_ops;
35100 };
35101
35102 #if 0
35103diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35104index 88b3298..3783eee 100644
35105--- a/drivers/staging/usbip/vhci.h
35106+++ b/drivers/staging/usbip/vhci.h
35107@@ -88,7 +88,7 @@ struct vhci_hcd {
35108 unsigned resuming:1;
35109 unsigned long re_timeout;
35110
35111- atomic_t seqnum;
35112+ atomic_unchecked_t seqnum;
35113
35114 /*
35115 * NOTE:
35116diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35117index 2ee97e2..0420b86 100644
35118--- a/drivers/staging/usbip/vhci_hcd.c
35119+++ b/drivers/staging/usbip/vhci_hcd.c
35120@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35121 return;
35122 }
35123
35124- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35125+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35126 if (priv->seqnum == 0xffff)
35127 dev_info(&urb->dev->dev, "seqnum max\n");
35128
35129@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35130 return -ENOMEM;
35131 }
35132
35133- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35134+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35135 if (unlink->seqnum == 0xffff)
35136 pr_info("seqnum max\n");
35137
35138@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35139 vdev->rhport = rhport;
35140 }
35141
35142- atomic_set(&vhci->seqnum, 0);
35143+ atomic_set_unchecked(&vhci->seqnum, 0);
35144 spin_lock_init(&vhci->lock);
35145
35146 hcd->power_budget = 0; /* no limit */
35147diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35148index 3872b8c..fe6d2f4 100644
35149--- a/drivers/staging/usbip/vhci_rx.c
35150+++ b/drivers/staging/usbip/vhci_rx.c
35151@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35152 if (!urb) {
35153 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35154 pr_info("max seqnum %d\n",
35155- atomic_read(&the_controller->seqnum));
35156+ atomic_read_unchecked(&the_controller->seqnum));
35157 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35158 return;
35159 }
35160diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35161index 7735027..30eed13 100644
35162--- a/drivers/staging/vt6655/hostap.c
35163+++ b/drivers/staging/vt6655/hostap.c
35164@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35165 *
35166 */
35167
35168+static net_device_ops_no_const apdev_netdev_ops;
35169+
35170 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35171 {
35172 PSDevice apdev_priv;
35173 struct net_device *dev = pDevice->dev;
35174 int ret;
35175- const struct net_device_ops apdev_netdev_ops = {
35176- .ndo_start_xmit = pDevice->tx_80211,
35177- };
35178
35179 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35180
35181@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35182 *apdev_priv = *pDevice;
35183 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35184
35185+ /* only half broken now */
35186+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35187 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35188
35189 pDevice->apdev->type = ARPHRD_IEEE80211;
35190diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35191index 51b5adf..098e320 100644
35192--- a/drivers/staging/vt6656/hostap.c
35193+++ b/drivers/staging/vt6656/hostap.c
35194@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35195 *
35196 */
35197
35198+static net_device_ops_no_const apdev_netdev_ops;
35199+
35200 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35201 {
35202 PSDevice apdev_priv;
35203 struct net_device *dev = pDevice->dev;
35204 int ret;
35205- const struct net_device_ops apdev_netdev_ops = {
35206- .ndo_start_xmit = pDevice->tx_80211,
35207- };
35208
35209 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35210
35211@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35212 *apdev_priv = *pDevice;
35213 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35214
35215+ /* only half broken now */
35216+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35217 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35218
35219 pDevice->apdev->type = ARPHRD_IEEE80211;
35220diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35221index 7843dfd..3db105f 100644
35222--- a/drivers/staging/wlan-ng/hfa384x_usb.c
35223+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35224@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35225
35226 struct usbctlx_completor {
35227 int (*complete) (struct usbctlx_completor *);
35228-};
35229+} __no_const;
35230
35231 static int
35232 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35233diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35234index 1ca66ea..76f1343 100644
35235--- a/drivers/staging/zcache/tmem.c
35236+++ b/drivers/staging/zcache/tmem.c
35237@@ -39,7 +39,7 @@
35238 * A tmem host implementation must use this function to register callbacks
35239 * for memory allocation.
35240 */
35241-static struct tmem_hostops tmem_hostops;
35242+static tmem_hostops_no_const tmem_hostops;
35243
35244 static void tmem_objnode_tree_init(void);
35245
35246@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35247 * A tmem host implementation must use this function to register
35248 * callbacks for a page-accessible memory (PAM) implementation
35249 */
35250-static struct tmem_pamops tmem_pamops;
35251+static tmem_pamops_no_const tmem_pamops;
35252
35253 void tmem_register_pamops(struct tmem_pamops *m)
35254 {
35255diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35256index ed147c4..94fc3c6 100644
35257--- a/drivers/staging/zcache/tmem.h
35258+++ b/drivers/staging/zcache/tmem.h
35259@@ -180,6 +180,7 @@ struct tmem_pamops {
35260 void (*new_obj)(struct tmem_obj *);
35261 int (*replace_in_obj)(void *, struct tmem_obj *);
35262 };
35263+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35264 extern void tmem_register_pamops(struct tmem_pamops *m);
35265
35266 /* memory allocation methods provided by the host implementation */
35267@@ -189,6 +190,7 @@ struct tmem_hostops {
35268 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35269 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35270 };
35271+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35272 extern void tmem_register_hostops(struct tmem_hostops *m);
35273
35274 /* core tmem accessor functions */
35275diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35276index 8599545..7761358 100644
35277--- a/drivers/target/iscsi/iscsi_target.c
35278+++ b/drivers/target/iscsi/iscsi_target.c
35279@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35280 * outstanding_r2ts reaches zero, go ahead and send the delayed
35281 * TASK_ABORTED status.
35282 */
35283- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35284+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35285 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35286 if (--cmd->outstanding_r2ts < 1) {
35287 iscsit_stop_dataout_timer(cmd);
35288diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35289index 6845228..df77141 100644
35290--- a/drivers/target/target_core_tmr.c
35291+++ b/drivers/target/target_core_tmr.c
35292@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35293 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35294 cmd->t_task_list_num,
35295 atomic_read(&cmd->t_task_cdbs_left),
35296- atomic_read(&cmd->t_task_cdbs_sent),
35297+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35298 atomic_read(&cmd->t_transport_active),
35299 atomic_read(&cmd->t_transport_stop),
35300 atomic_read(&cmd->t_transport_sent));
35301@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35302 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35303 " task: %p, t_fe_count: %d dev: %p\n", task,
35304 fe_count, dev);
35305- atomic_set(&cmd->t_transport_aborted, 1);
35306+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35307 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35308
35309 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35310@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35311 }
35312 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35313 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35314- atomic_set(&cmd->t_transport_aborted, 1);
35315+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35316 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35317
35318 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35319diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35320index e87d0eb..856cbcc 100644
35321--- a/drivers/target/target_core_transport.c
35322+++ b/drivers/target/target_core_transport.c
35323@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35324
35325 dev->queue_depth = dev_limits->queue_depth;
35326 atomic_set(&dev->depth_left, dev->queue_depth);
35327- atomic_set(&dev->dev_ordered_id, 0);
35328+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35329
35330 se_dev_set_default_attribs(dev, dev_limits);
35331
35332@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35333 * Used to determine when ORDERED commands should go from
35334 * Dormant to Active status.
35335 */
35336- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35337+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35338 smp_mb__after_atomic_inc();
35339 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35340 cmd->se_ordered_id, cmd->sam_task_attr,
35341@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35342 " t_transport_active: %d t_transport_stop: %d"
35343 " t_transport_sent: %d\n", cmd->t_task_list_num,
35344 atomic_read(&cmd->t_task_cdbs_left),
35345- atomic_read(&cmd->t_task_cdbs_sent),
35346+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35347 atomic_read(&cmd->t_task_cdbs_ex_left),
35348 atomic_read(&cmd->t_transport_active),
35349 atomic_read(&cmd->t_transport_stop),
35350@@ -2089,9 +2089,9 @@ check_depth:
35351
35352 spin_lock_irqsave(&cmd->t_state_lock, flags);
35353 task->task_flags |= (TF_ACTIVE | TF_SENT);
35354- atomic_inc(&cmd->t_task_cdbs_sent);
35355+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35356
35357- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35358+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35359 cmd->t_task_list_num)
35360 atomic_set(&cmd->t_transport_sent, 1);
35361
35362@@ -4260,7 +4260,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35363 atomic_set(&cmd->transport_lun_stop, 0);
35364 }
35365 if (!atomic_read(&cmd->t_transport_active) ||
35366- atomic_read(&cmd->t_transport_aborted)) {
35367+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
35368 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35369 return false;
35370 }
35371@@ -4509,7 +4509,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35372 {
35373 int ret = 0;
35374
35375- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35376+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35377 if (!send_status ||
35378 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35379 return 1;
35380@@ -4546,7 +4546,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35381 */
35382 if (cmd->data_direction == DMA_TO_DEVICE) {
35383 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35384- atomic_inc(&cmd->t_transport_aborted);
35385+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35386 smp_mb__after_atomic_inc();
35387 }
35388 }
35389diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35390index b9040be..e3f5aab 100644
35391--- a/drivers/tty/hvc/hvcs.c
35392+++ b/drivers/tty/hvc/hvcs.c
35393@@ -83,6 +83,7 @@
35394 #include <asm/hvcserver.h>
35395 #include <asm/uaccess.h>
35396 #include <asm/vio.h>
35397+#include <asm/local.h>
35398
35399 /*
35400 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35401@@ -270,7 +271,7 @@ struct hvcs_struct {
35402 unsigned int index;
35403
35404 struct tty_struct *tty;
35405- int open_count;
35406+ local_t open_count;
35407
35408 /*
35409 * Used to tell the driver kernel_thread what operations need to take
35410@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35411
35412 spin_lock_irqsave(&hvcsd->lock, flags);
35413
35414- if (hvcsd->open_count > 0) {
35415+ if (local_read(&hvcsd->open_count) > 0) {
35416 spin_unlock_irqrestore(&hvcsd->lock, flags);
35417 printk(KERN_INFO "HVCS: vterm state unchanged. "
35418 "The hvcs device node is still in use.\n");
35419@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35420 if ((retval = hvcs_partner_connect(hvcsd)))
35421 goto error_release;
35422
35423- hvcsd->open_count = 1;
35424+ local_set(&hvcsd->open_count, 1);
35425 hvcsd->tty = tty;
35426 tty->driver_data = hvcsd;
35427
35428@@ -1179,7 +1180,7 @@ fast_open:
35429
35430 spin_lock_irqsave(&hvcsd->lock, flags);
35431 kref_get(&hvcsd->kref);
35432- hvcsd->open_count++;
35433+ local_inc(&hvcsd->open_count);
35434 hvcsd->todo_mask |= HVCS_SCHED_READ;
35435 spin_unlock_irqrestore(&hvcsd->lock, flags);
35436
35437@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35438 hvcsd = tty->driver_data;
35439
35440 spin_lock_irqsave(&hvcsd->lock, flags);
35441- if (--hvcsd->open_count == 0) {
35442+ if (local_dec_and_test(&hvcsd->open_count)) {
35443
35444 vio_disable_interrupts(hvcsd->vdev);
35445
35446@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35447 free_irq(irq, hvcsd);
35448 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35449 return;
35450- } else if (hvcsd->open_count < 0) {
35451+ } else if (local_read(&hvcsd->open_count) < 0) {
35452 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35453 " is missmanaged.\n",
35454- hvcsd->vdev->unit_address, hvcsd->open_count);
35455+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35456 }
35457
35458 spin_unlock_irqrestore(&hvcsd->lock, flags);
35459@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35460
35461 spin_lock_irqsave(&hvcsd->lock, flags);
35462 /* Preserve this so that we know how many kref refs to put */
35463- temp_open_count = hvcsd->open_count;
35464+ temp_open_count = local_read(&hvcsd->open_count);
35465
35466 /*
35467 * Don't kref put inside the spinlock because the destruction
35468@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35469 hvcsd->tty->driver_data = NULL;
35470 hvcsd->tty = NULL;
35471
35472- hvcsd->open_count = 0;
35473+ local_set(&hvcsd->open_count, 0);
35474
35475 /* This will drop any buffered data on the floor which is OK in a hangup
35476 * scenario. */
35477@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35478 * the middle of a write operation? This is a crummy place to do this
35479 * but we want to keep it all in the spinlock.
35480 */
35481- if (hvcsd->open_count <= 0) {
35482+ if (local_read(&hvcsd->open_count) <= 0) {
35483 spin_unlock_irqrestore(&hvcsd->lock, flags);
35484 return -ENODEV;
35485 }
35486@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35487 {
35488 struct hvcs_struct *hvcsd = tty->driver_data;
35489
35490- if (!hvcsd || hvcsd->open_count <= 0)
35491+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35492 return 0;
35493
35494 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35495diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35496index ef92869..f4ebd88 100644
35497--- a/drivers/tty/ipwireless/tty.c
35498+++ b/drivers/tty/ipwireless/tty.c
35499@@ -29,6 +29,7 @@
35500 #include <linux/tty_driver.h>
35501 #include <linux/tty_flip.h>
35502 #include <linux/uaccess.h>
35503+#include <asm/local.h>
35504
35505 #include "tty.h"
35506 #include "network.h"
35507@@ -51,7 +52,7 @@ struct ipw_tty {
35508 int tty_type;
35509 struct ipw_network *network;
35510 struct tty_struct *linux_tty;
35511- int open_count;
35512+ local_t open_count;
35513 unsigned int control_lines;
35514 struct mutex ipw_tty_mutex;
35515 int tx_bytes_queued;
35516@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35517 mutex_unlock(&tty->ipw_tty_mutex);
35518 return -ENODEV;
35519 }
35520- if (tty->open_count == 0)
35521+ if (local_read(&tty->open_count) == 0)
35522 tty->tx_bytes_queued = 0;
35523
35524- tty->open_count++;
35525+ local_inc(&tty->open_count);
35526
35527 tty->linux_tty = linux_tty;
35528 linux_tty->driver_data = tty;
35529@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35530
35531 static void do_ipw_close(struct ipw_tty *tty)
35532 {
35533- tty->open_count--;
35534-
35535- if (tty->open_count == 0) {
35536+ if (local_dec_return(&tty->open_count) == 0) {
35537 struct tty_struct *linux_tty = tty->linux_tty;
35538
35539 if (linux_tty != NULL) {
35540@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35541 return;
35542
35543 mutex_lock(&tty->ipw_tty_mutex);
35544- if (tty->open_count == 0) {
35545+ if (local_read(&tty->open_count) == 0) {
35546 mutex_unlock(&tty->ipw_tty_mutex);
35547 return;
35548 }
35549@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35550 return;
35551 }
35552
35553- if (!tty->open_count) {
35554+ if (!local_read(&tty->open_count)) {
35555 mutex_unlock(&tty->ipw_tty_mutex);
35556 return;
35557 }
35558@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35559 return -ENODEV;
35560
35561 mutex_lock(&tty->ipw_tty_mutex);
35562- if (!tty->open_count) {
35563+ if (!local_read(&tty->open_count)) {
35564 mutex_unlock(&tty->ipw_tty_mutex);
35565 return -EINVAL;
35566 }
35567@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35568 if (!tty)
35569 return -ENODEV;
35570
35571- if (!tty->open_count)
35572+ if (!local_read(&tty->open_count))
35573 return -EINVAL;
35574
35575 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35576@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35577 if (!tty)
35578 return 0;
35579
35580- if (!tty->open_count)
35581+ if (!local_read(&tty->open_count))
35582 return 0;
35583
35584 return tty->tx_bytes_queued;
35585@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35586 if (!tty)
35587 return -ENODEV;
35588
35589- if (!tty->open_count)
35590+ if (!local_read(&tty->open_count))
35591 return -EINVAL;
35592
35593 return get_control_lines(tty);
35594@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35595 if (!tty)
35596 return -ENODEV;
35597
35598- if (!tty->open_count)
35599+ if (!local_read(&tty->open_count))
35600 return -EINVAL;
35601
35602 return set_control_lines(tty, set, clear);
35603@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35604 if (!tty)
35605 return -ENODEV;
35606
35607- if (!tty->open_count)
35608+ if (!local_read(&tty->open_count))
35609 return -EINVAL;
35610
35611 /* FIXME: Exactly how is the tty object locked here .. */
35612@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35613 against a parallel ioctl etc */
35614 mutex_lock(&ttyj->ipw_tty_mutex);
35615 }
35616- while (ttyj->open_count)
35617+ while (local_read(&ttyj->open_count))
35618 do_ipw_close(ttyj);
35619 ipwireless_disassociate_network_ttys(network,
35620 ttyj->channel_idx);
35621diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35622index fc7bbba..9527e93 100644
35623--- a/drivers/tty/n_gsm.c
35624+++ b/drivers/tty/n_gsm.c
35625@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35626 kref_init(&dlci->ref);
35627 mutex_init(&dlci->mutex);
35628 dlci->fifo = &dlci->_fifo;
35629- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35630+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35631 kfree(dlci);
35632 return NULL;
35633 }
35634diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35635index 39d6ab6..eb97f41 100644
35636--- a/drivers/tty/n_tty.c
35637+++ b/drivers/tty/n_tty.c
35638@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35639 {
35640 *ops = tty_ldisc_N_TTY;
35641 ops->owner = NULL;
35642- ops->refcount = ops->flags = 0;
35643+ atomic_set(&ops->refcount, 0);
35644+ ops->flags = 0;
35645 }
35646 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35647diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35648index e18604b..a7d5a11 100644
35649--- a/drivers/tty/pty.c
35650+++ b/drivers/tty/pty.c
35651@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35652 register_sysctl_table(pty_root_table);
35653
35654 /* Now create the /dev/ptmx special device */
35655+ pax_open_kernel();
35656 tty_default_fops(&ptmx_fops);
35657- ptmx_fops.open = ptmx_open;
35658+ *(void **)&ptmx_fops.open = ptmx_open;
35659+ pax_close_kernel();
35660
35661 cdev_init(&ptmx_cdev, &ptmx_fops);
35662 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35663diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35664index 2b42a01..32a2ed3 100644
35665--- a/drivers/tty/serial/kgdboc.c
35666+++ b/drivers/tty/serial/kgdboc.c
35667@@ -24,8 +24,9 @@
35668 #define MAX_CONFIG_LEN 40
35669
35670 static struct kgdb_io kgdboc_io_ops;
35671+static struct kgdb_io kgdboc_io_ops_console;
35672
35673-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35674+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35675 static int configured = -1;
35676
35677 static char config[MAX_CONFIG_LEN];
35678@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35679 kgdboc_unregister_kbd();
35680 if (configured == 1)
35681 kgdb_unregister_io_module(&kgdboc_io_ops);
35682+ else if (configured == 2)
35683+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35684 }
35685
35686 static int configure_kgdboc(void)
35687@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35688 int err;
35689 char *cptr = config;
35690 struct console *cons;
35691+ int is_console = 0;
35692
35693 err = kgdboc_option_setup(config);
35694 if (err || !strlen(config) || isspace(config[0]))
35695 goto noconfig;
35696
35697 err = -ENODEV;
35698- kgdboc_io_ops.is_console = 0;
35699 kgdb_tty_driver = NULL;
35700
35701 kgdboc_use_kms = 0;
35702@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35703 int idx;
35704 if (cons->device && cons->device(cons, &idx) == p &&
35705 idx == tty_line) {
35706- kgdboc_io_ops.is_console = 1;
35707+ is_console = 1;
35708 break;
35709 }
35710 cons = cons->next;
35711@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35712 kgdb_tty_line = tty_line;
35713
35714 do_register:
35715- err = kgdb_register_io_module(&kgdboc_io_ops);
35716+ if (is_console) {
35717+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35718+ configured = 2;
35719+ } else {
35720+ err = kgdb_register_io_module(&kgdboc_io_ops);
35721+ configured = 1;
35722+ }
35723 if (err)
35724 goto noconfig;
35725
35726- configured = 1;
35727-
35728 return 0;
35729
35730 noconfig:
35731@@ -213,7 +220,7 @@ noconfig:
35732 static int __init init_kgdboc(void)
35733 {
35734 /* Already configured? */
35735- if (configured == 1)
35736+ if (configured >= 1)
35737 return 0;
35738
35739 return configure_kgdboc();
35740@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35741 if (config[len - 1] == '\n')
35742 config[len - 1] = '\0';
35743
35744- if (configured == 1)
35745+ if (configured >= 1)
35746 cleanup_kgdboc();
35747
35748 /* Go and configure with the new params. */
35749@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35750 .post_exception = kgdboc_post_exp_handler,
35751 };
35752
35753+static struct kgdb_io kgdboc_io_ops_console = {
35754+ .name = "kgdboc",
35755+ .read_char = kgdboc_get_char,
35756+ .write_char = kgdboc_put_char,
35757+ .pre_exception = kgdboc_pre_exp_handler,
35758+ .post_exception = kgdboc_post_exp_handler,
35759+ .is_console = 1
35760+};
35761+
35762 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35763 /* This is only available if kgdboc is a built in for early debugging */
35764 static int __init kgdboc_early_init(char *opt)
35765diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35766index 05085be..67eadb0 100644
35767--- a/drivers/tty/tty_io.c
35768+++ b/drivers/tty/tty_io.c
35769@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35770
35771 void tty_default_fops(struct file_operations *fops)
35772 {
35773- *fops = tty_fops;
35774+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35775 }
35776
35777 /*
35778diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35779index 8e0924f..4204eb4 100644
35780--- a/drivers/tty/tty_ldisc.c
35781+++ b/drivers/tty/tty_ldisc.c
35782@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35783 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35784 struct tty_ldisc_ops *ldo = ld->ops;
35785
35786- ldo->refcount--;
35787+ atomic_dec(&ldo->refcount);
35788 module_put(ldo->owner);
35789 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35790
35791@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35792 spin_lock_irqsave(&tty_ldisc_lock, flags);
35793 tty_ldiscs[disc] = new_ldisc;
35794 new_ldisc->num = disc;
35795- new_ldisc->refcount = 0;
35796+ atomic_set(&new_ldisc->refcount, 0);
35797 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35798
35799 return ret;
35800@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35801 return -EINVAL;
35802
35803 spin_lock_irqsave(&tty_ldisc_lock, flags);
35804- if (tty_ldiscs[disc]->refcount)
35805+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35806 ret = -EBUSY;
35807 else
35808 tty_ldiscs[disc] = NULL;
35809@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35810 if (ldops) {
35811 ret = ERR_PTR(-EAGAIN);
35812 if (try_module_get(ldops->owner)) {
35813- ldops->refcount++;
35814+ atomic_inc(&ldops->refcount);
35815 ret = ldops;
35816 }
35817 }
35818@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35819 unsigned long flags;
35820
35821 spin_lock_irqsave(&tty_ldisc_lock, flags);
35822- ldops->refcount--;
35823+ atomic_dec(&ldops->refcount);
35824 module_put(ldops->owner);
35825 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35826 }
35827diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35828index a605549..6bd3c96 100644
35829--- a/drivers/tty/vt/keyboard.c
35830+++ b/drivers/tty/vt/keyboard.c
35831@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35832 kbd->kbdmode == VC_OFF) &&
35833 value != KVAL(K_SAK))
35834 return; /* SAK is allowed even in raw mode */
35835+
35836+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35837+ {
35838+ void *func = fn_handler[value];
35839+ if (func == fn_show_state || func == fn_show_ptregs ||
35840+ func == fn_show_mem)
35841+ return;
35842+ }
35843+#endif
35844+
35845 fn_handler[value](vc);
35846 }
35847
35848diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35849index 5e096f4..0da1363 100644
35850--- a/drivers/tty/vt/vt_ioctl.c
35851+++ b/drivers/tty/vt/vt_ioctl.c
35852@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35853 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35854 return -EFAULT;
35855
35856- if (!capable(CAP_SYS_TTY_CONFIG))
35857- perm = 0;
35858-
35859 switch (cmd) {
35860 case KDGKBENT:
35861 key_map = key_maps[s];
35862@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35863 val = (i ? K_HOLE : K_NOSUCHMAP);
35864 return put_user(val, &user_kbe->kb_value);
35865 case KDSKBENT:
35866+ if (!capable(CAP_SYS_TTY_CONFIG))
35867+ perm = 0;
35868+
35869 if (!perm)
35870 return -EPERM;
35871 if (!i && v == K_NOSUCHMAP) {
35872@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35873 int i, j, k;
35874 int ret;
35875
35876- if (!capable(CAP_SYS_TTY_CONFIG))
35877- perm = 0;
35878-
35879 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35880 if (!kbs) {
35881 ret = -ENOMEM;
35882@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35883 kfree(kbs);
35884 return ((p && *p) ? -EOVERFLOW : 0);
35885 case KDSKBSENT:
35886+ if (!capable(CAP_SYS_TTY_CONFIG))
35887+ perm = 0;
35888+
35889 if (!perm) {
35890 ret = -EPERM;
35891 goto reterr;
35892diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35893index a783d53..cb30d94 100644
35894--- a/drivers/uio/uio.c
35895+++ b/drivers/uio/uio.c
35896@@ -25,6 +25,7 @@
35897 #include <linux/kobject.h>
35898 #include <linux/cdev.h>
35899 #include <linux/uio_driver.h>
35900+#include <asm/local.h>
35901
35902 #define UIO_MAX_DEVICES (1U << MINORBITS)
35903
35904@@ -32,10 +33,10 @@ struct uio_device {
35905 struct module *owner;
35906 struct device *dev;
35907 int minor;
35908- atomic_t event;
35909+ atomic_unchecked_t event;
35910 struct fasync_struct *async_queue;
35911 wait_queue_head_t wait;
35912- int vma_count;
35913+ local_t vma_count;
35914 struct uio_info *info;
35915 struct kobject *map_dir;
35916 struct kobject *portio_dir;
35917@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
35918 struct device_attribute *attr, char *buf)
35919 {
35920 struct uio_device *idev = dev_get_drvdata(dev);
35921- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35922+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35923 }
35924
35925 static struct device_attribute uio_class_attributes[] = {
35926@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
35927 {
35928 struct uio_device *idev = info->uio_dev;
35929
35930- atomic_inc(&idev->event);
35931+ atomic_inc_unchecked(&idev->event);
35932 wake_up_interruptible(&idev->wait);
35933 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35934 }
35935@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
35936 }
35937
35938 listener->dev = idev;
35939- listener->event_count = atomic_read(&idev->event);
35940+ listener->event_count = atomic_read_unchecked(&idev->event);
35941 filep->private_data = listener;
35942
35943 if (idev->info->open) {
35944@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
35945 return -EIO;
35946
35947 poll_wait(filep, &idev->wait, wait);
35948- if (listener->event_count != atomic_read(&idev->event))
35949+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35950 return POLLIN | POLLRDNORM;
35951 return 0;
35952 }
35953@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
35954 do {
35955 set_current_state(TASK_INTERRUPTIBLE);
35956
35957- event_count = atomic_read(&idev->event);
35958+ event_count = atomic_read_unchecked(&idev->event);
35959 if (event_count != listener->event_count) {
35960 if (copy_to_user(buf, &event_count, count))
35961 retval = -EFAULT;
35962@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
35963 static void uio_vma_open(struct vm_area_struct *vma)
35964 {
35965 struct uio_device *idev = vma->vm_private_data;
35966- idev->vma_count++;
35967+ local_inc(&idev->vma_count);
35968 }
35969
35970 static void uio_vma_close(struct vm_area_struct *vma)
35971 {
35972 struct uio_device *idev = vma->vm_private_data;
35973- idev->vma_count--;
35974+ local_dec(&idev->vma_count);
35975 }
35976
35977 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35978@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
35979 idev->owner = owner;
35980 idev->info = info;
35981 init_waitqueue_head(&idev->wait);
35982- atomic_set(&idev->event, 0);
35983+ atomic_set_unchecked(&idev->event, 0);
35984
35985 ret = uio_get_minor(idev);
35986 if (ret)
35987diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
35988index a845f8b..4f54072 100644
35989--- a/drivers/usb/atm/cxacru.c
35990+++ b/drivers/usb/atm/cxacru.c
35991@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
35992 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35993 if (ret < 2)
35994 return -EINVAL;
35995- if (index < 0 || index > 0x7f)
35996+ if (index > 0x7f)
35997 return -EINVAL;
35998 pos += tmp;
35999
36000diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36001index d3448ca..d2864ca 100644
36002--- a/drivers/usb/atm/usbatm.c
36003+++ b/drivers/usb/atm/usbatm.c
36004@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36005 if (printk_ratelimit())
36006 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36007 __func__, vpi, vci);
36008- atomic_inc(&vcc->stats->rx_err);
36009+ atomic_inc_unchecked(&vcc->stats->rx_err);
36010 return;
36011 }
36012
36013@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36014 if (length > ATM_MAX_AAL5_PDU) {
36015 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36016 __func__, length, vcc);
36017- atomic_inc(&vcc->stats->rx_err);
36018+ atomic_inc_unchecked(&vcc->stats->rx_err);
36019 goto out;
36020 }
36021
36022@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36023 if (sarb->len < pdu_length) {
36024 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36025 __func__, pdu_length, sarb->len, vcc);
36026- atomic_inc(&vcc->stats->rx_err);
36027+ atomic_inc_unchecked(&vcc->stats->rx_err);
36028 goto out;
36029 }
36030
36031 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36032 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36033 __func__, vcc);
36034- atomic_inc(&vcc->stats->rx_err);
36035+ atomic_inc_unchecked(&vcc->stats->rx_err);
36036 goto out;
36037 }
36038
36039@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36040 if (printk_ratelimit())
36041 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36042 __func__, length);
36043- atomic_inc(&vcc->stats->rx_drop);
36044+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36045 goto out;
36046 }
36047
36048@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36049
36050 vcc->push(vcc, skb);
36051
36052- atomic_inc(&vcc->stats->rx);
36053+ atomic_inc_unchecked(&vcc->stats->rx);
36054 out:
36055 skb_trim(sarb, 0);
36056 }
36057@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36058 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36059
36060 usbatm_pop(vcc, skb);
36061- atomic_inc(&vcc->stats->tx);
36062+ atomic_inc_unchecked(&vcc->stats->tx);
36063
36064 skb = skb_dequeue(&instance->sndqueue);
36065 }
36066@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36067 if (!left--)
36068 return sprintf(page,
36069 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36070- atomic_read(&atm_dev->stats.aal5.tx),
36071- atomic_read(&atm_dev->stats.aal5.tx_err),
36072- atomic_read(&atm_dev->stats.aal5.rx),
36073- atomic_read(&atm_dev->stats.aal5.rx_err),
36074- atomic_read(&atm_dev->stats.aal5.rx_drop));
36075+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36076+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36077+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36078+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36079+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36080
36081 if (!left--) {
36082 if (instance->disconnected)
36083diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36084index d956965..4179a77 100644
36085--- a/drivers/usb/core/devices.c
36086+++ b/drivers/usb/core/devices.c
36087@@ -126,7 +126,7 @@ static const char format_endpt[] =
36088 * time it gets called.
36089 */
36090 static struct device_connect_event {
36091- atomic_t count;
36092+ atomic_unchecked_t count;
36093 wait_queue_head_t wait;
36094 } device_event = {
36095 .count = ATOMIC_INIT(1),
36096@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36097
36098 void usbfs_conn_disc_event(void)
36099 {
36100- atomic_add(2, &device_event.count);
36101+ atomic_add_unchecked(2, &device_event.count);
36102 wake_up(&device_event.wait);
36103 }
36104
36105@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36106
36107 poll_wait(file, &device_event.wait, wait);
36108
36109- event_count = atomic_read(&device_event.count);
36110+ event_count = atomic_read_unchecked(&device_event.count);
36111 if (file->f_version != event_count) {
36112 file->f_version = event_count;
36113 return POLLIN | POLLRDNORM;
36114diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36115index b3bdfed..a9460e0 100644
36116--- a/drivers/usb/core/message.c
36117+++ b/drivers/usb/core/message.c
36118@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36119 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36120 if (buf) {
36121 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36122- if (len > 0) {
36123- smallbuf = kmalloc(++len, GFP_NOIO);
36124+ if (len++ > 0) {
36125+ smallbuf = kmalloc(len, GFP_NOIO);
36126 if (!smallbuf)
36127 return buf;
36128 memcpy(smallbuf, buf, len);
36129diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36130index 1fc8f12..20647c1 100644
36131--- a/drivers/usb/early/ehci-dbgp.c
36132+++ b/drivers/usb/early/ehci-dbgp.c
36133@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36134
36135 #ifdef CONFIG_KGDB
36136 static struct kgdb_io kgdbdbgp_io_ops;
36137-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36138+static struct kgdb_io kgdbdbgp_io_ops_console;
36139+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36140 #else
36141 #define dbgp_kgdb_mode (0)
36142 #endif
36143@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36144 .write_char = kgdbdbgp_write_char,
36145 };
36146
36147+static struct kgdb_io kgdbdbgp_io_ops_console = {
36148+ .name = "kgdbdbgp",
36149+ .read_char = kgdbdbgp_read_char,
36150+ .write_char = kgdbdbgp_write_char,
36151+ .is_console = 1
36152+};
36153+
36154 static int kgdbdbgp_wait_time;
36155
36156 static int __init kgdbdbgp_parse_config(char *str)
36157@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36158 ptr++;
36159 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36160 }
36161- kgdb_register_io_module(&kgdbdbgp_io_ops);
36162- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36163+ if (early_dbgp_console.index != -1)
36164+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36165+ else
36166+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36167
36168 return 0;
36169 }
36170diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36171index d6bea3e..60b250e 100644
36172--- a/drivers/usb/wusbcore/wa-hc.h
36173+++ b/drivers/usb/wusbcore/wa-hc.h
36174@@ -192,7 +192,7 @@ struct wahc {
36175 struct list_head xfer_delayed_list;
36176 spinlock_t xfer_list_lock;
36177 struct work_struct xfer_work;
36178- atomic_t xfer_id_count;
36179+ atomic_unchecked_t xfer_id_count;
36180 };
36181
36182
36183@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36184 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36185 spin_lock_init(&wa->xfer_list_lock);
36186 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36187- atomic_set(&wa->xfer_id_count, 1);
36188+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36189 }
36190
36191 /**
36192diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36193index 57c01ab..8a05959 100644
36194--- a/drivers/usb/wusbcore/wa-xfer.c
36195+++ b/drivers/usb/wusbcore/wa-xfer.c
36196@@ -296,7 +296,7 @@ out:
36197 */
36198 static void wa_xfer_id_init(struct wa_xfer *xfer)
36199 {
36200- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36201+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36202 }
36203
36204 /*
36205diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36206index c14c42b..f955cc2 100644
36207--- a/drivers/vhost/vhost.c
36208+++ b/drivers/vhost/vhost.c
36209@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36210 return 0;
36211 }
36212
36213-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36214+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36215 {
36216 struct file *eventfp, *filep = NULL,
36217 *pollstart = NULL, *pollstop = NULL;
36218diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36219index b0b2ac3..89a4399 100644
36220--- a/drivers/video/aty/aty128fb.c
36221+++ b/drivers/video/aty/aty128fb.c
36222@@ -148,7 +148,7 @@ enum {
36223 };
36224
36225 /* Must match above enum */
36226-static const char *r128_family[] __devinitdata = {
36227+static const char *r128_family[] __devinitconst = {
36228 "AGP",
36229 "PCI",
36230 "PRO AGP",
36231diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36232index 5c3960d..15cf8fc 100644
36233--- a/drivers/video/fbcmap.c
36234+++ b/drivers/video/fbcmap.c
36235@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36236 rc = -ENODEV;
36237 goto out;
36238 }
36239- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36240- !info->fbops->fb_setcmap)) {
36241+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36242 rc = -EINVAL;
36243 goto out1;
36244 }
36245diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36246index ad93629..e020fc3 100644
36247--- a/drivers/video/fbmem.c
36248+++ b/drivers/video/fbmem.c
36249@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36250 image->dx += image->width + 8;
36251 }
36252 } else if (rotate == FB_ROTATE_UD) {
36253- for (x = 0; x < num && image->dx >= 0; x++) {
36254+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36255 info->fbops->fb_imageblit(info, image);
36256 image->dx -= image->width + 8;
36257 }
36258@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36259 image->dy += image->height + 8;
36260 }
36261 } else if (rotate == FB_ROTATE_CCW) {
36262- for (x = 0; x < num && image->dy >= 0; x++) {
36263+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36264 info->fbops->fb_imageblit(info, image);
36265 image->dy -= image->height + 8;
36266 }
36267@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36268 return -EFAULT;
36269 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36270 return -EINVAL;
36271- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36272+ if (con2fb.framebuffer >= FB_MAX)
36273 return -EINVAL;
36274 if (!registered_fb[con2fb.framebuffer])
36275 request_module("fb%d", con2fb.framebuffer);
36276diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36277index 5a5d092..265c5ed 100644
36278--- a/drivers/video/geode/gx1fb_core.c
36279+++ b/drivers/video/geode/gx1fb_core.c
36280@@ -29,7 +29,7 @@ static int crt_option = 1;
36281 static char panel_option[32] = "";
36282
36283 /* Modes relevant to the GX1 (taken from modedb.c) */
36284-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36285+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36286 /* 640x480-60 VESA */
36287 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36288 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36289diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36290index 0fad23f..0e9afa4 100644
36291--- a/drivers/video/gxt4500.c
36292+++ b/drivers/video/gxt4500.c
36293@@ -156,7 +156,7 @@ struct gxt4500_par {
36294 static char *mode_option;
36295
36296 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36297-static const struct fb_videomode defaultmode __devinitdata = {
36298+static const struct fb_videomode defaultmode __devinitconst = {
36299 .refresh = 60,
36300 .xres = 1280,
36301 .yres = 1024,
36302@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36303 return 0;
36304 }
36305
36306-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36307+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36308 .id = "IBM GXT4500P",
36309 .type = FB_TYPE_PACKED_PIXELS,
36310 .visual = FB_VISUAL_PSEUDOCOLOR,
36311diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36312index 7672d2e..b56437f 100644
36313--- a/drivers/video/i810/i810_accel.c
36314+++ b/drivers/video/i810/i810_accel.c
36315@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36316 }
36317 }
36318 printk("ringbuffer lockup!!!\n");
36319+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36320 i810_report_error(mmio);
36321 par->dev_flags |= LOCKUP;
36322 info->pixmap.scan_align = 1;
36323diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36324index 318f6fb..9a389c1 100644
36325--- a/drivers/video/i810/i810_main.c
36326+++ b/drivers/video/i810/i810_main.c
36327@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36328 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36329
36330 /* PCI */
36331-static const char *i810_pci_list[] __devinitdata = {
36332+static const char *i810_pci_list[] __devinitconst = {
36333 "Intel(R) 810 Framebuffer Device" ,
36334 "Intel(R) 810-DC100 Framebuffer Device" ,
36335 "Intel(R) 810E Framebuffer Device" ,
36336diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36337index de36693..3c63fc2 100644
36338--- a/drivers/video/jz4740_fb.c
36339+++ b/drivers/video/jz4740_fb.c
36340@@ -136,7 +136,7 @@ struct jzfb {
36341 uint32_t pseudo_palette[16];
36342 };
36343
36344-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36345+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36346 .id = "JZ4740 FB",
36347 .type = FB_TYPE_PACKED_PIXELS,
36348 .visual = FB_VISUAL_TRUECOLOR,
36349diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36350index 3c14e43..eafa544 100644
36351--- a/drivers/video/logo/logo_linux_clut224.ppm
36352+++ b/drivers/video/logo/logo_linux_clut224.ppm
36353@@ -1,1604 +1,1123 @@
36354 P3
36355-# Standard 224-color Linux logo
36356 80 80
36357 255
36358- 0 0 0 0 0 0 0 0 0 0 0 0
36359- 0 0 0 0 0 0 0 0 0 0 0 0
36360- 0 0 0 0 0 0 0 0 0 0 0 0
36361- 0 0 0 0 0 0 0 0 0 0 0 0
36362- 0 0 0 0 0 0 0 0 0 0 0 0
36363- 0 0 0 0 0 0 0 0 0 0 0 0
36364- 0 0 0 0 0 0 0 0 0 0 0 0
36365- 0 0 0 0 0 0 0 0 0 0 0 0
36366- 0 0 0 0 0 0 0 0 0 0 0 0
36367- 6 6 6 6 6 6 10 10 10 10 10 10
36368- 10 10 10 6 6 6 6 6 6 6 6 6
36369- 0 0 0 0 0 0 0 0 0 0 0 0
36370- 0 0 0 0 0 0 0 0 0 0 0 0
36371- 0 0 0 0 0 0 0 0 0 0 0 0
36372- 0 0 0 0 0 0 0 0 0 0 0 0
36373- 0 0 0 0 0 0 0 0 0 0 0 0
36374- 0 0 0 0 0 0 0 0 0 0 0 0
36375- 0 0 0 0 0 0 0 0 0 0 0 0
36376- 0 0 0 0 0 0 0 0 0 0 0 0
36377- 0 0 0 0 0 0 0 0 0 0 0 0
36378- 0 0 0 0 0 0 0 0 0 0 0 0
36379- 0 0 0 0 0 0 0 0 0 0 0 0
36380- 0 0 0 0 0 0 0 0 0 0 0 0
36381- 0 0 0 0 0 0 0 0 0 0 0 0
36382- 0 0 0 0 0 0 0 0 0 0 0 0
36383- 0 0 0 0 0 0 0 0 0 0 0 0
36384- 0 0 0 0 0 0 0 0 0 0 0 0
36385- 0 0 0 0 0 0 0 0 0 0 0 0
36386- 0 0 0 6 6 6 10 10 10 14 14 14
36387- 22 22 22 26 26 26 30 30 30 34 34 34
36388- 30 30 30 30 30 30 26 26 26 18 18 18
36389- 14 14 14 10 10 10 6 6 6 0 0 0
36390- 0 0 0 0 0 0 0 0 0 0 0 0
36391- 0 0 0 0 0 0 0 0 0 0 0 0
36392- 0 0 0 0 0 0 0 0 0 0 0 0
36393- 0 0 0 0 0 0 0 0 0 0 0 0
36394- 0 0 0 0 0 0 0 0 0 0 0 0
36395- 0 0 0 0 0 0 0 0 0 0 0 0
36396- 0 0 0 0 0 0 0 0 0 0 0 0
36397- 0 0 0 0 0 0 0 0 0 0 0 0
36398- 0 0 0 0 0 0 0 0 0 0 0 0
36399- 0 0 0 0 0 1 0 0 1 0 0 0
36400- 0 0 0 0 0 0 0 0 0 0 0 0
36401- 0 0 0 0 0 0 0 0 0 0 0 0
36402- 0 0 0 0 0 0 0 0 0 0 0 0
36403- 0 0 0 0 0 0 0 0 0 0 0 0
36404- 0 0 0 0 0 0 0 0 0 0 0 0
36405- 0 0 0 0 0 0 0 0 0 0 0 0
36406- 6 6 6 14 14 14 26 26 26 42 42 42
36407- 54 54 54 66 66 66 78 78 78 78 78 78
36408- 78 78 78 74 74 74 66 66 66 54 54 54
36409- 42 42 42 26 26 26 18 18 18 10 10 10
36410- 6 6 6 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 0 0 0 0 0 0 0 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 0 0 0 0 0 0 0 0 0 0
36417- 0 0 0 0 0 0 0 0 0 0 0 0
36418- 0 0 0 0 0 0 0 0 0 0 0 0
36419- 0 0 1 0 0 0 0 0 0 0 0 0
36420- 0 0 0 0 0 0 0 0 0 0 0 0
36421- 0 0 0 0 0 0 0 0 0 0 0 0
36422- 0 0 0 0 0 0 0 0 0 0 0 0
36423- 0 0 0 0 0 0 0 0 0 0 0 0
36424- 0 0 0 0 0 0 0 0 0 0 0 0
36425- 0 0 0 0 0 0 0 0 0 10 10 10
36426- 22 22 22 42 42 42 66 66 66 86 86 86
36427- 66 66 66 38 38 38 38 38 38 22 22 22
36428- 26 26 26 34 34 34 54 54 54 66 66 66
36429- 86 86 86 70 70 70 46 46 46 26 26 26
36430- 14 14 14 6 6 6 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 0 0 0 0 0 0 0 0 0 0
36437- 0 0 0 0 0 0 0 0 0 0 0 0
36438- 0 0 0 0 0 0 0 0 0 0 0 0
36439- 0 0 1 0 0 1 0 0 1 0 0 0
36440- 0 0 0 0 0 0 0 0 0 0 0 0
36441- 0 0 0 0 0 0 0 0 0 0 0 0
36442- 0 0 0 0 0 0 0 0 0 0 0 0
36443- 0 0 0 0 0 0 0 0 0 0 0 0
36444- 0 0 0 0 0 0 0 0 0 0 0 0
36445- 0 0 0 0 0 0 10 10 10 26 26 26
36446- 50 50 50 82 82 82 58 58 58 6 6 6
36447- 2 2 6 2 2 6 2 2 6 2 2 6
36448- 2 2 6 2 2 6 2 2 6 2 2 6
36449- 6 6 6 54 54 54 86 86 86 66 66 66
36450- 38 38 38 18 18 18 6 6 6 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 0 0 0
36458- 0 0 0 0 0 0 0 0 0 0 0 0
36459- 0 0 0 0 0 0 0 0 0 0 0 0
36460- 0 0 0 0 0 0 0 0 0 0 0 0
36461- 0 0 0 0 0 0 0 0 0 0 0 0
36462- 0 0 0 0 0 0 0 0 0 0 0 0
36463- 0 0 0 0 0 0 0 0 0 0 0 0
36464- 0 0 0 0 0 0 0 0 0 0 0 0
36465- 0 0 0 6 6 6 22 22 22 50 50 50
36466- 78 78 78 34 34 34 2 2 6 2 2 6
36467- 2 2 6 2 2 6 2 2 6 2 2 6
36468- 2 2 6 2 2 6 2 2 6 2 2 6
36469- 2 2 6 2 2 6 6 6 6 70 70 70
36470- 78 78 78 46 46 46 22 22 22 6 6 6
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 0 0 0
36478- 0 0 0 0 0 0 0 0 0 0 0 0
36479- 0 0 1 0 0 1 0 0 1 0 0 0
36480- 0 0 0 0 0 0 0 0 0 0 0 0
36481- 0 0 0 0 0 0 0 0 0 0 0 0
36482- 0 0 0 0 0 0 0 0 0 0 0 0
36483- 0 0 0 0 0 0 0 0 0 0 0 0
36484- 0 0 0 0 0 0 0 0 0 0 0 0
36485- 6 6 6 18 18 18 42 42 42 82 82 82
36486- 26 26 26 2 2 6 2 2 6 2 2 6
36487- 2 2 6 2 2 6 2 2 6 2 2 6
36488- 2 2 6 2 2 6 2 2 6 14 14 14
36489- 46 46 46 34 34 34 6 6 6 2 2 6
36490- 42 42 42 78 78 78 42 42 42 18 18 18
36491- 6 6 6 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 0 0 0
36498- 0 0 0 0 0 0 0 0 0 0 0 0
36499- 0 0 1 0 0 0 0 0 1 0 0 0
36500- 0 0 0 0 0 0 0 0 0 0 0 0
36501- 0 0 0 0 0 0 0 0 0 0 0 0
36502- 0 0 0 0 0 0 0 0 0 0 0 0
36503- 0 0 0 0 0 0 0 0 0 0 0 0
36504- 0 0 0 0 0 0 0 0 0 0 0 0
36505- 10 10 10 30 30 30 66 66 66 58 58 58
36506- 2 2 6 2 2 6 2 2 6 2 2 6
36507- 2 2 6 2 2 6 2 2 6 2 2 6
36508- 2 2 6 2 2 6 2 2 6 26 26 26
36509- 86 86 86 101 101 101 46 46 46 10 10 10
36510- 2 2 6 58 58 58 70 70 70 34 34 34
36511- 10 10 10 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 0 0 0 0 0 0 0 0 0
36517- 0 0 0 0 0 0 0 0 0 0 0 0
36518- 0 0 0 0 0 0 0 0 0 0 0 0
36519- 0 0 1 0 0 1 0 0 1 0 0 0
36520- 0 0 0 0 0 0 0 0 0 0 0 0
36521- 0 0 0 0 0 0 0 0 0 0 0 0
36522- 0 0 0 0 0 0 0 0 0 0 0 0
36523- 0 0 0 0 0 0 0 0 0 0 0 0
36524- 0 0 0 0 0 0 0 0 0 0 0 0
36525- 14 14 14 42 42 42 86 86 86 10 10 10
36526- 2 2 6 2 2 6 2 2 6 2 2 6
36527- 2 2 6 2 2 6 2 2 6 2 2 6
36528- 2 2 6 2 2 6 2 2 6 30 30 30
36529- 94 94 94 94 94 94 58 58 58 26 26 26
36530- 2 2 6 6 6 6 78 78 78 54 54 54
36531- 22 22 22 6 6 6 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 0 0 0 0 0 0 0 0 0 0 0 0
36537- 0 0 0 0 0 0 0 0 0 0 0 0
36538- 0 0 0 0 0 0 0 0 0 0 0 0
36539- 0 0 0 0 0 0 0 0 0 0 0 0
36540- 0 0 0 0 0 0 0 0 0 0 0 0
36541- 0 0 0 0 0 0 0 0 0 0 0 0
36542- 0 0 0 0 0 0 0 0 0 0 0 0
36543- 0 0 0 0 0 0 0 0 0 0 0 0
36544- 0 0 0 0 0 0 0 0 0 6 6 6
36545- 22 22 22 62 62 62 62 62 62 2 2 6
36546- 2 2 6 2 2 6 2 2 6 2 2 6
36547- 2 2 6 2 2 6 2 2 6 2 2 6
36548- 2 2 6 2 2 6 2 2 6 26 26 26
36549- 54 54 54 38 38 38 18 18 18 10 10 10
36550- 2 2 6 2 2 6 34 34 34 82 82 82
36551- 38 38 38 14 14 14 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 0 0 0 0 0 0 0 0 0 0 0 0
36557- 0 0 0 0 0 0 0 0 0 0 0 0
36558- 0 0 0 0 0 0 0 0 0 0 0 0
36559- 0 0 0 0 0 1 0 0 1 0 0 0
36560- 0 0 0 0 0 0 0 0 0 0 0 0
36561- 0 0 0 0 0 0 0 0 0 0 0 0
36562- 0 0 0 0 0 0 0 0 0 0 0 0
36563- 0 0 0 0 0 0 0 0 0 0 0 0
36564- 0 0 0 0 0 0 0 0 0 6 6 6
36565- 30 30 30 78 78 78 30 30 30 2 2 6
36566- 2 2 6 2 2 6 2 2 6 2 2 6
36567- 2 2 6 2 2 6 2 2 6 2 2 6
36568- 2 2 6 2 2 6 2 2 6 10 10 10
36569- 10 10 10 2 2 6 2 2 6 2 2 6
36570- 2 2 6 2 2 6 2 2 6 78 78 78
36571- 50 50 50 18 18 18 6 6 6 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 0 0 0 0 0 0 0 0 0
36578- 0 0 0 0 0 0 0 0 0 0 0 0
36579- 0 0 1 0 0 0 0 0 0 0 0 0
36580- 0 0 0 0 0 0 0 0 0 0 0 0
36581- 0 0 0 0 0 0 0 0 0 0 0 0
36582- 0 0 0 0 0 0 0 0 0 0 0 0
36583- 0 0 0 0 0 0 0 0 0 0 0 0
36584- 0 0 0 0 0 0 0 0 0 10 10 10
36585- 38 38 38 86 86 86 14 14 14 2 2 6
36586- 2 2 6 2 2 6 2 2 6 2 2 6
36587- 2 2 6 2 2 6 2 2 6 2 2 6
36588- 2 2 6 2 2 6 2 2 6 2 2 6
36589- 2 2 6 2 2 6 2 2 6 2 2 6
36590- 2 2 6 2 2 6 2 2 6 54 54 54
36591- 66 66 66 26 26 26 6 6 6 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 0 0 0 0 0 0 0 0 0 0 0 0
36598- 0 0 0 0 0 0 0 0 0 0 0 0
36599- 0 0 0 0 0 1 0 0 1 0 0 0
36600- 0 0 0 0 0 0 0 0 0 0 0 0
36601- 0 0 0 0 0 0 0 0 0 0 0 0
36602- 0 0 0 0 0 0 0 0 0 0 0 0
36603- 0 0 0 0 0 0 0 0 0 0 0 0
36604- 0 0 0 0 0 0 0 0 0 14 14 14
36605- 42 42 42 82 82 82 2 2 6 2 2 6
36606- 2 2 6 6 6 6 10 10 10 2 2 6
36607- 2 2 6 2 2 6 2 2 6 2 2 6
36608- 2 2 6 2 2 6 2 2 6 6 6 6
36609- 14 14 14 10 10 10 2 2 6 2 2 6
36610- 2 2 6 2 2 6 2 2 6 18 18 18
36611- 82 82 82 34 34 34 10 10 10 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 0 0 0
36617- 0 0 0 0 0 0 0 0 0 0 0 0
36618- 0 0 0 0 0 0 0 0 0 0 0 0
36619- 0 0 1 0 0 0 0 0 0 0 0 0
36620- 0 0 0 0 0 0 0 0 0 0 0 0
36621- 0 0 0 0 0 0 0 0 0 0 0 0
36622- 0 0 0 0 0 0 0 0 0 0 0 0
36623- 0 0 0 0 0 0 0 0 0 0 0 0
36624- 0 0 0 0 0 0 0 0 0 14 14 14
36625- 46 46 46 86 86 86 2 2 6 2 2 6
36626- 6 6 6 6 6 6 22 22 22 34 34 34
36627- 6 6 6 2 2 6 2 2 6 2 2 6
36628- 2 2 6 2 2 6 18 18 18 34 34 34
36629- 10 10 10 50 50 50 22 22 22 2 2 6
36630- 2 2 6 2 2 6 2 2 6 10 10 10
36631- 86 86 86 42 42 42 14 14 14 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 0 0 0
36637- 0 0 0 0 0 0 0 0 0 0 0 0
36638- 0 0 0 0 0 0 0 0 0 0 0 0
36639- 0 0 1 0 0 1 0 0 1 0 0 0
36640- 0 0 0 0 0 0 0 0 0 0 0 0
36641- 0 0 0 0 0 0 0 0 0 0 0 0
36642- 0 0 0 0 0 0 0 0 0 0 0 0
36643- 0 0 0 0 0 0 0 0 0 0 0 0
36644- 0 0 0 0 0 0 0 0 0 14 14 14
36645- 46 46 46 86 86 86 2 2 6 2 2 6
36646- 38 38 38 116 116 116 94 94 94 22 22 22
36647- 22 22 22 2 2 6 2 2 6 2 2 6
36648- 14 14 14 86 86 86 138 138 138 162 162 162
36649-154 154 154 38 38 38 26 26 26 6 6 6
36650- 2 2 6 2 2 6 2 2 6 2 2 6
36651- 86 86 86 46 46 46 14 14 14 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 0 0 0 0 0 0
36657- 0 0 0 0 0 0 0 0 0 0 0 0
36658- 0 0 0 0 0 0 0 0 0 0 0 0
36659- 0 0 0 0 0 0 0 0 0 0 0 0
36660- 0 0 0 0 0 0 0 0 0 0 0 0
36661- 0 0 0 0 0 0 0 0 0 0 0 0
36662- 0 0 0 0 0 0 0 0 0 0 0 0
36663- 0 0 0 0 0 0 0 0 0 0 0 0
36664- 0 0 0 0 0 0 0 0 0 14 14 14
36665- 46 46 46 86 86 86 2 2 6 14 14 14
36666-134 134 134 198 198 198 195 195 195 116 116 116
36667- 10 10 10 2 2 6 2 2 6 6 6 6
36668-101 98 89 187 187 187 210 210 210 218 218 218
36669-214 214 214 134 134 134 14 14 14 6 6 6
36670- 2 2 6 2 2 6 2 2 6 2 2 6
36671- 86 86 86 50 50 50 18 18 18 6 6 6
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 0 0 0 0 0 0 0 0 0
36677- 0 0 0 0 0 0 0 0 0 0 0 0
36678- 0 0 0 0 0 0 0 0 1 0 0 0
36679- 0 0 1 0 0 1 0 0 1 0 0 0
36680- 0 0 0 0 0 0 0 0 0 0 0 0
36681- 0 0 0 0 0 0 0 0 0 0 0 0
36682- 0 0 0 0 0 0 0 0 0 0 0 0
36683- 0 0 0 0 0 0 0 0 0 0 0 0
36684- 0 0 0 0 0 0 0 0 0 14 14 14
36685- 46 46 46 86 86 86 2 2 6 54 54 54
36686-218 218 218 195 195 195 226 226 226 246 246 246
36687- 58 58 58 2 2 6 2 2 6 30 30 30
36688-210 210 210 253 253 253 174 174 174 123 123 123
36689-221 221 221 234 234 234 74 74 74 2 2 6
36690- 2 2 6 2 2 6 2 2 6 2 2 6
36691- 70 70 70 58 58 58 22 22 22 6 6 6
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 0 0 0 0 0 0 0 0 0 0 0 0
36697- 0 0 0 0 0 0 0 0 0 0 0 0
36698- 0 0 0 0 0 0 0 0 0 0 0 0
36699- 0 0 0 0 0 0 0 0 0 0 0 0
36700- 0 0 0 0 0 0 0 0 0 0 0 0
36701- 0 0 0 0 0 0 0 0 0 0 0 0
36702- 0 0 0 0 0 0 0 0 0 0 0 0
36703- 0 0 0 0 0 0 0 0 0 0 0 0
36704- 0 0 0 0 0 0 0 0 0 14 14 14
36705- 46 46 46 82 82 82 2 2 6 106 106 106
36706-170 170 170 26 26 26 86 86 86 226 226 226
36707-123 123 123 10 10 10 14 14 14 46 46 46
36708-231 231 231 190 190 190 6 6 6 70 70 70
36709- 90 90 90 238 238 238 158 158 158 2 2 6
36710- 2 2 6 2 2 6 2 2 6 2 2 6
36711- 70 70 70 58 58 58 22 22 22 6 6 6
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 0 0 0 0 0 0 0 0 0 0 0 0
36717- 0 0 0 0 0 0 0 0 0 0 0 0
36718- 0 0 0 0 0 0 0 0 1 0 0 0
36719- 0 0 1 0 0 1 0 0 1 0 0 0
36720- 0 0 0 0 0 0 0 0 0 0 0 0
36721- 0 0 0 0 0 0 0 0 0 0 0 0
36722- 0 0 0 0 0 0 0 0 0 0 0 0
36723- 0 0 0 0 0 0 0 0 0 0 0 0
36724- 0 0 0 0 0 0 0 0 0 14 14 14
36725- 42 42 42 86 86 86 6 6 6 116 116 116
36726-106 106 106 6 6 6 70 70 70 149 149 149
36727-128 128 128 18 18 18 38 38 38 54 54 54
36728-221 221 221 106 106 106 2 2 6 14 14 14
36729- 46 46 46 190 190 190 198 198 198 2 2 6
36730- 2 2 6 2 2 6 2 2 6 2 2 6
36731- 74 74 74 62 62 62 22 22 22 6 6 6
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 0 0 0 0 0 0 0 0 0 0 0 0
36737- 0 0 0 0 0 0 0 0 0 0 0 0
36738- 0 0 0 0 0 0 0 0 1 0 0 0
36739- 0 0 1 0 0 0 0 0 1 0 0 0
36740- 0 0 0 0 0 0 0 0 0 0 0 0
36741- 0 0 0 0 0 0 0 0 0 0 0 0
36742- 0 0 0 0 0 0 0 0 0 0 0 0
36743- 0 0 0 0 0 0 0 0 0 0 0 0
36744- 0 0 0 0 0 0 0 0 0 14 14 14
36745- 42 42 42 94 94 94 14 14 14 101 101 101
36746-128 128 128 2 2 6 18 18 18 116 116 116
36747-118 98 46 121 92 8 121 92 8 98 78 10
36748-162 162 162 106 106 106 2 2 6 2 2 6
36749- 2 2 6 195 195 195 195 195 195 6 6 6
36750- 2 2 6 2 2 6 2 2 6 2 2 6
36751- 74 74 74 62 62 62 22 22 22 6 6 6
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 0 0 0
36756- 0 0 0 0 0 0 0 0 0 0 0 0
36757- 0 0 0 0 0 0 0 0 0 0 0 0
36758- 0 0 0 0 0 0 0 0 1 0 0 1
36759- 0 0 1 0 0 0 0 0 1 0 0 0
36760- 0 0 0 0 0 0 0 0 0 0 0 0
36761- 0 0 0 0 0 0 0 0 0 0 0 0
36762- 0 0 0 0 0 0 0 0 0 0 0 0
36763- 0 0 0 0 0 0 0 0 0 0 0 0
36764- 0 0 0 0 0 0 0 0 0 10 10 10
36765- 38 38 38 90 90 90 14 14 14 58 58 58
36766-210 210 210 26 26 26 54 38 6 154 114 10
36767-226 170 11 236 186 11 225 175 15 184 144 12
36768-215 174 15 175 146 61 37 26 9 2 2 6
36769- 70 70 70 246 246 246 138 138 138 2 2 6
36770- 2 2 6 2 2 6 2 2 6 2 2 6
36771- 70 70 70 66 66 66 26 26 26 6 6 6
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 0 0 0
36776- 0 0 0 0 0 0 0 0 0 0 0 0
36777- 0 0 0 0 0 0 0 0 0 0 0 0
36778- 0 0 0 0 0 0 0 0 0 0 0 0
36779- 0 0 0 0 0 0 0 0 0 0 0 0
36780- 0 0 0 0 0 0 0 0 0 0 0 0
36781- 0 0 0 0 0 0 0 0 0 0 0 0
36782- 0 0 0 0 0 0 0 0 0 0 0 0
36783- 0 0 0 0 0 0 0 0 0 0 0 0
36784- 0 0 0 0 0 0 0 0 0 10 10 10
36785- 38 38 38 86 86 86 14 14 14 10 10 10
36786-195 195 195 188 164 115 192 133 9 225 175 15
36787-239 182 13 234 190 10 232 195 16 232 200 30
36788-245 207 45 241 208 19 232 195 16 184 144 12
36789-218 194 134 211 206 186 42 42 42 2 2 6
36790- 2 2 6 2 2 6 2 2 6 2 2 6
36791- 50 50 50 74 74 74 30 30 30 6 6 6
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 0 0 0
36796- 0 0 0 0 0 0 0 0 0 0 0 0
36797- 0 0 0 0 0 0 0 0 0 0 0 0
36798- 0 0 0 0 0 0 0 0 0 0 0 0
36799- 0 0 0 0 0 0 0 0 0 0 0 0
36800- 0 0 0 0 0 0 0 0 0 0 0 0
36801- 0 0 0 0 0 0 0 0 0 0 0 0
36802- 0 0 0 0 0 0 0 0 0 0 0 0
36803- 0 0 0 0 0 0 0 0 0 0 0 0
36804- 0 0 0 0 0 0 0 0 0 10 10 10
36805- 34 34 34 86 86 86 14 14 14 2 2 6
36806-121 87 25 192 133 9 219 162 10 239 182 13
36807-236 186 11 232 195 16 241 208 19 244 214 54
36808-246 218 60 246 218 38 246 215 20 241 208 19
36809-241 208 19 226 184 13 121 87 25 2 2 6
36810- 2 2 6 2 2 6 2 2 6 2 2 6
36811- 50 50 50 82 82 82 34 34 34 10 10 10
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 0 0 0
36816- 0 0 0 0 0 0 0 0 0 0 0 0
36817- 0 0 0 0 0 0 0 0 0 0 0 0
36818- 0 0 0 0 0 0 0 0 0 0 0 0
36819- 0 0 0 0 0 0 0 0 0 0 0 0
36820- 0 0 0 0 0 0 0 0 0 0 0 0
36821- 0 0 0 0 0 0 0 0 0 0 0 0
36822- 0 0 0 0 0 0 0 0 0 0 0 0
36823- 0 0 0 0 0 0 0 0 0 0 0 0
36824- 0 0 0 0 0 0 0 0 0 10 10 10
36825- 34 34 34 82 82 82 30 30 30 61 42 6
36826-180 123 7 206 145 10 230 174 11 239 182 13
36827-234 190 10 238 202 15 241 208 19 246 218 74
36828-246 218 38 246 215 20 246 215 20 246 215 20
36829-226 184 13 215 174 15 184 144 12 6 6 6
36830- 2 2 6 2 2 6 2 2 6 2 2 6
36831- 26 26 26 94 94 94 42 42 42 14 14 14
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 0 0 0
36836- 0 0 0 0 0 0 0 0 0 0 0 0
36837- 0 0 0 0 0 0 0 0 0 0 0 0
36838- 0 0 0 0 0 0 0 0 0 0 0 0
36839- 0 0 0 0 0 0 0 0 0 0 0 0
36840- 0 0 0 0 0 0 0 0 0 0 0 0
36841- 0 0 0 0 0 0 0 0 0 0 0 0
36842- 0 0 0 0 0 0 0 0 0 0 0 0
36843- 0 0 0 0 0 0 0 0 0 0 0 0
36844- 0 0 0 0 0 0 0 0 0 10 10 10
36845- 30 30 30 78 78 78 50 50 50 104 69 6
36846-192 133 9 216 158 10 236 178 12 236 186 11
36847-232 195 16 241 208 19 244 214 54 245 215 43
36848-246 215 20 246 215 20 241 208 19 198 155 10
36849-200 144 11 216 158 10 156 118 10 2 2 6
36850- 2 2 6 2 2 6 2 2 6 2 2 6
36851- 6 6 6 90 90 90 54 54 54 18 18 18
36852- 6 6 6 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 0 0 0
36856- 0 0 0 0 0 0 0 0 0 0 0 0
36857- 0 0 0 0 0 0 0 0 0 0 0 0
36858- 0 0 0 0 0 0 0 0 0 0 0 0
36859- 0 0 0 0 0 0 0 0 0 0 0 0
36860- 0 0 0 0 0 0 0 0 0 0 0 0
36861- 0 0 0 0 0 0 0 0 0 0 0 0
36862- 0 0 0 0 0 0 0 0 0 0 0 0
36863- 0 0 0 0 0 0 0 0 0 0 0 0
36864- 0 0 0 0 0 0 0 0 0 10 10 10
36865- 30 30 30 78 78 78 46 46 46 22 22 22
36866-137 92 6 210 162 10 239 182 13 238 190 10
36867-238 202 15 241 208 19 246 215 20 246 215 20
36868-241 208 19 203 166 17 185 133 11 210 150 10
36869-216 158 10 210 150 10 102 78 10 2 2 6
36870- 6 6 6 54 54 54 14 14 14 2 2 6
36871- 2 2 6 62 62 62 74 74 74 30 30 30
36872- 10 10 10 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 0 0 0
36876- 0 0 0 0 0 0 0 0 0 0 0 0
36877- 0 0 0 0 0 0 0 0 0 0 0 0
36878- 0 0 0 0 0 0 0 0 0 0 0 0
36879- 0 0 0 0 0 0 0 0 0 0 0 0
36880- 0 0 0 0 0 0 0 0 0 0 0 0
36881- 0 0 0 0 0 0 0 0 0 0 0 0
36882- 0 0 0 0 0 0 0 0 0 0 0 0
36883- 0 0 0 0 0 0 0 0 0 0 0 0
36884- 0 0 0 0 0 0 0 0 0 10 10 10
36885- 34 34 34 78 78 78 50 50 50 6 6 6
36886- 94 70 30 139 102 15 190 146 13 226 184 13
36887-232 200 30 232 195 16 215 174 15 190 146 13
36888-168 122 10 192 133 9 210 150 10 213 154 11
36889-202 150 34 182 157 106 101 98 89 2 2 6
36890- 2 2 6 78 78 78 116 116 116 58 58 58
36891- 2 2 6 22 22 22 90 90 90 46 46 46
36892- 18 18 18 6 6 6 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 0 0 0
36896- 0 0 0 0 0 0 0 0 0 0 0 0
36897- 0 0 0 0 0 0 0 0 0 0 0 0
36898- 0 0 0 0 0 0 0 0 0 0 0 0
36899- 0 0 0 0 0 0 0 0 0 0 0 0
36900- 0 0 0 0 0 0 0 0 0 0 0 0
36901- 0 0 0 0 0 0 0 0 0 0 0 0
36902- 0 0 0 0 0 0 0 0 0 0 0 0
36903- 0 0 0 0 0 0 0 0 0 0 0 0
36904- 0 0 0 0 0 0 0 0 0 10 10 10
36905- 38 38 38 86 86 86 50 50 50 6 6 6
36906-128 128 128 174 154 114 156 107 11 168 122 10
36907-198 155 10 184 144 12 197 138 11 200 144 11
36908-206 145 10 206 145 10 197 138 11 188 164 115
36909-195 195 195 198 198 198 174 174 174 14 14 14
36910- 2 2 6 22 22 22 116 116 116 116 116 116
36911- 22 22 22 2 2 6 74 74 74 70 70 70
36912- 30 30 30 10 10 10 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 0 0 0
36916- 0 0 0 0 0 0 0 0 0 0 0 0
36917- 0 0 0 0 0 0 0 0 0 0 0 0
36918- 0 0 0 0 0 0 0 0 0 0 0 0
36919- 0 0 0 0 0 0 0 0 0 0 0 0
36920- 0 0 0 0 0 0 0 0 0 0 0 0
36921- 0 0 0 0 0 0 0 0 0 0 0 0
36922- 0 0 0 0 0 0 0 0 0 0 0 0
36923- 0 0 0 0 0 0 0 0 0 0 0 0
36924- 0 0 0 0 0 0 6 6 6 18 18 18
36925- 50 50 50 101 101 101 26 26 26 10 10 10
36926-138 138 138 190 190 190 174 154 114 156 107 11
36927-197 138 11 200 144 11 197 138 11 192 133 9
36928-180 123 7 190 142 34 190 178 144 187 187 187
36929-202 202 202 221 221 221 214 214 214 66 66 66
36930- 2 2 6 2 2 6 50 50 50 62 62 62
36931- 6 6 6 2 2 6 10 10 10 90 90 90
36932- 50 50 50 18 18 18 6 6 6 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 0 0 0
36936- 0 0 0 0 0 0 0 0 0 0 0 0
36937- 0 0 0 0 0 0 0 0 0 0 0 0
36938- 0 0 0 0 0 0 0 0 0 0 0 0
36939- 0 0 0 0 0 0 0 0 0 0 0 0
36940- 0 0 0 0 0 0 0 0 0 0 0 0
36941- 0 0 0 0 0 0 0 0 0 0 0 0
36942- 0 0 0 0 0 0 0 0 0 0 0 0
36943- 0 0 0 0 0 0 0 0 0 0 0 0
36944- 0 0 0 0 0 0 10 10 10 34 34 34
36945- 74 74 74 74 74 74 2 2 6 6 6 6
36946-144 144 144 198 198 198 190 190 190 178 166 146
36947-154 121 60 156 107 11 156 107 11 168 124 44
36948-174 154 114 187 187 187 190 190 190 210 210 210
36949-246 246 246 253 253 253 253 253 253 182 182 182
36950- 6 6 6 2 2 6 2 2 6 2 2 6
36951- 2 2 6 2 2 6 2 2 6 62 62 62
36952- 74 74 74 34 34 34 14 14 14 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 0 0 0
36956- 0 0 0 0 0 0 0 0 0 0 0 0
36957- 0 0 0 0 0 0 0 0 0 0 0 0
36958- 0 0 0 0 0 0 0 0 0 0 0 0
36959- 0 0 0 0 0 0 0 0 0 0 0 0
36960- 0 0 0 0 0 0 0 0 0 0 0 0
36961- 0 0 0 0 0 0 0 0 0 0 0 0
36962- 0 0 0 0 0 0 0 0 0 0 0 0
36963- 0 0 0 0 0 0 0 0 0 0 0 0
36964- 0 0 0 10 10 10 22 22 22 54 54 54
36965- 94 94 94 18 18 18 2 2 6 46 46 46
36966-234 234 234 221 221 221 190 190 190 190 190 190
36967-190 190 190 187 187 187 187 187 187 190 190 190
36968-190 190 190 195 195 195 214 214 214 242 242 242
36969-253 253 253 253 253 253 253 253 253 253 253 253
36970- 82 82 82 2 2 6 2 2 6 2 2 6
36971- 2 2 6 2 2 6 2 2 6 14 14 14
36972- 86 86 86 54 54 54 22 22 22 6 6 6
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 0 0 0 0 0 0
36976- 0 0 0 0 0 0 0 0 0 0 0 0
36977- 0 0 0 0 0 0 0 0 0 0 0 0
36978- 0 0 0 0 0 0 0 0 0 0 0 0
36979- 0 0 0 0 0 0 0 0 0 0 0 0
36980- 0 0 0 0 0 0 0 0 0 0 0 0
36981- 0 0 0 0 0 0 0 0 0 0 0 0
36982- 0 0 0 0 0 0 0 0 0 0 0 0
36983- 0 0 0 0 0 0 0 0 0 0 0 0
36984- 6 6 6 18 18 18 46 46 46 90 90 90
36985- 46 46 46 18 18 18 6 6 6 182 182 182
36986-253 253 253 246 246 246 206 206 206 190 190 190
36987-190 190 190 190 190 190 190 190 190 190 190 190
36988-206 206 206 231 231 231 250 250 250 253 253 253
36989-253 253 253 253 253 253 253 253 253 253 253 253
36990-202 202 202 14 14 14 2 2 6 2 2 6
36991- 2 2 6 2 2 6 2 2 6 2 2 6
36992- 42 42 42 86 86 86 42 42 42 18 18 18
36993- 6 6 6 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 0 0 0 0 0 0
36996- 0 0 0 0 0 0 0 0 0 0 0 0
36997- 0 0 0 0 0 0 0 0 0 0 0 0
36998- 0 0 0 0 0 0 0 0 0 0 0 0
36999- 0 0 0 0 0 0 0 0 0 0 0 0
37000- 0 0 0 0 0 0 0 0 0 0 0 0
37001- 0 0 0 0 0 0 0 0 0 0 0 0
37002- 0 0 0 0 0 0 0 0 0 0 0 0
37003- 0 0 0 0 0 0 0 0 0 6 6 6
37004- 14 14 14 38 38 38 74 74 74 66 66 66
37005- 2 2 6 6 6 6 90 90 90 250 250 250
37006-253 253 253 253 253 253 238 238 238 198 198 198
37007-190 190 190 190 190 190 195 195 195 221 221 221
37008-246 246 246 253 253 253 253 253 253 253 253 253
37009-253 253 253 253 253 253 253 253 253 253 253 253
37010-253 253 253 82 82 82 2 2 6 2 2 6
37011- 2 2 6 2 2 6 2 2 6 2 2 6
37012- 2 2 6 78 78 78 70 70 70 34 34 34
37013- 14 14 14 6 6 6 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 0 0 0 0 0 0 0 0 0
37016- 0 0 0 0 0 0 0 0 0 0 0 0
37017- 0 0 0 0 0 0 0 0 0 0 0 0
37018- 0 0 0 0 0 0 0 0 0 0 0 0
37019- 0 0 0 0 0 0 0 0 0 0 0 0
37020- 0 0 0 0 0 0 0 0 0 0 0 0
37021- 0 0 0 0 0 0 0 0 0 0 0 0
37022- 0 0 0 0 0 0 0 0 0 0 0 0
37023- 0 0 0 0 0 0 0 0 0 14 14 14
37024- 34 34 34 66 66 66 78 78 78 6 6 6
37025- 2 2 6 18 18 18 218 218 218 253 253 253
37026-253 253 253 253 253 253 253 253 253 246 246 246
37027-226 226 226 231 231 231 246 246 246 253 253 253
37028-253 253 253 253 253 253 253 253 253 253 253 253
37029-253 253 253 253 253 253 253 253 253 253 253 253
37030-253 253 253 178 178 178 2 2 6 2 2 6
37031- 2 2 6 2 2 6 2 2 6 2 2 6
37032- 2 2 6 18 18 18 90 90 90 62 62 62
37033- 30 30 30 10 10 10 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 0 0 0 0 0 0 0 0 0 0 0 0
37036- 0 0 0 0 0 0 0 0 0 0 0 0
37037- 0 0 0 0 0 0 0 0 0 0 0 0
37038- 0 0 0 0 0 0 0 0 0 0 0 0
37039- 0 0 0 0 0 0 0 0 0 0 0 0
37040- 0 0 0 0 0 0 0 0 0 0 0 0
37041- 0 0 0 0 0 0 0 0 0 0 0 0
37042- 0 0 0 0 0 0 0 0 0 0 0 0
37043- 0 0 0 0 0 0 10 10 10 26 26 26
37044- 58 58 58 90 90 90 18 18 18 2 2 6
37045- 2 2 6 110 110 110 253 253 253 253 253 253
37046-253 253 253 253 253 253 253 253 253 253 253 253
37047-250 250 250 253 253 253 253 253 253 253 253 253
37048-253 253 253 253 253 253 253 253 253 253 253 253
37049-253 253 253 253 253 253 253 253 253 253 253 253
37050-253 253 253 231 231 231 18 18 18 2 2 6
37051- 2 2 6 2 2 6 2 2 6 2 2 6
37052- 2 2 6 2 2 6 18 18 18 94 94 94
37053- 54 54 54 26 26 26 10 10 10 0 0 0
37054- 0 0 0 0 0 0 0 0 0 0 0 0
37055- 0 0 0 0 0 0 0 0 0 0 0 0
37056- 0 0 0 0 0 0 0 0 0 0 0 0
37057- 0 0 0 0 0 0 0 0 0 0 0 0
37058- 0 0 0 0 0 0 0 0 0 0 0 0
37059- 0 0 0 0 0 0 0 0 0 0 0 0
37060- 0 0 0 0 0 0 0 0 0 0 0 0
37061- 0 0 0 0 0 0 0 0 0 0 0 0
37062- 0 0 0 0 0 0 0 0 0 0 0 0
37063- 0 0 0 6 6 6 22 22 22 50 50 50
37064- 90 90 90 26 26 26 2 2 6 2 2 6
37065- 14 14 14 195 195 195 250 250 250 253 253 253
37066-253 253 253 253 253 253 253 253 253 253 253 253
37067-253 253 253 253 253 253 253 253 253 253 253 253
37068-253 253 253 253 253 253 253 253 253 253 253 253
37069-253 253 253 253 253 253 253 253 253 253 253 253
37070-250 250 250 242 242 242 54 54 54 2 2 6
37071- 2 2 6 2 2 6 2 2 6 2 2 6
37072- 2 2 6 2 2 6 2 2 6 38 38 38
37073- 86 86 86 50 50 50 22 22 22 6 6 6
37074- 0 0 0 0 0 0 0 0 0 0 0 0
37075- 0 0 0 0 0 0 0 0 0 0 0 0
37076- 0 0 0 0 0 0 0 0 0 0 0 0
37077- 0 0 0 0 0 0 0 0 0 0 0 0
37078- 0 0 0 0 0 0 0 0 0 0 0 0
37079- 0 0 0 0 0 0 0 0 0 0 0 0
37080- 0 0 0 0 0 0 0 0 0 0 0 0
37081- 0 0 0 0 0 0 0 0 0 0 0 0
37082- 0 0 0 0 0 0 0 0 0 0 0 0
37083- 6 6 6 14 14 14 38 38 38 82 82 82
37084- 34 34 34 2 2 6 2 2 6 2 2 6
37085- 42 42 42 195 195 195 246 246 246 253 253 253
37086-253 253 253 253 253 253 253 253 253 250 250 250
37087-242 242 242 242 242 242 250 250 250 253 253 253
37088-253 253 253 253 253 253 253 253 253 253 253 253
37089-253 253 253 250 250 250 246 246 246 238 238 238
37090-226 226 226 231 231 231 101 101 101 6 6 6
37091- 2 2 6 2 2 6 2 2 6 2 2 6
37092- 2 2 6 2 2 6 2 2 6 2 2 6
37093- 38 38 38 82 82 82 42 42 42 14 14 14
37094- 6 6 6 0 0 0 0 0 0 0 0 0
37095- 0 0 0 0 0 0 0 0 0 0 0 0
37096- 0 0 0 0 0 0 0 0 0 0 0 0
37097- 0 0 0 0 0 0 0 0 0 0 0 0
37098- 0 0 0 0 0 0 0 0 0 0 0 0
37099- 0 0 0 0 0 0 0 0 0 0 0 0
37100- 0 0 0 0 0 0 0 0 0 0 0 0
37101- 0 0 0 0 0 0 0 0 0 0 0 0
37102- 0 0 0 0 0 0 0 0 0 0 0 0
37103- 10 10 10 26 26 26 62 62 62 66 66 66
37104- 2 2 6 2 2 6 2 2 6 6 6 6
37105- 70 70 70 170 170 170 206 206 206 234 234 234
37106-246 246 246 250 250 250 250 250 250 238 238 238
37107-226 226 226 231 231 231 238 238 238 250 250 250
37108-250 250 250 250 250 250 246 246 246 231 231 231
37109-214 214 214 206 206 206 202 202 202 202 202 202
37110-198 198 198 202 202 202 182 182 182 18 18 18
37111- 2 2 6 2 2 6 2 2 6 2 2 6
37112- 2 2 6 2 2 6 2 2 6 2 2 6
37113- 2 2 6 62 62 62 66 66 66 30 30 30
37114- 10 10 10 0 0 0 0 0 0 0 0 0
37115- 0 0 0 0 0 0 0 0 0 0 0 0
37116- 0 0 0 0 0 0 0 0 0 0 0 0
37117- 0 0 0 0 0 0 0 0 0 0 0 0
37118- 0 0 0 0 0 0 0 0 0 0 0 0
37119- 0 0 0 0 0 0 0 0 0 0 0 0
37120- 0 0 0 0 0 0 0 0 0 0 0 0
37121- 0 0 0 0 0 0 0 0 0 0 0 0
37122- 0 0 0 0 0 0 0 0 0 0 0 0
37123- 14 14 14 42 42 42 82 82 82 18 18 18
37124- 2 2 6 2 2 6 2 2 6 10 10 10
37125- 94 94 94 182 182 182 218 218 218 242 242 242
37126-250 250 250 253 253 253 253 253 253 250 250 250
37127-234 234 234 253 253 253 253 253 253 253 253 253
37128-253 253 253 253 253 253 253 253 253 246 246 246
37129-238 238 238 226 226 226 210 210 210 202 202 202
37130-195 195 195 195 195 195 210 210 210 158 158 158
37131- 6 6 6 14 14 14 50 50 50 14 14 14
37132- 2 2 6 2 2 6 2 2 6 2 2 6
37133- 2 2 6 6 6 6 86 86 86 46 46 46
37134- 18 18 18 6 6 6 0 0 0 0 0 0
37135- 0 0 0 0 0 0 0 0 0 0 0 0
37136- 0 0 0 0 0 0 0 0 0 0 0 0
37137- 0 0 0 0 0 0 0 0 0 0 0 0
37138- 0 0 0 0 0 0 0 0 0 0 0 0
37139- 0 0 0 0 0 0 0 0 0 0 0 0
37140- 0 0 0 0 0 0 0 0 0 0 0 0
37141- 0 0 0 0 0 0 0 0 0 0 0 0
37142- 0 0 0 0 0 0 0 0 0 6 6 6
37143- 22 22 22 54 54 54 70 70 70 2 2 6
37144- 2 2 6 10 10 10 2 2 6 22 22 22
37145-166 166 166 231 231 231 250 250 250 253 253 253
37146-253 253 253 253 253 253 253 253 253 250 250 250
37147-242 242 242 253 253 253 253 253 253 253 253 253
37148-253 253 253 253 253 253 253 253 253 253 253 253
37149-253 253 253 253 253 253 253 253 253 246 246 246
37150-231 231 231 206 206 206 198 198 198 226 226 226
37151- 94 94 94 2 2 6 6 6 6 38 38 38
37152- 30 30 30 2 2 6 2 2 6 2 2 6
37153- 2 2 6 2 2 6 62 62 62 66 66 66
37154- 26 26 26 10 10 10 0 0 0 0 0 0
37155- 0 0 0 0 0 0 0 0 0 0 0 0
37156- 0 0 0 0 0 0 0 0 0 0 0 0
37157- 0 0 0 0 0 0 0 0 0 0 0 0
37158- 0 0 0 0 0 0 0 0 0 0 0 0
37159- 0 0 0 0 0 0 0 0 0 0 0 0
37160- 0 0 0 0 0 0 0 0 0 0 0 0
37161- 0 0 0 0 0 0 0 0 0 0 0 0
37162- 0 0 0 0 0 0 0 0 0 10 10 10
37163- 30 30 30 74 74 74 50 50 50 2 2 6
37164- 26 26 26 26 26 26 2 2 6 106 106 106
37165-238 238 238 253 253 253 253 253 253 253 253 253
37166-253 253 253 253 253 253 253 253 253 253 253 253
37167-253 253 253 253 253 253 253 253 253 253 253 253
37168-253 253 253 253 253 253 253 253 253 253 253 253
37169-253 253 253 253 253 253 253 253 253 253 253 253
37170-253 253 253 246 246 246 218 218 218 202 202 202
37171-210 210 210 14 14 14 2 2 6 2 2 6
37172- 30 30 30 22 22 22 2 2 6 2 2 6
37173- 2 2 6 2 2 6 18 18 18 86 86 86
37174- 42 42 42 14 14 14 0 0 0 0 0 0
37175- 0 0 0 0 0 0 0 0 0 0 0 0
37176- 0 0 0 0 0 0 0 0 0 0 0 0
37177- 0 0 0 0 0 0 0 0 0 0 0 0
37178- 0 0 0 0 0 0 0 0 0 0 0 0
37179- 0 0 0 0 0 0 0 0 0 0 0 0
37180- 0 0 0 0 0 0 0 0 0 0 0 0
37181- 0 0 0 0 0 0 0 0 0 0 0 0
37182- 0 0 0 0 0 0 0 0 0 14 14 14
37183- 42 42 42 90 90 90 22 22 22 2 2 6
37184- 42 42 42 2 2 6 18 18 18 218 218 218
37185-253 253 253 253 253 253 253 253 253 253 253 253
37186-253 253 253 253 253 253 253 253 253 253 253 253
37187-253 253 253 253 253 253 253 253 253 253 253 253
37188-253 253 253 253 253 253 253 253 253 253 253 253
37189-253 253 253 253 253 253 253 253 253 253 253 253
37190-253 253 253 253 253 253 250 250 250 221 221 221
37191-218 218 218 101 101 101 2 2 6 14 14 14
37192- 18 18 18 38 38 38 10 10 10 2 2 6
37193- 2 2 6 2 2 6 2 2 6 78 78 78
37194- 58 58 58 22 22 22 6 6 6 0 0 0
37195- 0 0 0 0 0 0 0 0 0 0 0 0
37196- 0 0 0 0 0 0 0 0 0 0 0 0
37197- 0 0 0 0 0 0 0 0 0 0 0 0
37198- 0 0 0 0 0 0 0 0 0 0 0 0
37199- 0 0 0 0 0 0 0 0 0 0 0 0
37200- 0 0 0 0 0 0 0 0 0 0 0 0
37201- 0 0 0 0 0 0 0 0 0 0 0 0
37202- 0 0 0 0 0 0 6 6 6 18 18 18
37203- 54 54 54 82 82 82 2 2 6 26 26 26
37204- 22 22 22 2 2 6 123 123 123 253 253 253
37205-253 253 253 253 253 253 253 253 253 253 253 253
37206-253 253 253 253 253 253 253 253 253 253 253 253
37207-253 253 253 253 253 253 253 253 253 253 253 253
37208-253 253 253 253 253 253 253 253 253 253 253 253
37209-253 253 253 253 253 253 253 253 253 253 253 253
37210-253 253 253 253 253 253 253 253 253 250 250 250
37211-238 238 238 198 198 198 6 6 6 38 38 38
37212- 58 58 58 26 26 26 38 38 38 2 2 6
37213- 2 2 6 2 2 6 2 2 6 46 46 46
37214- 78 78 78 30 30 30 10 10 10 0 0 0
37215- 0 0 0 0 0 0 0 0 0 0 0 0
37216- 0 0 0 0 0 0 0 0 0 0 0 0
37217- 0 0 0 0 0 0 0 0 0 0 0 0
37218- 0 0 0 0 0 0 0 0 0 0 0 0
37219- 0 0 0 0 0 0 0 0 0 0 0 0
37220- 0 0 0 0 0 0 0 0 0 0 0 0
37221- 0 0 0 0 0 0 0 0 0 0 0 0
37222- 0 0 0 0 0 0 10 10 10 30 30 30
37223- 74 74 74 58 58 58 2 2 6 42 42 42
37224- 2 2 6 22 22 22 231 231 231 253 253 253
37225-253 253 253 253 253 253 253 253 253 253 253 253
37226-253 253 253 253 253 253 253 253 253 250 250 250
37227-253 253 253 253 253 253 253 253 253 253 253 253
37228-253 253 253 253 253 253 253 253 253 253 253 253
37229-253 253 253 253 253 253 253 253 253 253 253 253
37230-253 253 253 253 253 253 253 253 253 253 253 253
37231-253 253 253 246 246 246 46 46 46 38 38 38
37232- 42 42 42 14 14 14 38 38 38 14 14 14
37233- 2 2 6 2 2 6 2 2 6 6 6 6
37234- 86 86 86 46 46 46 14 14 14 0 0 0
37235- 0 0 0 0 0 0 0 0 0 0 0 0
37236- 0 0 0 0 0 0 0 0 0 0 0 0
37237- 0 0 0 0 0 0 0 0 0 0 0 0
37238- 0 0 0 0 0 0 0 0 0 0 0 0
37239- 0 0 0 0 0 0 0 0 0 0 0 0
37240- 0 0 0 0 0 0 0 0 0 0 0 0
37241- 0 0 0 0 0 0 0 0 0 0 0 0
37242- 0 0 0 6 6 6 14 14 14 42 42 42
37243- 90 90 90 18 18 18 18 18 18 26 26 26
37244- 2 2 6 116 116 116 253 253 253 253 253 253
37245-253 253 253 253 253 253 253 253 253 253 253 253
37246-253 253 253 253 253 253 250 250 250 238 238 238
37247-253 253 253 253 253 253 253 253 253 253 253 253
37248-253 253 253 253 253 253 253 253 253 253 253 253
37249-253 253 253 253 253 253 253 253 253 253 253 253
37250-253 253 253 253 253 253 253 253 253 253 253 253
37251-253 253 253 253 253 253 94 94 94 6 6 6
37252- 2 2 6 2 2 6 10 10 10 34 34 34
37253- 2 2 6 2 2 6 2 2 6 2 2 6
37254- 74 74 74 58 58 58 22 22 22 6 6 6
37255- 0 0 0 0 0 0 0 0 0 0 0 0
37256- 0 0 0 0 0 0 0 0 0 0 0 0
37257- 0 0 0 0 0 0 0 0 0 0 0 0
37258- 0 0 0 0 0 0 0 0 0 0 0 0
37259- 0 0 0 0 0 0 0 0 0 0 0 0
37260- 0 0 0 0 0 0 0 0 0 0 0 0
37261- 0 0 0 0 0 0 0 0 0 0 0 0
37262- 0 0 0 10 10 10 26 26 26 66 66 66
37263- 82 82 82 2 2 6 38 38 38 6 6 6
37264- 14 14 14 210 210 210 253 253 253 253 253 253
37265-253 253 253 253 253 253 253 253 253 253 253 253
37266-253 253 253 253 253 253 246 246 246 242 242 242
37267-253 253 253 253 253 253 253 253 253 253 253 253
37268-253 253 253 253 253 253 253 253 253 253 253 253
37269-253 253 253 253 253 253 253 253 253 253 253 253
37270-253 253 253 253 253 253 253 253 253 253 253 253
37271-253 253 253 253 253 253 144 144 144 2 2 6
37272- 2 2 6 2 2 6 2 2 6 46 46 46
37273- 2 2 6 2 2 6 2 2 6 2 2 6
37274- 42 42 42 74 74 74 30 30 30 10 10 10
37275- 0 0 0 0 0 0 0 0 0 0 0 0
37276- 0 0 0 0 0 0 0 0 0 0 0 0
37277- 0 0 0 0 0 0 0 0 0 0 0 0
37278- 0 0 0 0 0 0 0 0 0 0 0 0
37279- 0 0 0 0 0 0 0 0 0 0 0 0
37280- 0 0 0 0 0 0 0 0 0 0 0 0
37281- 0 0 0 0 0 0 0 0 0 0 0 0
37282- 6 6 6 14 14 14 42 42 42 90 90 90
37283- 26 26 26 6 6 6 42 42 42 2 2 6
37284- 74 74 74 250 250 250 253 253 253 253 253 253
37285-253 253 253 253 253 253 253 253 253 253 253 253
37286-253 253 253 253 253 253 242 242 242 242 242 242
37287-253 253 253 253 253 253 253 253 253 253 253 253
37288-253 253 253 253 253 253 253 253 253 253 253 253
37289-253 253 253 253 253 253 253 253 253 253 253 253
37290-253 253 253 253 253 253 253 253 253 253 253 253
37291-253 253 253 253 253 253 182 182 182 2 2 6
37292- 2 2 6 2 2 6 2 2 6 46 46 46
37293- 2 2 6 2 2 6 2 2 6 2 2 6
37294- 10 10 10 86 86 86 38 38 38 10 10 10
37295- 0 0 0 0 0 0 0 0 0 0 0 0
37296- 0 0 0 0 0 0 0 0 0 0 0 0
37297- 0 0 0 0 0 0 0 0 0 0 0 0
37298- 0 0 0 0 0 0 0 0 0 0 0 0
37299- 0 0 0 0 0 0 0 0 0 0 0 0
37300- 0 0 0 0 0 0 0 0 0 0 0 0
37301- 0 0 0 0 0 0 0 0 0 0 0 0
37302- 10 10 10 26 26 26 66 66 66 82 82 82
37303- 2 2 6 22 22 22 18 18 18 2 2 6
37304-149 149 149 253 253 253 253 253 253 253 253 253
37305-253 253 253 253 253 253 253 253 253 253 253 253
37306-253 253 253 253 253 253 234 234 234 242 242 242
37307-253 253 253 253 253 253 253 253 253 253 253 253
37308-253 253 253 253 253 253 253 253 253 253 253 253
37309-253 253 253 253 253 253 253 253 253 253 253 253
37310-253 253 253 253 253 253 253 253 253 253 253 253
37311-253 253 253 253 253 253 206 206 206 2 2 6
37312- 2 2 6 2 2 6 2 2 6 38 38 38
37313- 2 2 6 2 2 6 2 2 6 2 2 6
37314- 6 6 6 86 86 86 46 46 46 14 14 14
37315- 0 0 0 0 0 0 0 0 0 0 0 0
37316- 0 0 0 0 0 0 0 0 0 0 0 0
37317- 0 0 0 0 0 0 0 0 0 0 0 0
37318- 0 0 0 0 0 0 0 0 0 0 0 0
37319- 0 0 0 0 0 0 0 0 0 0 0 0
37320- 0 0 0 0 0 0 0 0 0 0 0 0
37321- 0 0 0 0 0 0 0 0 0 6 6 6
37322- 18 18 18 46 46 46 86 86 86 18 18 18
37323- 2 2 6 34 34 34 10 10 10 6 6 6
37324-210 210 210 253 253 253 253 253 253 253 253 253
37325-253 253 253 253 253 253 253 253 253 253 253 253
37326-253 253 253 253 253 253 234 234 234 242 242 242
37327-253 253 253 253 253 253 253 253 253 253 253 253
37328-253 253 253 253 253 253 253 253 253 253 253 253
37329-253 253 253 253 253 253 253 253 253 253 253 253
37330-253 253 253 253 253 253 253 253 253 253 253 253
37331-253 253 253 253 253 253 221 221 221 6 6 6
37332- 2 2 6 2 2 6 6 6 6 30 30 30
37333- 2 2 6 2 2 6 2 2 6 2 2 6
37334- 2 2 6 82 82 82 54 54 54 18 18 18
37335- 6 6 6 0 0 0 0 0 0 0 0 0
37336- 0 0 0 0 0 0 0 0 0 0 0 0
37337- 0 0 0 0 0 0 0 0 0 0 0 0
37338- 0 0 0 0 0 0 0 0 0 0 0 0
37339- 0 0 0 0 0 0 0 0 0 0 0 0
37340- 0 0 0 0 0 0 0 0 0 0 0 0
37341- 0 0 0 0 0 0 0 0 0 10 10 10
37342- 26 26 26 66 66 66 62 62 62 2 2 6
37343- 2 2 6 38 38 38 10 10 10 26 26 26
37344-238 238 238 253 253 253 253 253 253 253 253 253
37345-253 253 253 253 253 253 253 253 253 253 253 253
37346-253 253 253 253 253 253 231 231 231 238 238 238
37347-253 253 253 253 253 253 253 253 253 253 253 253
37348-253 253 253 253 253 253 253 253 253 253 253 253
37349-253 253 253 253 253 253 253 253 253 253 253 253
37350-253 253 253 253 253 253 253 253 253 253 253 253
37351-253 253 253 253 253 253 231 231 231 6 6 6
37352- 2 2 6 2 2 6 10 10 10 30 30 30
37353- 2 2 6 2 2 6 2 2 6 2 2 6
37354- 2 2 6 66 66 66 58 58 58 22 22 22
37355- 6 6 6 0 0 0 0 0 0 0 0 0
37356- 0 0 0 0 0 0 0 0 0 0 0 0
37357- 0 0 0 0 0 0 0 0 0 0 0 0
37358- 0 0 0 0 0 0 0 0 0 0 0 0
37359- 0 0 0 0 0 0 0 0 0 0 0 0
37360- 0 0 0 0 0 0 0 0 0 0 0 0
37361- 0 0 0 0 0 0 0 0 0 10 10 10
37362- 38 38 38 78 78 78 6 6 6 2 2 6
37363- 2 2 6 46 46 46 14 14 14 42 42 42
37364-246 246 246 253 253 253 253 253 253 253 253 253
37365-253 253 253 253 253 253 253 253 253 253 253 253
37366-253 253 253 253 253 253 231 231 231 242 242 242
37367-253 253 253 253 253 253 253 253 253 253 253 253
37368-253 253 253 253 253 253 253 253 253 253 253 253
37369-253 253 253 253 253 253 253 253 253 253 253 253
37370-253 253 253 253 253 253 253 253 253 253 253 253
37371-253 253 253 253 253 253 234 234 234 10 10 10
37372- 2 2 6 2 2 6 22 22 22 14 14 14
37373- 2 2 6 2 2 6 2 2 6 2 2 6
37374- 2 2 6 66 66 66 62 62 62 22 22 22
37375- 6 6 6 0 0 0 0 0 0 0 0 0
37376- 0 0 0 0 0 0 0 0 0 0 0 0
37377- 0 0 0 0 0 0 0 0 0 0 0 0
37378- 0 0 0 0 0 0 0 0 0 0 0 0
37379- 0 0 0 0 0 0 0 0 0 0 0 0
37380- 0 0 0 0 0 0 0 0 0 0 0 0
37381- 0 0 0 0 0 0 6 6 6 18 18 18
37382- 50 50 50 74 74 74 2 2 6 2 2 6
37383- 14 14 14 70 70 70 34 34 34 62 62 62
37384-250 250 250 253 253 253 253 253 253 253 253 253
37385-253 253 253 253 253 253 253 253 253 253 253 253
37386-253 253 253 253 253 253 231 231 231 246 246 246
37387-253 253 253 253 253 253 253 253 253 253 253 253
37388-253 253 253 253 253 253 253 253 253 253 253 253
37389-253 253 253 253 253 253 253 253 253 253 253 253
37390-253 253 253 253 253 253 253 253 253 253 253 253
37391-253 253 253 253 253 253 234 234 234 14 14 14
37392- 2 2 6 2 2 6 30 30 30 2 2 6
37393- 2 2 6 2 2 6 2 2 6 2 2 6
37394- 2 2 6 66 66 66 62 62 62 22 22 22
37395- 6 6 6 0 0 0 0 0 0 0 0 0
37396- 0 0 0 0 0 0 0 0 0 0 0 0
37397- 0 0 0 0 0 0 0 0 0 0 0 0
37398- 0 0 0 0 0 0 0 0 0 0 0 0
37399- 0 0 0 0 0 0 0 0 0 0 0 0
37400- 0 0 0 0 0 0 0 0 0 0 0 0
37401- 0 0 0 0 0 0 6 6 6 18 18 18
37402- 54 54 54 62 62 62 2 2 6 2 2 6
37403- 2 2 6 30 30 30 46 46 46 70 70 70
37404-250 250 250 253 253 253 253 253 253 253 253 253
37405-253 253 253 253 253 253 253 253 253 253 253 253
37406-253 253 253 253 253 253 231 231 231 246 246 246
37407-253 253 253 253 253 253 253 253 253 253 253 253
37408-253 253 253 253 253 253 253 253 253 253 253 253
37409-253 253 253 253 253 253 253 253 253 253 253 253
37410-253 253 253 253 253 253 253 253 253 253 253 253
37411-253 253 253 253 253 253 226 226 226 10 10 10
37412- 2 2 6 6 6 6 30 30 30 2 2 6
37413- 2 2 6 2 2 6 2 2 6 2 2 6
37414- 2 2 6 66 66 66 58 58 58 22 22 22
37415- 6 6 6 0 0 0 0 0 0 0 0 0
37416- 0 0 0 0 0 0 0 0 0 0 0 0
37417- 0 0 0 0 0 0 0 0 0 0 0 0
37418- 0 0 0 0 0 0 0 0 0 0 0 0
37419- 0 0 0 0 0 0 0 0 0 0 0 0
37420- 0 0 0 0 0 0 0 0 0 0 0 0
37421- 0 0 0 0 0 0 6 6 6 22 22 22
37422- 58 58 58 62 62 62 2 2 6 2 2 6
37423- 2 2 6 2 2 6 30 30 30 78 78 78
37424-250 250 250 253 253 253 253 253 253 253 253 253
37425-253 253 253 253 253 253 253 253 253 253 253 253
37426-253 253 253 253 253 253 231 231 231 246 246 246
37427-253 253 253 253 253 253 253 253 253 253 253 253
37428-253 253 253 253 253 253 253 253 253 253 253 253
37429-253 253 253 253 253 253 253 253 253 253 253 253
37430-253 253 253 253 253 253 253 253 253 253 253 253
37431-253 253 253 253 253 253 206 206 206 2 2 6
37432- 22 22 22 34 34 34 18 14 6 22 22 22
37433- 26 26 26 18 18 18 6 6 6 2 2 6
37434- 2 2 6 82 82 82 54 54 54 18 18 18
37435- 6 6 6 0 0 0 0 0 0 0 0 0
37436- 0 0 0 0 0 0 0 0 0 0 0 0
37437- 0 0 0 0 0 0 0 0 0 0 0 0
37438- 0 0 0 0 0 0 0 0 0 0 0 0
37439- 0 0 0 0 0 0 0 0 0 0 0 0
37440- 0 0 0 0 0 0 0 0 0 0 0 0
37441- 0 0 0 0 0 0 6 6 6 26 26 26
37442- 62 62 62 106 106 106 74 54 14 185 133 11
37443-210 162 10 121 92 8 6 6 6 62 62 62
37444-238 238 238 253 253 253 253 253 253 253 253 253
37445-253 253 253 253 253 253 253 253 253 253 253 253
37446-253 253 253 253 253 253 231 231 231 246 246 246
37447-253 253 253 253 253 253 253 253 253 253 253 253
37448-253 253 253 253 253 253 253 253 253 253 253 253
37449-253 253 253 253 253 253 253 253 253 253 253 253
37450-253 253 253 253 253 253 253 253 253 253 253 253
37451-253 253 253 253 253 253 158 158 158 18 18 18
37452- 14 14 14 2 2 6 2 2 6 2 2 6
37453- 6 6 6 18 18 18 66 66 66 38 38 38
37454- 6 6 6 94 94 94 50 50 50 18 18 18
37455- 6 6 6 0 0 0 0 0 0 0 0 0
37456- 0 0 0 0 0 0 0 0 0 0 0 0
37457- 0 0 0 0 0 0 0 0 0 0 0 0
37458- 0 0 0 0 0 0 0 0 0 0 0 0
37459- 0 0 0 0 0 0 0 0 0 0 0 0
37460- 0 0 0 0 0 0 0 0 0 6 6 6
37461- 10 10 10 10 10 10 18 18 18 38 38 38
37462- 78 78 78 142 134 106 216 158 10 242 186 14
37463-246 190 14 246 190 14 156 118 10 10 10 10
37464- 90 90 90 238 238 238 253 253 253 253 253 253
37465-253 253 253 253 253 253 253 253 253 253 253 253
37466-253 253 253 253 253 253 231 231 231 250 250 250
37467-253 253 253 253 253 253 253 253 253 253 253 253
37468-253 253 253 253 253 253 253 253 253 253 253 253
37469-253 253 253 253 253 253 253 253 253 253 253 253
37470-253 253 253 253 253 253 253 253 253 246 230 190
37471-238 204 91 238 204 91 181 142 44 37 26 9
37472- 2 2 6 2 2 6 2 2 6 2 2 6
37473- 2 2 6 2 2 6 38 38 38 46 46 46
37474- 26 26 26 106 106 106 54 54 54 18 18 18
37475- 6 6 6 0 0 0 0 0 0 0 0 0
37476- 0 0 0 0 0 0 0 0 0 0 0 0
37477- 0 0 0 0 0 0 0 0 0 0 0 0
37478- 0 0 0 0 0 0 0 0 0 0 0 0
37479- 0 0 0 0 0 0 0 0 0 0 0 0
37480- 0 0 0 6 6 6 14 14 14 22 22 22
37481- 30 30 30 38 38 38 50 50 50 70 70 70
37482-106 106 106 190 142 34 226 170 11 242 186 14
37483-246 190 14 246 190 14 246 190 14 154 114 10
37484- 6 6 6 74 74 74 226 226 226 253 253 253
37485-253 253 253 253 253 253 253 253 253 253 253 253
37486-253 253 253 253 253 253 231 231 231 250 250 250
37487-253 253 253 253 253 253 253 253 253 253 253 253
37488-253 253 253 253 253 253 253 253 253 253 253 253
37489-253 253 253 253 253 253 253 253 253 253 253 253
37490-253 253 253 253 253 253 253 253 253 228 184 62
37491-241 196 14 241 208 19 232 195 16 38 30 10
37492- 2 2 6 2 2 6 2 2 6 2 2 6
37493- 2 2 6 6 6 6 30 30 30 26 26 26
37494-203 166 17 154 142 90 66 66 66 26 26 26
37495- 6 6 6 0 0 0 0 0 0 0 0 0
37496- 0 0 0 0 0 0 0 0 0 0 0 0
37497- 0 0 0 0 0 0 0 0 0 0 0 0
37498- 0 0 0 0 0 0 0 0 0 0 0 0
37499- 0 0 0 0 0 0 0 0 0 0 0 0
37500- 6 6 6 18 18 18 38 38 38 58 58 58
37501- 78 78 78 86 86 86 101 101 101 123 123 123
37502-175 146 61 210 150 10 234 174 13 246 186 14
37503-246 190 14 246 190 14 246 190 14 238 190 10
37504-102 78 10 2 2 6 46 46 46 198 198 198
37505-253 253 253 253 253 253 253 253 253 253 253 253
37506-253 253 253 253 253 253 234 234 234 242 242 242
37507-253 253 253 253 253 253 253 253 253 253 253 253
37508-253 253 253 253 253 253 253 253 253 253 253 253
37509-253 253 253 253 253 253 253 253 253 253 253 253
37510-253 253 253 253 253 253 253 253 253 224 178 62
37511-242 186 14 241 196 14 210 166 10 22 18 6
37512- 2 2 6 2 2 6 2 2 6 2 2 6
37513- 2 2 6 2 2 6 6 6 6 121 92 8
37514-238 202 15 232 195 16 82 82 82 34 34 34
37515- 10 10 10 0 0 0 0 0 0 0 0 0
37516- 0 0 0 0 0 0 0 0 0 0 0 0
37517- 0 0 0 0 0 0 0 0 0 0 0 0
37518- 0 0 0 0 0 0 0 0 0 0 0 0
37519- 0 0 0 0 0 0 0 0 0 0 0 0
37520- 14 14 14 38 38 38 70 70 70 154 122 46
37521-190 142 34 200 144 11 197 138 11 197 138 11
37522-213 154 11 226 170 11 242 186 14 246 190 14
37523-246 190 14 246 190 14 246 190 14 246 190 14
37524-225 175 15 46 32 6 2 2 6 22 22 22
37525-158 158 158 250 250 250 253 253 253 253 253 253
37526-253 253 253 253 253 253 253 253 253 253 253 253
37527-253 253 253 253 253 253 253 253 253 253 253 253
37528-253 253 253 253 253 253 253 253 253 253 253 253
37529-253 253 253 253 253 253 253 253 253 253 253 253
37530-253 253 253 250 250 250 242 242 242 224 178 62
37531-239 182 13 236 186 11 213 154 11 46 32 6
37532- 2 2 6 2 2 6 2 2 6 2 2 6
37533- 2 2 6 2 2 6 61 42 6 225 175 15
37534-238 190 10 236 186 11 112 100 78 42 42 42
37535- 14 14 14 0 0 0 0 0 0 0 0 0
37536- 0 0 0 0 0 0 0 0 0 0 0 0
37537- 0 0 0 0 0 0 0 0 0 0 0 0
37538- 0 0 0 0 0 0 0 0 0 0 0 0
37539- 0 0 0 0 0 0 0 0 0 6 6 6
37540- 22 22 22 54 54 54 154 122 46 213 154 11
37541-226 170 11 230 174 11 226 170 11 226 170 11
37542-236 178 12 242 186 14 246 190 14 246 190 14
37543-246 190 14 246 190 14 246 190 14 246 190 14
37544-241 196 14 184 144 12 10 10 10 2 2 6
37545- 6 6 6 116 116 116 242 242 242 253 253 253
37546-253 253 253 253 253 253 253 253 253 253 253 253
37547-253 253 253 253 253 253 253 253 253 253 253 253
37548-253 253 253 253 253 253 253 253 253 253 253 253
37549-253 253 253 253 253 253 253 253 253 253 253 253
37550-253 253 253 231 231 231 198 198 198 214 170 54
37551-236 178 12 236 178 12 210 150 10 137 92 6
37552- 18 14 6 2 2 6 2 2 6 2 2 6
37553- 6 6 6 70 47 6 200 144 11 236 178 12
37554-239 182 13 239 182 13 124 112 88 58 58 58
37555- 22 22 22 6 6 6 0 0 0 0 0 0
37556- 0 0 0 0 0 0 0 0 0 0 0 0
37557- 0 0 0 0 0 0 0 0 0 0 0 0
37558- 0 0 0 0 0 0 0 0 0 0 0 0
37559- 0 0 0 0 0 0 0 0 0 10 10 10
37560- 30 30 30 70 70 70 180 133 36 226 170 11
37561-239 182 13 242 186 14 242 186 14 246 186 14
37562-246 190 14 246 190 14 246 190 14 246 190 14
37563-246 190 14 246 190 14 246 190 14 246 190 14
37564-246 190 14 232 195 16 98 70 6 2 2 6
37565- 2 2 6 2 2 6 66 66 66 221 221 221
37566-253 253 253 253 253 253 253 253 253 253 253 253
37567-253 253 253 253 253 253 253 253 253 253 253 253
37568-253 253 253 253 253 253 253 253 253 253 253 253
37569-253 253 253 253 253 253 253 253 253 253 253 253
37570-253 253 253 206 206 206 198 198 198 214 166 58
37571-230 174 11 230 174 11 216 158 10 192 133 9
37572-163 110 8 116 81 8 102 78 10 116 81 8
37573-167 114 7 197 138 11 226 170 11 239 182 13
37574-242 186 14 242 186 14 162 146 94 78 78 78
37575- 34 34 34 14 14 14 6 6 6 0 0 0
37576- 0 0 0 0 0 0 0 0 0 0 0 0
37577- 0 0 0 0 0 0 0 0 0 0 0 0
37578- 0 0 0 0 0 0 0 0 0 0 0 0
37579- 0 0 0 0 0 0 0 0 0 6 6 6
37580- 30 30 30 78 78 78 190 142 34 226 170 11
37581-239 182 13 246 190 14 246 190 14 246 190 14
37582-246 190 14 246 190 14 246 190 14 246 190 14
37583-246 190 14 246 190 14 246 190 14 246 190 14
37584-246 190 14 241 196 14 203 166 17 22 18 6
37585- 2 2 6 2 2 6 2 2 6 38 38 38
37586-218 218 218 253 253 253 253 253 253 253 253 253
37587-253 253 253 253 253 253 253 253 253 253 253 253
37588-253 253 253 253 253 253 253 253 253 253 253 253
37589-253 253 253 253 253 253 253 253 253 253 253 253
37590-250 250 250 206 206 206 198 198 198 202 162 69
37591-226 170 11 236 178 12 224 166 10 210 150 10
37592-200 144 11 197 138 11 192 133 9 197 138 11
37593-210 150 10 226 170 11 242 186 14 246 190 14
37594-246 190 14 246 186 14 225 175 15 124 112 88
37595- 62 62 62 30 30 30 14 14 14 6 6 6
37596- 0 0 0 0 0 0 0 0 0 0 0 0
37597- 0 0 0 0 0 0 0 0 0 0 0 0
37598- 0 0 0 0 0 0 0 0 0 0 0 0
37599- 0 0 0 0 0 0 0 0 0 10 10 10
37600- 30 30 30 78 78 78 174 135 50 224 166 10
37601-239 182 13 246 190 14 246 190 14 246 190 14
37602-246 190 14 246 190 14 246 190 14 246 190 14
37603-246 190 14 246 190 14 246 190 14 246 190 14
37604-246 190 14 246 190 14 241 196 14 139 102 15
37605- 2 2 6 2 2 6 2 2 6 2 2 6
37606- 78 78 78 250 250 250 253 253 253 253 253 253
37607-253 253 253 253 253 253 253 253 253 253 253 253
37608-253 253 253 253 253 253 253 253 253 253 253 253
37609-253 253 253 253 253 253 253 253 253 253 253 253
37610-250 250 250 214 214 214 198 198 198 190 150 46
37611-219 162 10 236 178 12 234 174 13 224 166 10
37612-216 158 10 213 154 11 213 154 11 216 158 10
37613-226 170 11 239 182 13 246 190 14 246 190 14
37614-246 190 14 246 190 14 242 186 14 206 162 42
37615-101 101 101 58 58 58 30 30 30 14 14 14
37616- 6 6 6 0 0 0 0 0 0 0 0 0
37617- 0 0 0 0 0 0 0 0 0 0 0 0
37618- 0 0 0 0 0 0 0 0 0 0 0 0
37619- 0 0 0 0 0 0 0 0 0 10 10 10
37620- 30 30 30 74 74 74 174 135 50 216 158 10
37621-236 178 12 246 190 14 246 190 14 246 190 14
37622-246 190 14 246 190 14 246 190 14 246 190 14
37623-246 190 14 246 190 14 246 190 14 246 190 14
37624-246 190 14 246 190 14 241 196 14 226 184 13
37625- 61 42 6 2 2 6 2 2 6 2 2 6
37626- 22 22 22 238 238 238 253 253 253 253 253 253
37627-253 253 253 253 253 253 253 253 253 253 253 253
37628-253 253 253 253 253 253 253 253 253 253 253 253
37629-253 253 253 253 253 253 253 253 253 253 253 253
37630-253 253 253 226 226 226 187 187 187 180 133 36
37631-216 158 10 236 178 12 239 182 13 236 178 12
37632-230 174 11 226 170 11 226 170 11 230 174 11
37633-236 178 12 242 186 14 246 190 14 246 190 14
37634-246 190 14 246 190 14 246 186 14 239 182 13
37635-206 162 42 106 106 106 66 66 66 34 34 34
37636- 14 14 14 6 6 6 0 0 0 0 0 0
37637- 0 0 0 0 0 0 0 0 0 0 0 0
37638- 0 0 0 0 0 0 0 0 0 0 0 0
37639- 0 0 0 0 0 0 0 0 0 6 6 6
37640- 26 26 26 70 70 70 163 133 67 213 154 11
37641-236 178 12 246 190 14 246 190 14 246 190 14
37642-246 190 14 246 190 14 246 190 14 246 190 14
37643-246 190 14 246 190 14 246 190 14 246 190 14
37644-246 190 14 246 190 14 246 190 14 241 196 14
37645-190 146 13 18 14 6 2 2 6 2 2 6
37646- 46 46 46 246 246 246 253 253 253 253 253 253
37647-253 253 253 253 253 253 253 253 253 253 253 253
37648-253 253 253 253 253 253 253 253 253 253 253 253
37649-253 253 253 253 253 253 253 253 253 253 253 253
37650-253 253 253 221 221 221 86 86 86 156 107 11
37651-216 158 10 236 178 12 242 186 14 246 186 14
37652-242 186 14 239 182 13 239 182 13 242 186 14
37653-242 186 14 246 186 14 246 190 14 246 190 14
37654-246 190 14 246 190 14 246 190 14 246 190 14
37655-242 186 14 225 175 15 142 122 72 66 66 66
37656- 30 30 30 10 10 10 0 0 0 0 0 0
37657- 0 0 0 0 0 0 0 0 0 0 0 0
37658- 0 0 0 0 0 0 0 0 0 0 0 0
37659- 0 0 0 0 0 0 0 0 0 6 6 6
37660- 26 26 26 70 70 70 163 133 67 210 150 10
37661-236 178 12 246 190 14 246 190 14 246 190 14
37662-246 190 14 246 190 14 246 190 14 246 190 14
37663-246 190 14 246 190 14 246 190 14 246 190 14
37664-246 190 14 246 190 14 246 190 14 246 190 14
37665-232 195 16 121 92 8 34 34 34 106 106 106
37666-221 221 221 253 253 253 253 253 253 253 253 253
37667-253 253 253 253 253 253 253 253 253 253 253 253
37668-253 253 253 253 253 253 253 253 253 253 253 253
37669-253 253 253 253 253 253 253 253 253 253 253 253
37670-242 242 242 82 82 82 18 14 6 163 110 8
37671-216 158 10 236 178 12 242 186 14 246 190 14
37672-246 190 14 246 190 14 246 190 14 246 190 14
37673-246 190 14 246 190 14 246 190 14 246 190 14
37674-246 190 14 246 190 14 246 190 14 246 190 14
37675-246 190 14 246 190 14 242 186 14 163 133 67
37676- 46 46 46 18 18 18 6 6 6 0 0 0
37677- 0 0 0 0 0 0 0 0 0 0 0 0
37678- 0 0 0 0 0 0 0 0 0 0 0 0
37679- 0 0 0 0 0 0 0 0 0 10 10 10
37680- 30 30 30 78 78 78 163 133 67 210 150 10
37681-236 178 12 246 186 14 246 190 14 246 190 14
37682-246 190 14 246 190 14 246 190 14 246 190 14
37683-246 190 14 246 190 14 246 190 14 246 190 14
37684-246 190 14 246 190 14 246 190 14 246 190 14
37685-241 196 14 215 174 15 190 178 144 253 253 253
37686-253 253 253 253 253 253 253 253 253 253 253 253
37687-253 253 253 253 253 253 253 253 253 253 253 253
37688-253 253 253 253 253 253 253 253 253 253 253 253
37689-253 253 253 253 253 253 253 253 253 218 218 218
37690- 58 58 58 2 2 6 22 18 6 167 114 7
37691-216 158 10 236 178 12 246 186 14 246 190 14
37692-246 190 14 246 190 14 246 190 14 246 190 14
37693-246 190 14 246 190 14 246 190 14 246 190 14
37694-246 190 14 246 190 14 246 190 14 246 190 14
37695-246 190 14 246 186 14 242 186 14 190 150 46
37696- 54 54 54 22 22 22 6 6 6 0 0 0
37697- 0 0 0 0 0 0 0 0 0 0 0 0
37698- 0 0 0 0 0 0 0 0 0 0 0 0
37699- 0 0 0 0 0 0 0 0 0 14 14 14
37700- 38 38 38 86 86 86 180 133 36 213 154 11
37701-236 178 12 246 186 14 246 190 14 246 190 14
37702-246 190 14 246 190 14 246 190 14 246 190 14
37703-246 190 14 246 190 14 246 190 14 246 190 14
37704-246 190 14 246 190 14 246 190 14 246 190 14
37705-246 190 14 232 195 16 190 146 13 214 214 214
37706-253 253 253 253 253 253 253 253 253 253 253 253
37707-253 253 253 253 253 253 253 253 253 253 253 253
37708-253 253 253 253 253 253 253 253 253 253 253 253
37709-253 253 253 250 250 250 170 170 170 26 26 26
37710- 2 2 6 2 2 6 37 26 9 163 110 8
37711-219 162 10 239 182 13 246 186 14 246 190 14
37712-246 190 14 246 190 14 246 190 14 246 190 14
37713-246 190 14 246 190 14 246 190 14 246 190 14
37714-246 190 14 246 190 14 246 190 14 246 190 14
37715-246 186 14 236 178 12 224 166 10 142 122 72
37716- 46 46 46 18 18 18 6 6 6 0 0 0
37717- 0 0 0 0 0 0 0 0 0 0 0 0
37718- 0 0 0 0 0 0 0 0 0 0 0 0
37719- 0 0 0 0 0 0 6 6 6 18 18 18
37720- 50 50 50 109 106 95 192 133 9 224 166 10
37721-242 186 14 246 190 14 246 190 14 246 190 14
37722-246 190 14 246 190 14 246 190 14 246 190 14
37723-246 190 14 246 190 14 246 190 14 246 190 14
37724-246 190 14 246 190 14 246 190 14 246 190 14
37725-242 186 14 226 184 13 210 162 10 142 110 46
37726-226 226 226 253 253 253 253 253 253 253 253 253
37727-253 253 253 253 253 253 253 253 253 253 253 253
37728-253 253 253 253 253 253 253 253 253 253 253 253
37729-198 198 198 66 66 66 2 2 6 2 2 6
37730- 2 2 6 2 2 6 50 34 6 156 107 11
37731-219 162 10 239 182 13 246 186 14 246 190 14
37732-246 190 14 246 190 14 246 190 14 246 190 14
37733-246 190 14 246 190 14 246 190 14 246 190 14
37734-246 190 14 246 190 14 246 190 14 242 186 14
37735-234 174 13 213 154 11 154 122 46 66 66 66
37736- 30 30 30 10 10 10 0 0 0 0 0 0
37737- 0 0 0 0 0 0 0 0 0 0 0 0
37738- 0 0 0 0 0 0 0 0 0 0 0 0
37739- 0 0 0 0 0 0 6 6 6 22 22 22
37740- 58 58 58 154 121 60 206 145 10 234 174 13
37741-242 186 14 246 186 14 246 190 14 246 190 14
37742-246 190 14 246 190 14 246 190 14 246 190 14
37743-246 190 14 246 190 14 246 190 14 246 190 14
37744-246 190 14 246 190 14 246 190 14 246 190 14
37745-246 186 14 236 178 12 210 162 10 163 110 8
37746- 61 42 6 138 138 138 218 218 218 250 250 250
37747-253 253 253 253 253 253 253 253 253 250 250 250
37748-242 242 242 210 210 210 144 144 144 66 66 66
37749- 6 6 6 2 2 6 2 2 6 2 2 6
37750- 2 2 6 2 2 6 61 42 6 163 110 8
37751-216 158 10 236 178 12 246 190 14 246 190 14
37752-246 190 14 246 190 14 246 190 14 246 190 14
37753-246 190 14 246 190 14 246 190 14 246 190 14
37754-246 190 14 239 182 13 230 174 11 216 158 10
37755-190 142 34 124 112 88 70 70 70 38 38 38
37756- 18 18 18 6 6 6 0 0 0 0 0 0
37757- 0 0 0 0 0 0 0 0 0 0 0 0
37758- 0 0 0 0 0 0 0 0 0 0 0 0
37759- 0 0 0 0 0 0 6 6 6 22 22 22
37760- 62 62 62 168 124 44 206 145 10 224 166 10
37761-236 178 12 239 182 13 242 186 14 242 186 14
37762-246 186 14 246 190 14 246 190 14 246 190 14
37763-246 190 14 246 190 14 246 190 14 246 190 14
37764-246 190 14 246 190 14 246 190 14 246 190 14
37765-246 190 14 236 178 12 216 158 10 175 118 6
37766- 80 54 7 2 2 6 6 6 6 30 30 30
37767- 54 54 54 62 62 62 50 50 50 38 38 38
37768- 14 14 14 2 2 6 2 2 6 2 2 6
37769- 2 2 6 2 2 6 2 2 6 2 2 6
37770- 2 2 6 6 6 6 80 54 7 167 114 7
37771-213 154 11 236 178 12 246 190 14 246 190 14
37772-246 190 14 246 190 14 246 190 14 246 190 14
37773-246 190 14 242 186 14 239 182 13 239 182 13
37774-230 174 11 210 150 10 174 135 50 124 112 88
37775- 82 82 82 54 54 54 34 34 34 18 18 18
37776- 6 6 6 0 0 0 0 0 0 0 0 0
37777- 0 0 0 0 0 0 0 0 0 0 0 0
37778- 0 0 0 0 0 0 0 0 0 0 0 0
37779- 0 0 0 0 0 0 6 6 6 18 18 18
37780- 50 50 50 158 118 36 192 133 9 200 144 11
37781-216 158 10 219 162 10 224 166 10 226 170 11
37782-230 174 11 236 178 12 239 182 13 239 182 13
37783-242 186 14 246 186 14 246 190 14 246 190 14
37784-246 190 14 246 190 14 246 190 14 246 190 14
37785-246 186 14 230 174 11 210 150 10 163 110 8
37786-104 69 6 10 10 10 2 2 6 2 2 6
37787- 2 2 6 2 2 6 2 2 6 2 2 6
37788- 2 2 6 2 2 6 2 2 6 2 2 6
37789- 2 2 6 2 2 6 2 2 6 2 2 6
37790- 2 2 6 6 6 6 91 60 6 167 114 7
37791-206 145 10 230 174 11 242 186 14 246 190 14
37792-246 190 14 246 190 14 246 186 14 242 186 14
37793-239 182 13 230 174 11 224 166 10 213 154 11
37794-180 133 36 124 112 88 86 86 86 58 58 58
37795- 38 38 38 22 22 22 10 10 10 6 6 6
37796- 0 0 0 0 0 0 0 0 0 0 0 0
37797- 0 0 0 0 0 0 0 0 0 0 0 0
37798- 0 0 0 0 0 0 0 0 0 0 0 0
37799- 0 0 0 0 0 0 0 0 0 14 14 14
37800- 34 34 34 70 70 70 138 110 50 158 118 36
37801-167 114 7 180 123 7 192 133 9 197 138 11
37802-200 144 11 206 145 10 213 154 11 219 162 10
37803-224 166 10 230 174 11 239 182 13 242 186 14
37804-246 186 14 246 186 14 246 186 14 246 186 14
37805-239 182 13 216 158 10 185 133 11 152 99 6
37806-104 69 6 18 14 6 2 2 6 2 2 6
37807- 2 2 6 2 2 6 2 2 6 2 2 6
37808- 2 2 6 2 2 6 2 2 6 2 2 6
37809- 2 2 6 2 2 6 2 2 6 2 2 6
37810- 2 2 6 6 6 6 80 54 7 152 99 6
37811-192 133 9 219 162 10 236 178 12 239 182 13
37812-246 186 14 242 186 14 239 182 13 236 178 12
37813-224 166 10 206 145 10 192 133 9 154 121 60
37814- 94 94 94 62 62 62 42 42 42 22 22 22
37815- 14 14 14 6 6 6 0 0 0 0 0 0
37816- 0 0 0 0 0 0 0 0 0 0 0 0
37817- 0 0 0 0 0 0 0 0 0 0 0 0
37818- 0 0 0 0 0 0 0 0 0 0 0 0
37819- 0 0 0 0 0 0 0 0 0 6 6 6
37820- 18 18 18 34 34 34 58 58 58 78 78 78
37821-101 98 89 124 112 88 142 110 46 156 107 11
37822-163 110 8 167 114 7 175 118 6 180 123 7
37823-185 133 11 197 138 11 210 150 10 219 162 10
37824-226 170 11 236 178 12 236 178 12 234 174 13
37825-219 162 10 197 138 11 163 110 8 130 83 6
37826- 91 60 6 10 10 10 2 2 6 2 2 6
37827- 18 18 18 38 38 38 38 38 38 38 38 38
37828- 38 38 38 38 38 38 38 38 38 38 38 38
37829- 38 38 38 38 38 38 26 26 26 2 2 6
37830- 2 2 6 6 6 6 70 47 6 137 92 6
37831-175 118 6 200 144 11 219 162 10 230 174 11
37832-234 174 13 230 174 11 219 162 10 210 150 10
37833-192 133 9 163 110 8 124 112 88 82 82 82
37834- 50 50 50 30 30 30 14 14 14 6 6 6
37835- 0 0 0 0 0 0 0 0 0 0 0 0
37836- 0 0 0 0 0 0 0 0 0 0 0 0
37837- 0 0 0 0 0 0 0 0 0 0 0 0
37838- 0 0 0 0 0 0 0 0 0 0 0 0
37839- 0 0 0 0 0 0 0 0 0 0 0 0
37840- 6 6 6 14 14 14 22 22 22 34 34 34
37841- 42 42 42 58 58 58 74 74 74 86 86 86
37842-101 98 89 122 102 70 130 98 46 121 87 25
37843-137 92 6 152 99 6 163 110 8 180 123 7
37844-185 133 11 197 138 11 206 145 10 200 144 11
37845-180 123 7 156 107 11 130 83 6 104 69 6
37846- 50 34 6 54 54 54 110 110 110 101 98 89
37847- 86 86 86 82 82 82 78 78 78 78 78 78
37848- 78 78 78 78 78 78 78 78 78 78 78 78
37849- 78 78 78 82 82 82 86 86 86 94 94 94
37850-106 106 106 101 101 101 86 66 34 124 80 6
37851-156 107 11 180 123 7 192 133 9 200 144 11
37852-206 145 10 200 144 11 192 133 9 175 118 6
37853-139 102 15 109 106 95 70 70 70 42 42 42
37854- 22 22 22 10 10 10 0 0 0 0 0 0
37855- 0 0 0 0 0 0 0 0 0 0 0 0
37856- 0 0 0 0 0 0 0 0 0 0 0 0
37857- 0 0 0 0 0 0 0 0 0 0 0 0
37858- 0 0 0 0 0 0 0 0 0 0 0 0
37859- 0 0 0 0 0 0 0 0 0 0 0 0
37860- 0 0 0 0 0 0 6 6 6 10 10 10
37861- 14 14 14 22 22 22 30 30 30 38 38 38
37862- 50 50 50 62 62 62 74 74 74 90 90 90
37863-101 98 89 112 100 78 121 87 25 124 80 6
37864-137 92 6 152 99 6 152 99 6 152 99 6
37865-138 86 6 124 80 6 98 70 6 86 66 30
37866-101 98 89 82 82 82 58 58 58 46 46 46
37867- 38 38 38 34 34 34 34 34 34 34 34 34
37868- 34 34 34 34 34 34 34 34 34 34 34 34
37869- 34 34 34 34 34 34 38 38 38 42 42 42
37870- 54 54 54 82 82 82 94 86 76 91 60 6
37871-134 86 6 156 107 11 167 114 7 175 118 6
37872-175 118 6 167 114 7 152 99 6 121 87 25
37873-101 98 89 62 62 62 34 34 34 18 18 18
37874- 6 6 6 0 0 0 0 0 0 0 0 0
37875- 0 0 0 0 0 0 0 0 0 0 0 0
37876- 0 0 0 0 0 0 0 0 0 0 0 0
37877- 0 0 0 0 0 0 0 0 0 0 0 0
37878- 0 0 0 0 0 0 0 0 0 0 0 0
37879- 0 0 0 0 0 0 0 0 0 0 0 0
37880- 0 0 0 0 0 0 0 0 0 0 0 0
37881- 0 0 0 6 6 6 6 6 6 10 10 10
37882- 18 18 18 22 22 22 30 30 30 42 42 42
37883- 50 50 50 66 66 66 86 86 86 101 98 89
37884-106 86 58 98 70 6 104 69 6 104 69 6
37885-104 69 6 91 60 6 82 62 34 90 90 90
37886- 62 62 62 38 38 38 22 22 22 14 14 14
37887- 10 10 10 10 10 10 10 10 10 10 10 10
37888- 10 10 10 10 10 10 6 6 6 10 10 10
37889- 10 10 10 10 10 10 10 10 10 14 14 14
37890- 22 22 22 42 42 42 70 70 70 89 81 66
37891- 80 54 7 104 69 6 124 80 6 137 92 6
37892-134 86 6 116 81 8 100 82 52 86 86 86
37893- 58 58 58 30 30 30 14 14 14 6 6 6
37894- 0 0 0 0 0 0 0 0 0 0 0 0
37895- 0 0 0 0 0 0 0 0 0 0 0 0
37896- 0 0 0 0 0 0 0 0 0 0 0 0
37897- 0 0 0 0 0 0 0 0 0 0 0 0
37898- 0 0 0 0 0 0 0 0 0 0 0 0
37899- 0 0 0 0 0 0 0 0 0 0 0 0
37900- 0 0 0 0 0 0 0 0 0 0 0 0
37901- 0 0 0 0 0 0 0 0 0 0 0 0
37902- 0 0 0 6 6 6 10 10 10 14 14 14
37903- 18 18 18 26 26 26 38 38 38 54 54 54
37904- 70 70 70 86 86 86 94 86 76 89 81 66
37905- 89 81 66 86 86 86 74 74 74 50 50 50
37906- 30 30 30 14 14 14 6 6 6 0 0 0
37907- 0 0 0 0 0 0 0 0 0 0 0 0
37908- 0 0 0 0 0 0 0 0 0 0 0 0
37909- 0 0 0 0 0 0 0 0 0 0 0 0
37910- 6 6 6 18 18 18 34 34 34 58 58 58
37911- 82 82 82 89 81 66 89 81 66 89 81 66
37912- 94 86 66 94 86 76 74 74 74 50 50 50
37913- 26 26 26 14 14 14 6 6 6 0 0 0
37914- 0 0 0 0 0 0 0 0 0 0 0 0
37915- 0 0 0 0 0 0 0 0 0 0 0 0
37916- 0 0 0 0 0 0 0 0 0 0 0 0
37917- 0 0 0 0 0 0 0 0 0 0 0 0
37918- 0 0 0 0 0 0 0 0 0 0 0 0
37919- 0 0 0 0 0 0 0 0 0 0 0 0
37920- 0 0 0 0 0 0 0 0 0 0 0 0
37921- 0 0 0 0 0 0 0 0 0 0 0 0
37922- 0 0 0 0 0 0 0 0 0 0 0 0
37923- 6 6 6 6 6 6 14 14 14 18 18 18
37924- 30 30 30 38 38 38 46 46 46 54 54 54
37925- 50 50 50 42 42 42 30 30 30 18 18 18
37926- 10 10 10 0 0 0 0 0 0 0 0 0
37927- 0 0 0 0 0 0 0 0 0 0 0 0
37928- 0 0 0 0 0 0 0 0 0 0 0 0
37929- 0 0 0 0 0 0 0 0 0 0 0 0
37930- 0 0 0 6 6 6 14 14 14 26 26 26
37931- 38 38 38 50 50 50 58 58 58 58 58 58
37932- 54 54 54 42 42 42 30 30 30 18 18 18
37933- 10 10 10 0 0 0 0 0 0 0 0 0
37934- 0 0 0 0 0 0 0 0 0 0 0 0
37935- 0 0 0 0 0 0 0 0 0 0 0 0
37936- 0 0 0 0 0 0 0 0 0 0 0 0
37937- 0 0 0 0 0 0 0 0 0 0 0 0
37938- 0 0 0 0 0 0 0 0 0 0 0 0
37939- 0 0 0 0 0 0 0 0 0 0 0 0
37940- 0 0 0 0 0 0 0 0 0 0 0 0
37941- 0 0 0 0 0 0 0 0 0 0 0 0
37942- 0 0 0 0 0 0 0 0 0 0 0 0
37943- 0 0 0 0 0 0 0 0 0 6 6 6
37944- 6 6 6 10 10 10 14 14 14 18 18 18
37945- 18 18 18 14 14 14 10 10 10 6 6 6
37946- 0 0 0 0 0 0 0 0 0 0 0 0
37947- 0 0 0 0 0 0 0 0 0 0 0 0
37948- 0 0 0 0 0 0 0 0 0 0 0 0
37949- 0 0 0 0 0 0 0 0 0 0 0 0
37950- 0 0 0 0 0 0 0 0 0 6 6 6
37951- 14 14 14 18 18 18 22 22 22 22 22 22
37952- 18 18 18 14 14 14 10 10 10 6 6 6
37953- 0 0 0 0 0 0 0 0 0 0 0 0
37954- 0 0 0 0 0 0 0 0 0 0 0 0
37955- 0 0 0 0 0 0 0 0 0 0 0 0
37956- 0 0 0 0 0 0 0 0 0 0 0 0
37957- 0 0 0 0 0 0 0 0 0 0 0 0
37958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37971+4 4 4 4 4 4
37972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37985+4 4 4 4 4 4
37986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37999+4 4 4 4 4 4
38000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38013+4 4 4 4 4 4
38014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38027+4 4 4 4 4 4
38028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38041+4 4 4 4 4 4
38042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38047+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38051+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38052+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38053+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055+4 4 4 4 4 4
38056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38061+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38062+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38065+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38066+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38067+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38068+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069+4 4 4 4 4 4
38070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38075+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38076+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38079+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38080+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38081+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38082+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38083+4 4 4 4 4 4
38084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38088+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38089+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38090+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38093+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38094+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38095+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38096+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38097+4 4 4 4 4 4
38098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38102+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38103+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38104+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38105+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38106+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38107+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38108+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38109+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38110+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38111+4 4 4 4 4 4
38112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38115+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38116+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38117+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38118+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38119+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38120+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38121+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38122+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38123+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38124+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38125+4 4 4 4 4 4
38126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38129+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38130+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38131+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38132+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38133+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38134+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38135+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38136+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38137+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38138+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38139+4 4 4 4 4 4
38140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38142+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38143+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38144+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38145+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38146+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38147+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38148+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38149+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38150+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38151+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38152+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38153+4 4 4 4 4 4
38154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38157+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38158+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38159+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38160+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38161+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38162+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38163+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38164+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38165+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38166+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38167+4 4 4 4 4 4
38168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38171+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38172+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38173+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38174+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38175+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38176+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38177+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38178+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38179+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38180+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38181+4 4 4 4 4 4
38182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38183+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38184+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38185+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38186+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38187+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38188+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38189+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38190+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38191+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38192+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38193+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38194+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38195+4 4 4 4 4 4
38196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38197+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38198+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38199+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38200+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38201+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38202+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38203+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38204+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38205+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38206+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38207+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38208+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38209+0 0 0 4 4 4
38210+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38211+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38212+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38213+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38214+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38215+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38216+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38217+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38218+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38219+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38220+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38221+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38222+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38223+2 0 0 0 0 0
38224+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38225+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38226+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38227+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38228+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38229+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38230+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38231+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38232+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38233+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38234+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38235+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38236+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38237+37 38 37 0 0 0
38238+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38239+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38240+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38241+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38242+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38243+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38244+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38245+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38246+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38247+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38248+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38249+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38250+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38251+85 115 134 4 0 0
38252+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38253+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38254+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38255+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38256+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38257+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38258+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38259+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38260+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38261+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38262+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38263+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38264+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38265+60 73 81 4 0 0
38266+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38267+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38268+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38269+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38270+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38271+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38272+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38273+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38274+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38275+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38276+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38277+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38278+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38279+16 19 21 4 0 0
38280+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38281+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38282+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38283+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38284+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38285+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38286+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38287+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38288+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38289+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38290+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38291+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38292+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38293+4 0 0 4 3 3
38294+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38295+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38296+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38298+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38299+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38300+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38301+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38302+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38303+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38304+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38305+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38306+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38307+3 2 2 4 4 4
38308+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38309+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38310+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38311+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38312+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38313+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38314+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38315+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38316+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38317+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38318+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38319+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38320+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38321+4 4 4 4 4 4
38322+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38323+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38324+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38325+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38326+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38327+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38328+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38329+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38330+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38331+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38332+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38333+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38334+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38335+4 4 4 4 4 4
38336+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38337+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38338+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38339+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38340+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38341+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38342+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38343+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38344+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38345+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38346+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38347+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38348+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38349+5 5 5 5 5 5
38350+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38351+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38352+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38353+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38354+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38355+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38356+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38357+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38358+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38359+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38360+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38361+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38362+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38363+5 5 5 4 4 4
38364+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38365+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38366+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38367+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38368+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38369+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38370+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38371+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38372+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38373+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38374+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38375+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38377+4 4 4 4 4 4
38378+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38379+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38380+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38381+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38382+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38383+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38384+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38385+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38386+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38387+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38388+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38389+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38391+4 4 4 4 4 4
38392+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38393+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38394+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38395+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38396+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38397+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38398+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38399+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38400+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38401+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38402+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38405+4 4 4 4 4 4
38406+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38407+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38408+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38409+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38410+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38411+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38412+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38413+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38414+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38415+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38416+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38419+4 4 4 4 4 4
38420+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38421+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38422+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38423+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38424+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38425+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38426+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38427+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38428+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38429+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38430+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38433+4 4 4 4 4 4
38434+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38435+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38436+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38437+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38438+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38439+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38440+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38441+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38442+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38443+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38444+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38447+4 4 4 4 4 4
38448+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38449+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38450+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38451+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38452+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38453+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38454+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38455+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38456+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38457+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38458+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38461+4 4 4 4 4 4
38462+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38463+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38464+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38465+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38466+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38467+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38468+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38469+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38470+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38471+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38472+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38475+4 4 4 4 4 4
38476+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38477+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38478+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38479+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38480+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38481+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38482+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38483+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38484+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38485+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38486+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38489+4 4 4 4 4 4
38490+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38491+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38492+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38493+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38494+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38495+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38496+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38497+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38498+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38499+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38500+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38503+4 4 4 4 4 4
38504+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38505+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38506+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38507+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38508+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38509+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38510+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38511+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38512+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38513+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38514+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38517+4 4 4 4 4 4
38518+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38519+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38520+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38521+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38522+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38523+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38524+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38525+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38526+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38527+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38528+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38531+4 4 4 4 4 4
38532+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38533+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38534+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38535+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38536+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38537+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38538+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38539+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38540+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38541+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38542+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38545+4 4 4 4 4 4
38546+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38547+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38548+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38549+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38550+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38551+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38552+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38553+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38554+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38555+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38556+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38559+4 4 4 4 4 4
38560+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38561+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38562+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38563+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38564+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38565+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38566+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38567+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38568+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38569+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38570+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38573+4 4 4 4 4 4
38574+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38575+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38576+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38577+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38578+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38579+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38580+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38581+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38582+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38583+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38584+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38587+4 4 4 4 4 4
38588+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38589+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38590+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38591+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38592+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38593+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38594+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38595+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38596+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38597+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38598+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38601+4 4 4 4 4 4
38602+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38603+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38604+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38605+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38606+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38607+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38608+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38609+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38610+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38611+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38612+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38615+4 4 4 4 4 4
38616+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38617+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38618+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38619+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38620+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38621+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38622+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38623+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38624+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38625+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38626+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38629+4 4 4 4 4 4
38630+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38631+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38632+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38633+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38634+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38635+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38636+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38637+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38638+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38639+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38640+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38643+4 4 4 4 4 4
38644+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38645+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38646+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38647+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38648+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38649+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38650+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38651+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38652+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38653+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38654+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38657+4 4 4 4 4 4
38658+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38659+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38660+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38661+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38662+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38663+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38664+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38665+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38666+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38667+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38668+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38671+4 4 4 4 4 4
38672+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38673+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38674+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38675+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38676+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38677+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38678+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38679+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38680+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38681+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38682+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38685+4 4 4 4 4 4
38686+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38687+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38688+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38689+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38690+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38691+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38692+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38693+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38694+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38695+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38696+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38699+4 4 4 4 4 4
38700+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38701+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38702+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38703+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38704+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38705+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38706+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38707+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38708+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38709+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38710+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38713+4 4 4 4 4 4
38714+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38715+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38716+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38717+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38718+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38719+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38720+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38721+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38722+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38723+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38724+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38727+4 4 4 4 4 4
38728+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38729+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38730+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38731+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38732+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38733+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38734+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38735+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38736+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38737+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38738+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38741+4 4 4 4 4 4
38742+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38743+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38744+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38745+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38746+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38747+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38748+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38749+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38750+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38751+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38752+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38755+4 4 4 4 4 4
38756+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38757+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38758+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38759+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38760+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38761+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38762+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38763+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38764+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38765+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38766+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38769+4 4 4 4 4 4
38770+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38771+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38772+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38773+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38774+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38775+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38776+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38777+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38778+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38779+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38783+4 4 4 4 4 4
38784+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38785+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38786+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38787+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38788+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38789+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38790+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38791+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38792+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38793+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38797+4 4 4 4 4 4
38798+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38799+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38800+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38801+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38802+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38803+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38804+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38805+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38806+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38807+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38811+4 4 4 4 4 4
38812+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38813+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38814+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38815+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38816+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38817+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38818+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38819+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38820+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38821+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38825+4 4 4 4 4 4
38826+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38827+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38828+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38829+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38830+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38831+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38832+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38833+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38834+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38839+4 4 4 4 4 4
38840+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38841+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38842+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38843+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38844+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38845+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38846+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38847+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38848+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38853+4 4 4 4 4 4
38854+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38855+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38856+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38857+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38858+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38859+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38860+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38861+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38862+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38867+4 4 4 4 4 4
38868+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38869+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38870+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38871+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38872+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38873+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38874+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38875+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38881+4 4 4 4 4 4
38882+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38883+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38884+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38885+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38886+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38887+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38888+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38889+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38895+4 4 4 4 4 4
38896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38897+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38898+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38899+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38900+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38901+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38902+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38903+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38909+4 4 4 4 4 4
38910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38911+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38912+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38913+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38914+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38915+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38916+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38917+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38923+4 4 4 4 4 4
38924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38925+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38926+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38927+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38928+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38929+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38930+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38931+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38937+4 4 4 4 4 4
38938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38940+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38941+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38942+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38943+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38944+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38945+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38951+4 4 4 4 4 4
38952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38955+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38956+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
38957+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
38958+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
38959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38965+4 4 4 4 4 4
38966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38969+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38970+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38971+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
38972+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
38973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38979+4 4 4 4 4 4
38980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38983+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38984+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38985+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38986+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
38987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38993+4 4 4 4 4 4
38994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38997+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
38998+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
38999+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39000+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39007+4 4 4 4 4 4
39008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39012+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39013+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39014+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39021+4 4 4 4 4 4
39022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39027+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39028+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39035+4 4 4 4 4 4
39036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39041+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39042+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39049+4 4 4 4 4 4
39050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39055+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063+4 4 4 4 4 4
39064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39069+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077+4 4 4 4 4 4
39078diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39079index 3473e75..c930142 100644
39080--- a/drivers/video/udlfb.c
39081+++ b/drivers/video/udlfb.c
39082@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39083 dlfb_urb_completion(urb);
39084
39085 error:
39086- atomic_add(bytes_sent, &dev->bytes_sent);
39087- atomic_add(bytes_identical, &dev->bytes_identical);
39088- atomic_add(width*height*2, &dev->bytes_rendered);
39089+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39090+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39091+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39092 end_cycles = get_cycles();
39093- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39094+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39095 >> 10)), /* Kcycles */
39096 &dev->cpu_kcycles_used);
39097
39098@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39099 dlfb_urb_completion(urb);
39100
39101 error:
39102- atomic_add(bytes_sent, &dev->bytes_sent);
39103- atomic_add(bytes_identical, &dev->bytes_identical);
39104- atomic_add(bytes_rendered, &dev->bytes_rendered);
39105+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39106+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39107+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39108 end_cycles = get_cycles();
39109- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39110+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39111 >> 10)), /* Kcycles */
39112 &dev->cpu_kcycles_used);
39113 }
39114@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39115 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39116 struct dlfb_data *dev = fb_info->par;
39117 return snprintf(buf, PAGE_SIZE, "%u\n",
39118- atomic_read(&dev->bytes_rendered));
39119+ atomic_read_unchecked(&dev->bytes_rendered));
39120 }
39121
39122 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39123@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39124 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39125 struct dlfb_data *dev = fb_info->par;
39126 return snprintf(buf, PAGE_SIZE, "%u\n",
39127- atomic_read(&dev->bytes_identical));
39128+ atomic_read_unchecked(&dev->bytes_identical));
39129 }
39130
39131 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39132@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39133 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39134 struct dlfb_data *dev = fb_info->par;
39135 return snprintf(buf, PAGE_SIZE, "%u\n",
39136- atomic_read(&dev->bytes_sent));
39137+ atomic_read_unchecked(&dev->bytes_sent));
39138 }
39139
39140 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39141@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39142 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39143 struct dlfb_data *dev = fb_info->par;
39144 return snprintf(buf, PAGE_SIZE, "%u\n",
39145- atomic_read(&dev->cpu_kcycles_used));
39146+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39147 }
39148
39149 static ssize_t edid_show(
39150@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39151 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39152 struct dlfb_data *dev = fb_info->par;
39153
39154- atomic_set(&dev->bytes_rendered, 0);
39155- atomic_set(&dev->bytes_identical, 0);
39156- atomic_set(&dev->bytes_sent, 0);
39157- atomic_set(&dev->cpu_kcycles_used, 0);
39158+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39159+ atomic_set_unchecked(&dev->bytes_identical, 0);
39160+ atomic_set_unchecked(&dev->bytes_sent, 0);
39161+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39162
39163 return count;
39164 }
39165diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39166index 7f8472c..9842e87 100644
39167--- a/drivers/video/uvesafb.c
39168+++ b/drivers/video/uvesafb.c
39169@@ -19,6 +19,7 @@
39170 #include <linux/io.h>
39171 #include <linux/mutex.h>
39172 #include <linux/slab.h>
39173+#include <linux/moduleloader.h>
39174 #include <video/edid.h>
39175 #include <video/uvesafb.h>
39176 #ifdef CONFIG_X86
39177@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39178 NULL,
39179 };
39180
39181- return call_usermodehelper(v86d_path, argv, envp, 1);
39182+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39183 }
39184
39185 /*
39186@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39187 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39188 par->pmi_setpal = par->ypan = 0;
39189 } else {
39190+
39191+#ifdef CONFIG_PAX_KERNEXEC
39192+#ifdef CONFIG_MODULES
39193+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39194+#endif
39195+ if (!par->pmi_code) {
39196+ par->pmi_setpal = par->ypan = 0;
39197+ return 0;
39198+ }
39199+#endif
39200+
39201 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39202 + task->t.regs.edi);
39203+
39204+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39205+ pax_open_kernel();
39206+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39207+ pax_close_kernel();
39208+
39209+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39210+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39211+#else
39212 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39213 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39214+#endif
39215+
39216 printk(KERN_INFO "uvesafb: protected mode interface info at "
39217 "%04x:%04x\n",
39218 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39219@@ -1821,6 +1844,11 @@ out:
39220 if (par->vbe_modes)
39221 kfree(par->vbe_modes);
39222
39223+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39224+ if (par->pmi_code)
39225+ module_free_exec(NULL, par->pmi_code);
39226+#endif
39227+
39228 framebuffer_release(info);
39229 return err;
39230 }
39231@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39232 kfree(par->vbe_state_orig);
39233 if (par->vbe_state_saved)
39234 kfree(par->vbe_state_saved);
39235+
39236+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39237+ if (par->pmi_code)
39238+ module_free_exec(NULL, par->pmi_code);
39239+#endif
39240+
39241 }
39242
39243 framebuffer_release(info);
39244diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39245index 501b340..86bd4cf 100644
39246--- a/drivers/video/vesafb.c
39247+++ b/drivers/video/vesafb.c
39248@@ -9,6 +9,7 @@
39249 */
39250
39251 #include <linux/module.h>
39252+#include <linux/moduleloader.h>
39253 #include <linux/kernel.h>
39254 #include <linux/errno.h>
39255 #include <linux/string.h>
39256@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39257 static int vram_total __initdata; /* Set total amount of memory */
39258 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39259 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39260-static void (*pmi_start)(void) __read_mostly;
39261-static void (*pmi_pal) (void) __read_mostly;
39262+static void (*pmi_start)(void) __read_only;
39263+static void (*pmi_pal) (void) __read_only;
39264 static int depth __read_mostly;
39265 static int vga_compat __read_mostly;
39266 /* --------------------------------------------------------------------- */
39267@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39268 unsigned int size_vmode;
39269 unsigned int size_remap;
39270 unsigned int size_total;
39271+ void *pmi_code = NULL;
39272
39273 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39274 return -ENODEV;
39275@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39276 size_remap = size_total;
39277 vesafb_fix.smem_len = size_remap;
39278
39279-#ifndef __i386__
39280- screen_info.vesapm_seg = 0;
39281-#endif
39282-
39283 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39284 printk(KERN_WARNING
39285 "vesafb: cannot reserve video memory at 0x%lx\n",
39286@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39287 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39288 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39289
39290+#ifdef __i386__
39291+
39292+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39293+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39294+ if (!pmi_code)
39295+#elif !defined(CONFIG_PAX_KERNEXEC)
39296+ if (0)
39297+#endif
39298+
39299+#endif
39300+ screen_info.vesapm_seg = 0;
39301+
39302 if (screen_info.vesapm_seg) {
39303- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39304- screen_info.vesapm_seg,screen_info.vesapm_off);
39305+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39306+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39307 }
39308
39309 if (screen_info.vesapm_seg < 0xc000)
39310@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39311
39312 if (ypan || pmi_setpal) {
39313 unsigned short *pmi_base;
39314+
39315 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39316- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39317- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39318+
39319+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39320+ pax_open_kernel();
39321+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39322+#else
39323+ pmi_code = pmi_base;
39324+#endif
39325+
39326+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39327+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39328+
39329+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39330+ pmi_start = ktva_ktla(pmi_start);
39331+ pmi_pal = ktva_ktla(pmi_pal);
39332+ pax_close_kernel();
39333+#endif
39334+
39335 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39336 if (pmi_base[3]) {
39337 printk(KERN_INFO "vesafb: pmi: ports = ");
39338@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39339 info->node, info->fix.id);
39340 return 0;
39341 err:
39342+
39343+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39344+ module_free_exec(NULL, pmi_code);
39345+#endif
39346+
39347 if (info->screen_base)
39348 iounmap(info->screen_base);
39349 framebuffer_release(info);
39350diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39351index 88714ae..16c2e11 100644
39352--- a/drivers/video/via/via_clock.h
39353+++ b/drivers/video/via/via_clock.h
39354@@ -56,7 +56,7 @@ struct via_clock {
39355
39356 void (*set_engine_pll_state)(u8 state);
39357 void (*set_engine_pll)(struct via_pll_config config);
39358-};
39359+} __no_const;
39360
39361
39362 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39363diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39364index e56c934..fc22f4b 100644
39365--- a/drivers/xen/xen-pciback/conf_space.h
39366+++ b/drivers/xen/xen-pciback/conf_space.h
39367@@ -44,15 +44,15 @@ struct config_field {
39368 struct {
39369 conf_dword_write write;
39370 conf_dword_read read;
39371- } dw;
39372+ } __no_const dw;
39373 struct {
39374 conf_word_write write;
39375 conf_word_read read;
39376- } w;
39377+ } __no_const w;
39378 struct {
39379 conf_byte_write write;
39380 conf_byte_read read;
39381- } b;
39382+ } __no_const b;
39383 } u;
39384 struct list_head list;
39385 };
39386diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39387index 879ed88..bc03a01 100644
39388--- a/fs/9p/vfs_inode.c
39389+++ b/fs/9p/vfs_inode.c
39390@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39391 void
39392 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39393 {
39394- char *s = nd_get_link(nd);
39395+ const char *s = nd_get_link(nd);
39396
39397 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39398 IS_ERR(s) ? "<error>" : s);
39399diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39400index 79e2ca7..5828ad1 100644
39401--- a/fs/Kconfig.binfmt
39402+++ b/fs/Kconfig.binfmt
39403@@ -86,7 +86,7 @@ config HAVE_AOUT
39404
39405 config BINFMT_AOUT
39406 tristate "Kernel support for a.out and ECOFF binaries"
39407- depends on HAVE_AOUT
39408+ depends on HAVE_AOUT && BROKEN
39409 ---help---
39410 A.out (Assembler.OUTput) is a set of formats for libraries and
39411 executables used in the earliest versions of UNIX. Linux used
39412diff --git a/fs/aio.c b/fs/aio.c
39413index 969beb0..09fab51 100644
39414--- a/fs/aio.c
39415+++ b/fs/aio.c
39416@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39417 size += sizeof(struct io_event) * nr_events;
39418 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39419
39420- if (nr_pages < 0)
39421+ if (nr_pages <= 0)
39422 return -EINVAL;
39423
39424 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39425@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39426 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39427 {
39428 ssize_t ret;
39429+ struct iovec iovstack;
39430
39431 #ifdef CONFIG_COMPAT
39432 if (compat)
39433 ret = compat_rw_copy_check_uvector(type,
39434 (struct compat_iovec __user *)kiocb->ki_buf,
39435- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39436+ kiocb->ki_nbytes, 1, &iovstack,
39437 &kiocb->ki_iovec, 1);
39438 else
39439 #endif
39440 ret = rw_copy_check_uvector(type,
39441 (struct iovec __user *)kiocb->ki_buf,
39442- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39443+ kiocb->ki_nbytes, 1, &iovstack,
39444 &kiocb->ki_iovec, 1);
39445 if (ret < 0)
39446 goto out;
39447
39448+ if (kiocb->ki_iovec == &iovstack) {
39449+ kiocb->ki_inline_vec = iovstack;
39450+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39451+ }
39452 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39453 kiocb->ki_cur_seg = 0;
39454 /* ki_nbytes/left now reflect bytes instead of segs */
39455diff --git a/fs/attr.c b/fs/attr.c
39456index 7ee7ba4..0c61a60 100644
39457--- a/fs/attr.c
39458+++ b/fs/attr.c
39459@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39460 unsigned long limit;
39461
39462 limit = rlimit(RLIMIT_FSIZE);
39463+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39464 if (limit != RLIM_INFINITY && offset > limit)
39465 goto out_sig;
39466 if (offset > inode->i_sb->s_maxbytes)
39467diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39468index e1fbdee..cd5ea56 100644
39469--- a/fs/autofs4/waitq.c
39470+++ b/fs/autofs4/waitq.c
39471@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39472 {
39473 unsigned long sigpipe, flags;
39474 mm_segment_t fs;
39475- const char *data = (const char *)addr;
39476+ const char __user *data = (const char __force_user *)addr;
39477 ssize_t wr = 0;
39478
39479 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39480diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39481index 8342ca6..82fd192 100644
39482--- a/fs/befs/linuxvfs.c
39483+++ b/fs/befs/linuxvfs.c
39484@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39485 {
39486 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39487 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39488- char *link = nd_get_link(nd);
39489+ const char *link = nd_get_link(nd);
39490 if (!IS_ERR(link))
39491 kfree(link);
39492 }
39493diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39494index a6395bd..a5b24c4 100644
39495--- a/fs/binfmt_aout.c
39496+++ b/fs/binfmt_aout.c
39497@@ -16,6 +16,7 @@
39498 #include <linux/string.h>
39499 #include <linux/fs.h>
39500 #include <linux/file.h>
39501+#include <linux/security.h>
39502 #include <linux/stat.h>
39503 #include <linux/fcntl.h>
39504 #include <linux/ptrace.h>
39505@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39506 #endif
39507 # define START_STACK(u) ((void __user *)u.start_stack)
39508
39509+ memset(&dump, 0, sizeof(dump));
39510+
39511 fs = get_fs();
39512 set_fs(KERNEL_DS);
39513 has_dumped = 1;
39514@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39515
39516 /* If the size of the dump file exceeds the rlimit, then see what would happen
39517 if we wrote the stack, but not the data area. */
39518+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39519 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39520 dump.u_dsize = 0;
39521
39522 /* Make sure we have enough room to write the stack and data areas. */
39523+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39524 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39525 dump.u_ssize = 0;
39526
39527@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39528 rlim = rlimit(RLIMIT_DATA);
39529 if (rlim >= RLIM_INFINITY)
39530 rlim = ~0;
39531+
39532+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39533 if (ex.a_data + ex.a_bss > rlim)
39534 return -ENOMEM;
39535
39536@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39537 install_exec_creds(bprm);
39538 current->flags &= ~PF_FORKNOEXEC;
39539
39540+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39541+ current->mm->pax_flags = 0UL;
39542+#endif
39543+
39544+#ifdef CONFIG_PAX_PAGEEXEC
39545+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39546+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39547+
39548+#ifdef CONFIG_PAX_EMUTRAMP
39549+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39550+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39551+#endif
39552+
39553+#ifdef CONFIG_PAX_MPROTECT
39554+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39555+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39556+#endif
39557+
39558+ }
39559+#endif
39560+
39561 if (N_MAGIC(ex) == OMAGIC) {
39562 unsigned long text_addr, map_size;
39563 loff_t pos;
39564@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39565
39566 down_write(&current->mm->mmap_sem);
39567 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39568- PROT_READ | PROT_WRITE | PROT_EXEC,
39569+ PROT_READ | PROT_WRITE,
39570 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39571 fd_offset + ex.a_text);
39572 up_write(&current->mm->mmap_sem);
39573diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39574index 21ac5ee..c1090ea 100644
39575--- a/fs/binfmt_elf.c
39576+++ b/fs/binfmt_elf.c
39577@@ -32,6 +32,7 @@
39578 #include <linux/elf.h>
39579 #include <linux/utsname.h>
39580 #include <linux/coredump.h>
39581+#include <linux/xattr.h>
39582 #include <asm/uaccess.h>
39583 #include <asm/param.h>
39584 #include <asm/page.h>
39585@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39586 #define elf_core_dump NULL
39587 #endif
39588
39589+#ifdef CONFIG_PAX_MPROTECT
39590+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39591+#endif
39592+
39593 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39594 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39595 #else
39596@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39597 .load_binary = load_elf_binary,
39598 .load_shlib = load_elf_library,
39599 .core_dump = elf_core_dump,
39600+
39601+#ifdef CONFIG_PAX_MPROTECT
39602+ .handle_mprotect= elf_handle_mprotect,
39603+#endif
39604+
39605 .min_coredump = ELF_EXEC_PAGESIZE,
39606 };
39607
39608@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39609
39610 static int set_brk(unsigned long start, unsigned long end)
39611 {
39612+ unsigned long e = end;
39613+
39614 start = ELF_PAGEALIGN(start);
39615 end = ELF_PAGEALIGN(end);
39616 if (end > start) {
39617@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39618 if (BAD_ADDR(addr))
39619 return addr;
39620 }
39621- current->mm->start_brk = current->mm->brk = end;
39622+ current->mm->start_brk = current->mm->brk = e;
39623 return 0;
39624 }
39625
39626@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39627 elf_addr_t __user *u_rand_bytes;
39628 const char *k_platform = ELF_PLATFORM;
39629 const char *k_base_platform = ELF_BASE_PLATFORM;
39630- unsigned char k_rand_bytes[16];
39631+ u32 k_rand_bytes[4];
39632 int items;
39633 elf_addr_t *elf_info;
39634 int ei_index = 0;
39635 const struct cred *cred = current_cred();
39636 struct vm_area_struct *vma;
39637+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39638
39639 /*
39640 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39641@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39642 * Generate 16 random bytes for userspace PRNG seeding.
39643 */
39644 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39645- u_rand_bytes = (elf_addr_t __user *)
39646- STACK_ALLOC(p, sizeof(k_rand_bytes));
39647+ srandom32(k_rand_bytes[0] ^ random32());
39648+ srandom32(k_rand_bytes[1] ^ random32());
39649+ srandom32(k_rand_bytes[2] ^ random32());
39650+ srandom32(k_rand_bytes[3] ^ random32());
39651+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39652+ u_rand_bytes = (elf_addr_t __user *) p;
39653 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39654 return -EFAULT;
39655
39656@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39657 return -EFAULT;
39658 current->mm->env_end = p;
39659
39660+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39661+
39662 /* Put the elf_info on the stack in the right place. */
39663 sp = (elf_addr_t __user *)envp + 1;
39664- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39665+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39666 return -EFAULT;
39667 return 0;
39668 }
39669@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39670 {
39671 struct elf_phdr *elf_phdata;
39672 struct elf_phdr *eppnt;
39673- unsigned long load_addr = 0;
39674+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39675 int load_addr_set = 0;
39676 unsigned long last_bss = 0, elf_bss = 0;
39677- unsigned long error = ~0UL;
39678+ unsigned long error = -EINVAL;
39679 unsigned long total_size;
39680 int retval, i, size;
39681
39682@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39683 goto out_close;
39684 }
39685
39686+#ifdef CONFIG_PAX_SEGMEXEC
39687+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39688+ pax_task_size = SEGMEXEC_TASK_SIZE;
39689+#endif
39690+
39691 eppnt = elf_phdata;
39692 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39693 if (eppnt->p_type == PT_LOAD) {
39694@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39695 k = load_addr + eppnt->p_vaddr;
39696 if (BAD_ADDR(k) ||
39697 eppnt->p_filesz > eppnt->p_memsz ||
39698- eppnt->p_memsz > TASK_SIZE ||
39699- TASK_SIZE - eppnt->p_memsz < k) {
39700+ eppnt->p_memsz > pax_task_size ||
39701+ pax_task_size - eppnt->p_memsz < k) {
39702 error = -ENOMEM;
39703 goto out_close;
39704 }
39705@@ -528,6 +552,348 @@ out:
39706 return error;
39707 }
39708
39709+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39710+{
39711+ unsigned long pax_flags = 0UL;
39712+
39713+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39714+
39715+#ifdef CONFIG_PAX_PAGEEXEC
39716+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39717+ pax_flags |= MF_PAX_PAGEEXEC;
39718+#endif
39719+
39720+#ifdef CONFIG_PAX_SEGMEXEC
39721+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39722+ pax_flags |= MF_PAX_SEGMEXEC;
39723+#endif
39724+
39725+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39726+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39727+ if ((__supported_pte_mask & _PAGE_NX))
39728+ pax_flags &= ~MF_PAX_SEGMEXEC;
39729+ else
39730+ pax_flags &= ~MF_PAX_PAGEEXEC;
39731+ }
39732+#endif
39733+
39734+#ifdef CONFIG_PAX_EMUTRAMP
39735+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39736+ pax_flags |= MF_PAX_EMUTRAMP;
39737+#endif
39738+
39739+#ifdef CONFIG_PAX_MPROTECT
39740+ if (elf_phdata->p_flags & PF_MPROTECT)
39741+ pax_flags |= MF_PAX_MPROTECT;
39742+#endif
39743+
39744+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39745+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39746+ pax_flags |= MF_PAX_RANDMMAP;
39747+#endif
39748+
39749+#endif
39750+
39751+ return pax_flags;
39752+}
39753+
39754+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39755+{
39756+ unsigned long pax_flags = 0UL;
39757+
39758+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39759+
39760+#ifdef CONFIG_PAX_PAGEEXEC
39761+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39762+ pax_flags |= MF_PAX_PAGEEXEC;
39763+#endif
39764+
39765+#ifdef CONFIG_PAX_SEGMEXEC
39766+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39767+ pax_flags |= MF_PAX_SEGMEXEC;
39768+#endif
39769+
39770+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39771+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39772+ if ((__supported_pte_mask & _PAGE_NX))
39773+ pax_flags &= ~MF_PAX_SEGMEXEC;
39774+ else
39775+ pax_flags &= ~MF_PAX_PAGEEXEC;
39776+ }
39777+#endif
39778+
39779+#ifdef CONFIG_PAX_EMUTRAMP
39780+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39781+ pax_flags |= MF_PAX_EMUTRAMP;
39782+#endif
39783+
39784+#ifdef CONFIG_PAX_MPROTECT
39785+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39786+ pax_flags |= MF_PAX_MPROTECT;
39787+#endif
39788+
39789+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39790+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39791+ pax_flags |= MF_PAX_RANDMMAP;
39792+#endif
39793+
39794+#endif
39795+
39796+ return pax_flags;
39797+}
39798+
39799+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39800+{
39801+ unsigned long pax_flags = 0UL;
39802+
39803+#ifdef CONFIG_PAX_EI_PAX
39804+
39805+#ifdef CONFIG_PAX_PAGEEXEC
39806+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39807+ pax_flags |= MF_PAX_PAGEEXEC;
39808+#endif
39809+
39810+#ifdef CONFIG_PAX_SEGMEXEC
39811+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39812+ pax_flags |= MF_PAX_SEGMEXEC;
39813+#endif
39814+
39815+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39816+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39817+ if ((__supported_pte_mask & _PAGE_NX))
39818+ pax_flags &= ~MF_PAX_SEGMEXEC;
39819+ else
39820+ pax_flags &= ~MF_PAX_PAGEEXEC;
39821+ }
39822+#endif
39823+
39824+#ifdef CONFIG_PAX_EMUTRAMP
39825+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39826+ pax_flags |= MF_PAX_EMUTRAMP;
39827+#endif
39828+
39829+#ifdef CONFIG_PAX_MPROTECT
39830+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39831+ pax_flags |= MF_PAX_MPROTECT;
39832+#endif
39833+
39834+#ifdef CONFIG_PAX_ASLR
39835+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39836+ pax_flags |= MF_PAX_RANDMMAP;
39837+#endif
39838+
39839+#else
39840+
39841+#ifdef CONFIG_PAX_PAGEEXEC
39842+ pax_flags |= MF_PAX_PAGEEXEC;
39843+#endif
39844+
39845+#ifdef CONFIG_PAX_MPROTECT
39846+ pax_flags |= MF_PAX_MPROTECT;
39847+#endif
39848+
39849+#ifdef CONFIG_PAX_RANDMMAP
39850+ pax_flags |= MF_PAX_RANDMMAP;
39851+#endif
39852+
39853+#ifdef CONFIG_PAX_SEGMEXEC
39854+ if (!(__supported_pte_mask & _PAGE_NX)) {
39855+ pax_flags &= ~MF_PAX_PAGEEXEC;
39856+ pax_flags |= MF_PAX_SEGMEXEC;
39857+ }
39858+#endif
39859+
39860+#endif
39861+
39862+ return pax_flags;
39863+}
39864+
39865+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39866+{
39867+
39868+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39869+ unsigned long i;
39870+
39871+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39872+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39873+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39874+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39875+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39876+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39877+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39878+ return ~0UL;
39879+
39880+#ifdef CONFIG_PAX_SOFTMODE
39881+ if (pax_softmode)
39882+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39883+ else
39884+#endif
39885+
39886+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39887+ break;
39888+ }
39889+#endif
39890+
39891+ return ~0UL;
39892+}
39893+
39894+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39895+{
39896+ unsigned long pax_flags = 0UL;
39897+
39898+#ifdef CONFIG_PAX_PAGEEXEC
39899+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39900+ pax_flags |= MF_PAX_PAGEEXEC;
39901+#endif
39902+
39903+#ifdef CONFIG_PAX_SEGMEXEC
39904+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39905+ pax_flags |= MF_PAX_SEGMEXEC;
39906+#endif
39907+
39908+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39909+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39910+ if ((__supported_pte_mask & _PAGE_NX))
39911+ pax_flags &= ~MF_PAX_SEGMEXEC;
39912+ else
39913+ pax_flags &= ~MF_PAX_PAGEEXEC;
39914+ }
39915+#endif
39916+
39917+#ifdef CONFIG_PAX_EMUTRAMP
39918+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
39919+ pax_flags |= MF_PAX_EMUTRAMP;
39920+#endif
39921+
39922+#ifdef CONFIG_PAX_MPROTECT
39923+ if (pax_flags_softmode & MF_PAX_MPROTECT)
39924+ pax_flags |= MF_PAX_MPROTECT;
39925+#endif
39926+
39927+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39928+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
39929+ pax_flags |= MF_PAX_RANDMMAP;
39930+#endif
39931+
39932+ return pax_flags;
39933+}
39934+
39935+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
39936+{
39937+ unsigned long pax_flags = 0UL;
39938+
39939+#ifdef CONFIG_PAX_PAGEEXEC
39940+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
39941+ pax_flags |= MF_PAX_PAGEEXEC;
39942+#endif
39943+
39944+#ifdef CONFIG_PAX_SEGMEXEC
39945+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
39946+ pax_flags |= MF_PAX_SEGMEXEC;
39947+#endif
39948+
39949+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39950+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39951+ if ((__supported_pte_mask & _PAGE_NX))
39952+ pax_flags &= ~MF_PAX_SEGMEXEC;
39953+ else
39954+ pax_flags &= ~MF_PAX_PAGEEXEC;
39955+ }
39956+#endif
39957+
39958+#ifdef CONFIG_PAX_EMUTRAMP
39959+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
39960+ pax_flags |= MF_PAX_EMUTRAMP;
39961+#endif
39962+
39963+#ifdef CONFIG_PAX_MPROTECT
39964+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
39965+ pax_flags |= MF_PAX_MPROTECT;
39966+#endif
39967+
39968+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39969+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
39970+ pax_flags |= MF_PAX_RANDMMAP;
39971+#endif
39972+
39973+ return pax_flags;
39974+}
39975+
39976+static unsigned long pax_parse_xattr_pax(struct file * const file)
39977+{
39978+
39979+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
39980+ ssize_t xattr_size, i;
39981+ unsigned char xattr_value[5];
39982+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
39983+
39984+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
39985+ if (xattr_size <= 0)
39986+ return ~0UL;
39987+
39988+ for (i = 0; i < xattr_size; i++)
39989+ switch (xattr_value[i]) {
39990+ default:
39991+ return ~0UL;
39992+
39993+#define parse_flag(option1, option2, flag) \
39994+ case option1: \
39995+ pax_flags_hardmode |= MF_PAX_##flag; \
39996+ break; \
39997+ case option2: \
39998+ pax_flags_softmode |= MF_PAX_##flag; \
39999+ break;
40000+
40001+ parse_flag('p', 'P', PAGEEXEC);
40002+ parse_flag('e', 'E', EMUTRAMP);
40003+ parse_flag('m', 'M', MPROTECT);
40004+ parse_flag('r', 'R', RANDMMAP);
40005+ parse_flag('s', 'S', SEGMEXEC);
40006+
40007+#undef parse_flag
40008+ }
40009+
40010+ if (pax_flags_hardmode & pax_flags_softmode)
40011+ return ~0UL;
40012+
40013+#ifdef CONFIG_PAX_SOFTMODE
40014+ if (pax_softmode)
40015+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40016+ else
40017+#endif
40018+
40019+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40020+#else
40021+ return ~0UL;
40022+#endif
40023+}
40024+
40025+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40026+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40027+{
40028+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40029+
40030+ pax_flags = pax_parse_ei_pax(elf_ex);
40031+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40032+ xattr_pax_flags = pax_parse_xattr_pax(file);
40033+
40034+ if (pt_pax_flags == ~0UL)
40035+ pt_pax_flags = xattr_pax_flags;
40036+ else if (xattr_pax_flags == ~0UL)
40037+ xattr_pax_flags = pt_pax_flags;
40038+ if (pt_pax_flags != xattr_pax_flags)
40039+ return -EINVAL;
40040+ if (pt_pax_flags != ~0UL)
40041+ pax_flags = pt_pax_flags;
40042+
40043+ if (0 > pax_check_flags(&pax_flags))
40044+ return -EINVAL;
40045+
40046+ current->mm->pax_flags = pax_flags;
40047+ return 0;
40048+}
40049+#endif
40050+
40051 /*
40052 * These are the functions used to load ELF style executables and shared
40053 * libraries. There is no binary dependent code anywhere else.
40054@@ -544,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40055 {
40056 unsigned int random_variable = 0;
40057
40058+#ifdef CONFIG_PAX_RANDUSTACK
40059+ if (randomize_va_space)
40060+ return stack_top - current->mm->delta_stack;
40061+#endif
40062+
40063 if ((current->flags & PF_RANDOMIZE) &&
40064 !(current->personality & ADDR_NO_RANDOMIZE)) {
40065 random_variable = get_random_int() & STACK_RND_MASK;
40066@@ -562,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40067 unsigned long load_addr = 0, load_bias = 0;
40068 int load_addr_set = 0;
40069 char * elf_interpreter = NULL;
40070- unsigned long error;
40071+ unsigned long error = 0;
40072 struct elf_phdr *elf_ppnt, *elf_phdata;
40073 unsigned long elf_bss, elf_brk;
40074 int retval, i;
40075@@ -572,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40076 unsigned long start_code, end_code, start_data, end_data;
40077 unsigned long reloc_func_desc __maybe_unused = 0;
40078 int executable_stack = EXSTACK_DEFAULT;
40079- unsigned long def_flags = 0;
40080 struct {
40081 struct elfhdr elf_ex;
40082 struct elfhdr interp_elf_ex;
40083 } *loc;
40084+ unsigned long pax_task_size = TASK_SIZE;
40085
40086 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40087 if (!loc) {
40088@@ -713,11 +1084,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40089
40090 /* OK, This is the point of no return */
40091 current->flags &= ~PF_FORKNOEXEC;
40092- current->mm->def_flags = def_flags;
40093+
40094+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40095+ current->mm->pax_flags = 0UL;
40096+#endif
40097+
40098+#ifdef CONFIG_PAX_DLRESOLVE
40099+ current->mm->call_dl_resolve = 0UL;
40100+#endif
40101+
40102+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40103+ current->mm->call_syscall = 0UL;
40104+#endif
40105+
40106+#ifdef CONFIG_PAX_ASLR
40107+ current->mm->delta_mmap = 0UL;
40108+ current->mm->delta_stack = 0UL;
40109+#endif
40110+
40111+ current->mm->def_flags = 0;
40112+
40113+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40114+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40115+ send_sig(SIGKILL, current, 0);
40116+ goto out_free_dentry;
40117+ }
40118+#endif
40119+
40120+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40121+ pax_set_initial_flags(bprm);
40122+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40123+ if (pax_set_initial_flags_func)
40124+ (pax_set_initial_flags_func)(bprm);
40125+#endif
40126+
40127+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40128+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40129+ current->mm->context.user_cs_limit = PAGE_SIZE;
40130+ current->mm->def_flags |= VM_PAGEEXEC;
40131+ }
40132+#endif
40133+
40134+#ifdef CONFIG_PAX_SEGMEXEC
40135+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40136+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40137+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40138+ pax_task_size = SEGMEXEC_TASK_SIZE;
40139+ current->mm->def_flags |= VM_NOHUGEPAGE;
40140+ }
40141+#endif
40142+
40143+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40144+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40145+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40146+ put_cpu();
40147+ }
40148+#endif
40149
40150 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40151 may depend on the personality. */
40152 SET_PERSONALITY(loc->elf_ex);
40153+
40154+#ifdef CONFIG_PAX_ASLR
40155+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40156+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40157+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40158+ }
40159+#endif
40160+
40161+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40162+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40163+ executable_stack = EXSTACK_DISABLE_X;
40164+ current->personality &= ~READ_IMPLIES_EXEC;
40165+ } else
40166+#endif
40167+
40168 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40169 current->personality |= READ_IMPLIES_EXEC;
40170
40171@@ -808,6 +1249,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40172 #else
40173 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40174 #endif
40175+
40176+#ifdef CONFIG_PAX_RANDMMAP
40177+ /* PaX: randomize base address at the default exe base if requested */
40178+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40179+#ifdef CONFIG_SPARC64
40180+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40181+#else
40182+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40183+#endif
40184+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40185+ elf_flags |= MAP_FIXED;
40186+ }
40187+#endif
40188+
40189 }
40190
40191 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40192@@ -840,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40193 * allowed task size. Note that p_filesz must always be
40194 * <= p_memsz so it is only necessary to check p_memsz.
40195 */
40196- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40197- elf_ppnt->p_memsz > TASK_SIZE ||
40198- TASK_SIZE - elf_ppnt->p_memsz < k) {
40199+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40200+ elf_ppnt->p_memsz > pax_task_size ||
40201+ pax_task_size - elf_ppnt->p_memsz < k) {
40202 /* set_brk can never work. Avoid overflows. */
40203 send_sig(SIGKILL, current, 0);
40204 retval = -EINVAL;
40205@@ -870,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40206 start_data += load_bias;
40207 end_data += load_bias;
40208
40209+#ifdef CONFIG_PAX_RANDMMAP
40210+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40211+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40212+#endif
40213+
40214 /* Calling set_brk effectively mmaps the pages that we need
40215 * for the bss and break sections. We must do this before
40216 * mapping in the interpreter, to make sure it doesn't wind
40217@@ -881,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40218 goto out_free_dentry;
40219 }
40220 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40221- send_sig(SIGSEGV, current, 0);
40222- retval = -EFAULT; /* Nobody gets to see this, but.. */
40223- goto out_free_dentry;
40224+ /*
40225+ * This bss-zeroing can fail if the ELF
40226+ * file specifies odd protections. So
40227+ * we don't check the return value
40228+ */
40229 }
40230
40231 if (elf_interpreter) {
40232@@ -1098,7 +1560,7 @@ out:
40233 * Decide what to dump of a segment, part, all or none.
40234 */
40235 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40236- unsigned long mm_flags)
40237+ unsigned long mm_flags, long signr)
40238 {
40239 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40240
40241@@ -1132,7 +1594,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40242 if (vma->vm_file == NULL)
40243 return 0;
40244
40245- if (FILTER(MAPPED_PRIVATE))
40246+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40247 goto whole;
40248
40249 /*
40250@@ -1354,9 +1816,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40251 {
40252 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40253 int i = 0;
40254- do
40255+ do {
40256 i += 2;
40257- while (auxv[i - 2] != AT_NULL);
40258+ } while (auxv[i - 2] != AT_NULL);
40259 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40260 }
40261
40262@@ -1862,14 +2324,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40263 }
40264
40265 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40266- unsigned long mm_flags)
40267+ struct coredump_params *cprm)
40268 {
40269 struct vm_area_struct *vma;
40270 size_t size = 0;
40271
40272 for (vma = first_vma(current, gate_vma); vma != NULL;
40273 vma = next_vma(vma, gate_vma))
40274- size += vma_dump_size(vma, mm_flags);
40275+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40276 return size;
40277 }
40278
40279@@ -1963,7 +2425,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40280
40281 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40282
40283- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40284+ offset += elf_core_vma_data_size(gate_vma, cprm);
40285 offset += elf_core_extra_data_size();
40286 e_shoff = offset;
40287
40288@@ -1977,10 +2439,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40289 offset = dataoff;
40290
40291 size += sizeof(*elf);
40292+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40293 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40294 goto end_coredump;
40295
40296 size += sizeof(*phdr4note);
40297+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40298 if (size > cprm->limit
40299 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40300 goto end_coredump;
40301@@ -1994,7 +2458,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40302 phdr.p_offset = offset;
40303 phdr.p_vaddr = vma->vm_start;
40304 phdr.p_paddr = 0;
40305- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40306+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40307 phdr.p_memsz = vma->vm_end - vma->vm_start;
40308 offset += phdr.p_filesz;
40309 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40310@@ -2005,6 +2469,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40311 phdr.p_align = ELF_EXEC_PAGESIZE;
40312
40313 size += sizeof(phdr);
40314+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40315 if (size > cprm->limit
40316 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40317 goto end_coredump;
40318@@ -2029,7 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40319 unsigned long addr;
40320 unsigned long end;
40321
40322- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40323+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40324
40325 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40326 struct page *page;
40327@@ -2038,6 +2503,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40328 page = get_dump_page(addr);
40329 if (page) {
40330 void *kaddr = kmap(page);
40331+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40332 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40333 !dump_write(cprm->file, kaddr,
40334 PAGE_SIZE);
40335@@ -2055,6 +2521,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40336
40337 if (e_phnum == PN_XNUM) {
40338 size += sizeof(*shdr4extnum);
40339+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40340 if (size > cprm->limit
40341 || !dump_write(cprm->file, shdr4extnum,
40342 sizeof(*shdr4extnum)))
40343@@ -2075,6 +2542,97 @@ out:
40344
40345 #endif /* CONFIG_ELF_CORE */
40346
40347+#ifdef CONFIG_PAX_MPROTECT
40348+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40349+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40350+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40351+ *
40352+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40353+ * basis because we want to allow the common case and not the special ones.
40354+ */
40355+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40356+{
40357+ struct elfhdr elf_h;
40358+ struct elf_phdr elf_p;
40359+ unsigned long i;
40360+ unsigned long oldflags;
40361+ bool is_textrel_rw, is_textrel_rx, is_relro;
40362+
40363+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40364+ return;
40365+
40366+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40367+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40368+
40369+#ifdef CONFIG_PAX_ELFRELOCS
40370+ /* possible TEXTREL */
40371+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40372+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40373+#else
40374+ is_textrel_rw = false;
40375+ is_textrel_rx = false;
40376+#endif
40377+
40378+ /* possible RELRO */
40379+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40380+
40381+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40382+ return;
40383+
40384+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40385+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40386+
40387+#ifdef CONFIG_PAX_ETEXECRELOCS
40388+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40389+#else
40390+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40391+#endif
40392+
40393+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40394+ !elf_check_arch(&elf_h) ||
40395+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40396+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40397+ return;
40398+
40399+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40400+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40401+ return;
40402+ switch (elf_p.p_type) {
40403+ case PT_DYNAMIC:
40404+ if (!is_textrel_rw && !is_textrel_rx)
40405+ continue;
40406+ i = 0UL;
40407+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40408+ elf_dyn dyn;
40409+
40410+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40411+ return;
40412+ if (dyn.d_tag == DT_NULL)
40413+ return;
40414+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40415+ gr_log_textrel(vma);
40416+ if (is_textrel_rw)
40417+ vma->vm_flags |= VM_MAYWRITE;
40418+ else
40419+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40420+ vma->vm_flags &= ~VM_MAYWRITE;
40421+ return;
40422+ }
40423+ i++;
40424+ }
40425+ return;
40426+
40427+ case PT_GNU_RELRO:
40428+ if (!is_relro)
40429+ continue;
40430+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40431+ vma->vm_flags &= ~VM_MAYWRITE;
40432+ return;
40433+ }
40434+ }
40435+}
40436+#endif
40437+
40438 static int __init init_elf_binfmt(void)
40439 {
40440 return register_binfmt(&elf_format);
40441diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40442index 1bffbe0..c8c283e 100644
40443--- a/fs/binfmt_flat.c
40444+++ b/fs/binfmt_flat.c
40445@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40446 realdatastart = (unsigned long) -ENOMEM;
40447 printk("Unable to allocate RAM for process data, errno %d\n",
40448 (int)-realdatastart);
40449+ down_write(&current->mm->mmap_sem);
40450 do_munmap(current->mm, textpos, text_len);
40451+ up_write(&current->mm->mmap_sem);
40452 ret = realdatastart;
40453 goto err;
40454 }
40455@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40456 }
40457 if (IS_ERR_VALUE(result)) {
40458 printk("Unable to read data+bss, errno %d\n", (int)-result);
40459+ down_write(&current->mm->mmap_sem);
40460 do_munmap(current->mm, textpos, text_len);
40461 do_munmap(current->mm, realdatastart, len);
40462+ up_write(&current->mm->mmap_sem);
40463 ret = result;
40464 goto err;
40465 }
40466@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40467 }
40468 if (IS_ERR_VALUE(result)) {
40469 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40470+ down_write(&current->mm->mmap_sem);
40471 do_munmap(current->mm, textpos, text_len + data_len + extra +
40472 MAX_SHARED_LIBS * sizeof(unsigned long));
40473+ up_write(&current->mm->mmap_sem);
40474 ret = result;
40475 goto err;
40476 }
40477diff --git a/fs/bio.c b/fs/bio.c
40478index b1fe82c..84da0a9 100644
40479--- a/fs/bio.c
40480+++ b/fs/bio.c
40481@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40482 const int read = bio_data_dir(bio) == READ;
40483 struct bio_map_data *bmd = bio->bi_private;
40484 int i;
40485- char *p = bmd->sgvecs[0].iov_base;
40486+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40487
40488 __bio_for_each_segment(bvec, bio, i, 0) {
40489 char *addr = page_address(bvec->bv_page);
40490diff --git a/fs/block_dev.c b/fs/block_dev.c
40491index b07f1da..9efcb92 100644
40492--- a/fs/block_dev.c
40493+++ b/fs/block_dev.c
40494@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40495 else if (bdev->bd_contains == bdev)
40496 return true; /* is a whole device which isn't held */
40497
40498- else if (whole->bd_holder == bd_may_claim)
40499+ else if (whole->bd_holder == (void *)bd_may_claim)
40500 return true; /* is a partition of a device that is being partitioned */
40501 else if (whole->bd_holder != NULL)
40502 return false; /* is a partition of a held device */
40503diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40504index dede441..f2a2507 100644
40505--- a/fs/btrfs/ctree.c
40506+++ b/fs/btrfs/ctree.c
40507@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40508 free_extent_buffer(buf);
40509 add_root_to_dirty_list(root);
40510 } else {
40511- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40512- parent_start = parent->start;
40513- else
40514+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40515+ if (parent)
40516+ parent_start = parent->start;
40517+ else
40518+ parent_start = 0;
40519+ } else
40520 parent_start = 0;
40521
40522 WARN_ON(trans->transid != btrfs_header_generation(parent));
40523diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40524index fd1a06d..6e9033d 100644
40525--- a/fs/btrfs/inode.c
40526+++ b/fs/btrfs/inode.c
40527@@ -6895,7 +6895,7 @@ fail:
40528 return -ENOMEM;
40529 }
40530
40531-static int btrfs_getattr(struct vfsmount *mnt,
40532+int btrfs_getattr(struct vfsmount *mnt,
40533 struct dentry *dentry, struct kstat *stat)
40534 {
40535 struct inode *inode = dentry->d_inode;
40536@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40537 return 0;
40538 }
40539
40540+EXPORT_SYMBOL(btrfs_getattr);
40541+
40542+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40543+{
40544+ return BTRFS_I(inode)->root->anon_dev;
40545+}
40546+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40547+
40548 /*
40549 * If a file is moved, it will inherit the cow and compression flags of the new
40550 * directory.
40551diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40552index c04f02c..f5c9e2e 100644
40553--- a/fs/btrfs/ioctl.c
40554+++ b/fs/btrfs/ioctl.c
40555@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40556 for (i = 0; i < num_types; i++) {
40557 struct btrfs_space_info *tmp;
40558
40559+ /* Don't copy in more than we allocated */
40560 if (!slot_count)
40561 break;
40562
40563+ slot_count--;
40564+
40565 info = NULL;
40566 rcu_read_lock();
40567 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40568@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40569 memcpy(dest, &space, sizeof(space));
40570 dest++;
40571 space_args.total_spaces++;
40572- slot_count--;
40573 }
40574- if (!slot_count)
40575- break;
40576 }
40577 up_read(&info->groups_sem);
40578 }
40579
40580- user_dest = (struct btrfs_ioctl_space_info *)
40581+ user_dest = (struct btrfs_ioctl_space_info __user *)
40582 (arg + sizeof(struct btrfs_ioctl_space_args));
40583
40584 if (copy_to_user(user_dest, dest_orig, alloc_size))
40585diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40586index cfb5543..1ae7347 100644
40587--- a/fs/btrfs/relocation.c
40588+++ b/fs/btrfs/relocation.c
40589@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40590 }
40591 spin_unlock(&rc->reloc_root_tree.lock);
40592
40593- BUG_ON((struct btrfs_root *)node->data != root);
40594+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40595
40596 if (!del) {
40597 spin_lock(&rc->reloc_root_tree.lock);
40598diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40599index 622f469..e8d2d55 100644
40600--- a/fs/cachefiles/bind.c
40601+++ b/fs/cachefiles/bind.c
40602@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40603 args);
40604
40605 /* start by checking things over */
40606- ASSERT(cache->fstop_percent >= 0 &&
40607- cache->fstop_percent < cache->fcull_percent &&
40608+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40609 cache->fcull_percent < cache->frun_percent &&
40610 cache->frun_percent < 100);
40611
40612- ASSERT(cache->bstop_percent >= 0 &&
40613- cache->bstop_percent < cache->bcull_percent &&
40614+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40615 cache->bcull_percent < cache->brun_percent &&
40616 cache->brun_percent < 100);
40617
40618diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40619index 0a1467b..6a53245 100644
40620--- a/fs/cachefiles/daemon.c
40621+++ b/fs/cachefiles/daemon.c
40622@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40623 if (n > buflen)
40624 return -EMSGSIZE;
40625
40626- if (copy_to_user(_buffer, buffer, n) != 0)
40627+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40628 return -EFAULT;
40629
40630 return n;
40631@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40632 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40633 return -EIO;
40634
40635- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40636+ if (datalen > PAGE_SIZE - 1)
40637 return -EOPNOTSUPP;
40638
40639 /* drag the command string into the kernel so we can parse it */
40640@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40641 if (args[0] != '%' || args[1] != '\0')
40642 return -EINVAL;
40643
40644- if (fstop < 0 || fstop >= cache->fcull_percent)
40645+ if (fstop >= cache->fcull_percent)
40646 return cachefiles_daemon_range_error(cache, args);
40647
40648 cache->fstop_percent = fstop;
40649@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40650 if (args[0] != '%' || args[1] != '\0')
40651 return -EINVAL;
40652
40653- if (bstop < 0 || bstop >= cache->bcull_percent)
40654+ if (bstop >= cache->bcull_percent)
40655 return cachefiles_daemon_range_error(cache, args);
40656
40657 cache->bstop_percent = bstop;
40658diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40659index bd6bc1b..b627b53 100644
40660--- a/fs/cachefiles/internal.h
40661+++ b/fs/cachefiles/internal.h
40662@@ -57,7 +57,7 @@ struct cachefiles_cache {
40663 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40664 struct rb_root active_nodes; /* active nodes (can't be culled) */
40665 rwlock_t active_lock; /* lock for active_nodes */
40666- atomic_t gravecounter; /* graveyard uniquifier */
40667+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40668 unsigned frun_percent; /* when to stop culling (% files) */
40669 unsigned fcull_percent; /* when to start culling (% files) */
40670 unsigned fstop_percent; /* when to stop allocating (% files) */
40671@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40672 * proc.c
40673 */
40674 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40675-extern atomic_t cachefiles_lookup_histogram[HZ];
40676-extern atomic_t cachefiles_mkdir_histogram[HZ];
40677-extern atomic_t cachefiles_create_histogram[HZ];
40678+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40679+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40680+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40681
40682 extern int __init cachefiles_proc_init(void);
40683 extern void cachefiles_proc_cleanup(void);
40684 static inline
40685-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40686+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40687 {
40688 unsigned long jif = jiffies - start_jif;
40689 if (jif >= HZ)
40690 jif = HZ - 1;
40691- atomic_inc(&histogram[jif]);
40692+ atomic_inc_unchecked(&histogram[jif]);
40693 }
40694
40695 #else
40696diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40697index a0358c2..d6137f2 100644
40698--- a/fs/cachefiles/namei.c
40699+++ b/fs/cachefiles/namei.c
40700@@ -318,7 +318,7 @@ try_again:
40701 /* first step is to make up a grave dentry in the graveyard */
40702 sprintf(nbuffer, "%08x%08x",
40703 (uint32_t) get_seconds(),
40704- (uint32_t) atomic_inc_return(&cache->gravecounter));
40705+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40706
40707 /* do the multiway lock magic */
40708 trap = lock_rename(cache->graveyard, dir);
40709diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40710index eccd339..4c1d995 100644
40711--- a/fs/cachefiles/proc.c
40712+++ b/fs/cachefiles/proc.c
40713@@ -14,9 +14,9 @@
40714 #include <linux/seq_file.h>
40715 #include "internal.h"
40716
40717-atomic_t cachefiles_lookup_histogram[HZ];
40718-atomic_t cachefiles_mkdir_histogram[HZ];
40719-atomic_t cachefiles_create_histogram[HZ];
40720+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40721+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40722+atomic_unchecked_t cachefiles_create_histogram[HZ];
40723
40724 /*
40725 * display the latency histogram
40726@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40727 return 0;
40728 default:
40729 index = (unsigned long) v - 3;
40730- x = atomic_read(&cachefiles_lookup_histogram[index]);
40731- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40732- z = atomic_read(&cachefiles_create_histogram[index]);
40733+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40734+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40735+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40736 if (x == 0 && y == 0 && z == 0)
40737 return 0;
40738
40739diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40740index 0e3c092..818480e 100644
40741--- a/fs/cachefiles/rdwr.c
40742+++ b/fs/cachefiles/rdwr.c
40743@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40744 old_fs = get_fs();
40745 set_fs(KERNEL_DS);
40746 ret = file->f_op->write(
40747- file, (const void __user *) data, len, &pos);
40748+ file, (const void __force_user *) data, len, &pos);
40749 set_fs(old_fs);
40750 kunmap(page);
40751 if (ret != len)
40752diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40753index 9895400..fa40a7d 100644
40754--- a/fs/ceph/dir.c
40755+++ b/fs/ceph/dir.c
40756@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40757 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40758 struct ceph_mds_client *mdsc = fsc->mdsc;
40759 unsigned frag = fpos_frag(filp->f_pos);
40760- int off = fpos_off(filp->f_pos);
40761+ unsigned int off = fpos_off(filp->f_pos);
40762 int err;
40763 u32 ftype;
40764 struct ceph_mds_reply_info_parsed *rinfo;
40765diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40766index 84e8c07..6170d31 100644
40767--- a/fs/cifs/cifs_debug.c
40768+++ b/fs/cifs/cifs_debug.c
40769@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40770
40771 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40772 #ifdef CONFIG_CIFS_STATS2
40773- atomic_set(&totBufAllocCount, 0);
40774- atomic_set(&totSmBufAllocCount, 0);
40775+ atomic_set_unchecked(&totBufAllocCount, 0);
40776+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40777 #endif /* CONFIG_CIFS_STATS2 */
40778 spin_lock(&cifs_tcp_ses_lock);
40779 list_for_each(tmp1, &cifs_tcp_ses_list) {
40780@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40781 tcon = list_entry(tmp3,
40782 struct cifs_tcon,
40783 tcon_list);
40784- atomic_set(&tcon->num_smbs_sent, 0);
40785- atomic_set(&tcon->num_writes, 0);
40786- atomic_set(&tcon->num_reads, 0);
40787- atomic_set(&tcon->num_oplock_brks, 0);
40788- atomic_set(&tcon->num_opens, 0);
40789- atomic_set(&tcon->num_posixopens, 0);
40790- atomic_set(&tcon->num_posixmkdirs, 0);
40791- atomic_set(&tcon->num_closes, 0);
40792- atomic_set(&tcon->num_deletes, 0);
40793- atomic_set(&tcon->num_mkdirs, 0);
40794- atomic_set(&tcon->num_rmdirs, 0);
40795- atomic_set(&tcon->num_renames, 0);
40796- atomic_set(&tcon->num_t2renames, 0);
40797- atomic_set(&tcon->num_ffirst, 0);
40798- atomic_set(&tcon->num_fnext, 0);
40799- atomic_set(&tcon->num_fclose, 0);
40800- atomic_set(&tcon->num_hardlinks, 0);
40801- atomic_set(&tcon->num_symlinks, 0);
40802- atomic_set(&tcon->num_locks, 0);
40803+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40804+ atomic_set_unchecked(&tcon->num_writes, 0);
40805+ atomic_set_unchecked(&tcon->num_reads, 0);
40806+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40807+ atomic_set_unchecked(&tcon->num_opens, 0);
40808+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40809+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40810+ atomic_set_unchecked(&tcon->num_closes, 0);
40811+ atomic_set_unchecked(&tcon->num_deletes, 0);
40812+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40813+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40814+ atomic_set_unchecked(&tcon->num_renames, 0);
40815+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40816+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40817+ atomic_set_unchecked(&tcon->num_fnext, 0);
40818+ atomic_set_unchecked(&tcon->num_fclose, 0);
40819+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40820+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40821+ atomic_set_unchecked(&tcon->num_locks, 0);
40822 }
40823 }
40824 }
40825@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40826 smBufAllocCount.counter, cifs_min_small);
40827 #ifdef CONFIG_CIFS_STATS2
40828 seq_printf(m, "Total Large %d Small %d Allocations\n",
40829- atomic_read(&totBufAllocCount),
40830- atomic_read(&totSmBufAllocCount));
40831+ atomic_read_unchecked(&totBufAllocCount),
40832+ atomic_read_unchecked(&totSmBufAllocCount));
40833 #endif /* CONFIG_CIFS_STATS2 */
40834
40835 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40836@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40837 if (tcon->need_reconnect)
40838 seq_puts(m, "\tDISCONNECTED ");
40839 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40840- atomic_read(&tcon->num_smbs_sent),
40841- atomic_read(&tcon->num_oplock_brks));
40842+ atomic_read_unchecked(&tcon->num_smbs_sent),
40843+ atomic_read_unchecked(&tcon->num_oplock_brks));
40844 seq_printf(m, "\nReads: %d Bytes: %lld",
40845- atomic_read(&tcon->num_reads),
40846+ atomic_read_unchecked(&tcon->num_reads),
40847 (long long)(tcon->bytes_read));
40848 seq_printf(m, "\nWrites: %d Bytes: %lld",
40849- atomic_read(&tcon->num_writes),
40850+ atomic_read_unchecked(&tcon->num_writes),
40851 (long long)(tcon->bytes_written));
40852 seq_printf(m, "\nFlushes: %d",
40853- atomic_read(&tcon->num_flushes));
40854+ atomic_read_unchecked(&tcon->num_flushes));
40855 seq_printf(m, "\nLocks: %d HardLinks: %d "
40856 "Symlinks: %d",
40857- atomic_read(&tcon->num_locks),
40858- atomic_read(&tcon->num_hardlinks),
40859- atomic_read(&tcon->num_symlinks));
40860+ atomic_read_unchecked(&tcon->num_locks),
40861+ atomic_read_unchecked(&tcon->num_hardlinks),
40862+ atomic_read_unchecked(&tcon->num_symlinks));
40863 seq_printf(m, "\nOpens: %d Closes: %d "
40864 "Deletes: %d",
40865- atomic_read(&tcon->num_opens),
40866- atomic_read(&tcon->num_closes),
40867- atomic_read(&tcon->num_deletes));
40868+ atomic_read_unchecked(&tcon->num_opens),
40869+ atomic_read_unchecked(&tcon->num_closes),
40870+ atomic_read_unchecked(&tcon->num_deletes));
40871 seq_printf(m, "\nPosix Opens: %d "
40872 "Posix Mkdirs: %d",
40873- atomic_read(&tcon->num_posixopens),
40874- atomic_read(&tcon->num_posixmkdirs));
40875+ atomic_read_unchecked(&tcon->num_posixopens),
40876+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40877 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40878- atomic_read(&tcon->num_mkdirs),
40879- atomic_read(&tcon->num_rmdirs));
40880+ atomic_read_unchecked(&tcon->num_mkdirs),
40881+ atomic_read_unchecked(&tcon->num_rmdirs));
40882 seq_printf(m, "\nRenames: %d T2 Renames %d",
40883- atomic_read(&tcon->num_renames),
40884- atomic_read(&tcon->num_t2renames));
40885+ atomic_read_unchecked(&tcon->num_renames),
40886+ atomic_read_unchecked(&tcon->num_t2renames));
40887 seq_printf(m, "\nFindFirst: %d FNext %d "
40888 "FClose %d",
40889- atomic_read(&tcon->num_ffirst),
40890- atomic_read(&tcon->num_fnext),
40891- atomic_read(&tcon->num_fclose));
40892+ atomic_read_unchecked(&tcon->num_ffirst),
40893+ atomic_read_unchecked(&tcon->num_fnext),
40894+ atomic_read_unchecked(&tcon->num_fclose));
40895 }
40896 }
40897 }
40898diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40899index 8f1fe32..38f9e27 100644
40900--- a/fs/cifs/cifsfs.c
40901+++ b/fs/cifs/cifsfs.c
40902@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40903 cifs_req_cachep = kmem_cache_create("cifs_request",
40904 CIFSMaxBufSize +
40905 MAX_CIFS_HDR_SIZE, 0,
40906- SLAB_HWCACHE_ALIGN, NULL);
40907+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40908 if (cifs_req_cachep == NULL)
40909 return -ENOMEM;
40910
40911@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
40912 efficient to alloc 1 per page off the slab compared to 17K (5page)
40913 alloc of large cifs buffers even when page debugging is on */
40914 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40915- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40916+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40917 NULL);
40918 if (cifs_sm_req_cachep == NULL) {
40919 mempool_destroy(cifs_req_poolp);
40920@@ -1101,8 +1101,8 @@ init_cifs(void)
40921 atomic_set(&bufAllocCount, 0);
40922 atomic_set(&smBufAllocCount, 0);
40923 #ifdef CONFIG_CIFS_STATS2
40924- atomic_set(&totBufAllocCount, 0);
40925- atomic_set(&totSmBufAllocCount, 0);
40926+ atomic_set_unchecked(&totBufAllocCount, 0);
40927+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40928 #endif /* CONFIG_CIFS_STATS2 */
40929
40930 atomic_set(&midCount, 0);
40931diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
40932index 8238aa1..0347196 100644
40933--- a/fs/cifs/cifsglob.h
40934+++ b/fs/cifs/cifsglob.h
40935@@ -392,28 +392,28 @@ struct cifs_tcon {
40936 __u16 Flags; /* optional support bits */
40937 enum statusEnum tidStatus;
40938 #ifdef CONFIG_CIFS_STATS
40939- atomic_t num_smbs_sent;
40940- atomic_t num_writes;
40941- atomic_t num_reads;
40942- atomic_t num_flushes;
40943- atomic_t num_oplock_brks;
40944- atomic_t num_opens;
40945- atomic_t num_closes;
40946- atomic_t num_deletes;
40947- atomic_t num_mkdirs;
40948- atomic_t num_posixopens;
40949- atomic_t num_posixmkdirs;
40950- atomic_t num_rmdirs;
40951- atomic_t num_renames;
40952- atomic_t num_t2renames;
40953- atomic_t num_ffirst;
40954- atomic_t num_fnext;
40955- atomic_t num_fclose;
40956- atomic_t num_hardlinks;
40957- atomic_t num_symlinks;
40958- atomic_t num_locks;
40959- atomic_t num_acl_get;
40960- atomic_t num_acl_set;
40961+ atomic_unchecked_t num_smbs_sent;
40962+ atomic_unchecked_t num_writes;
40963+ atomic_unchecked_t num_reads;
40964+ atomic_unchecked_t num_flushes;
40965+ atomic_unchecked_t num_oplock_brks;
40966+ atomic_unchecked_t num_opens;
40967+ atomic_unchecked_t num_closes;
40968+ atomic_unchecked_t num_deletes;
40969+ atomic_unchecked_t num_mkdirs;
40970+ atomic_unchecked_t num_posixopens;
40971+ atomic_unchecked_t num_posixmkdirs;
40972+ atomic_unchecked_t num_rmdirs;
40973+ atomic_unchecked_t num_renames;
40974+ atomic_unchecked_t num_t2renames;
40975+ atomic_unchecked_t num_ffirst;
40976+ atomic_unchecked_t num_fnext;
40977+ atomic_unchecked_t num_fclose;
40978+ atomic_unchecked_t num_hardlinks;
40979+ atomic_unchecked_t num_symlinks;
40980+ atomic_unchecked_t num_locks;
40981+ atomic_unchecked_t num_acl_get;
40982+ atomic_unchecked_t num_acl_set;
40983 #ifdef CONFIG_CIFS_STATS2
40984 unsigned long long time_writes;
40985 unsigned long long time_reads;
40986@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
40987 }
40988
40989 #ifdef CONFIG_CIFS_STATS
40990-#define cifs_stats_inc atomic_inc
40991+#define cifs_stats_inc atomic_inc_unchecked
40992
40993 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40994 unsigned int bytes)
40995@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
40996 /* Various Debug counters */
40997 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40998 #ifdef CONFIG_CIFS_STATS2
40999-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41000-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41001+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41002+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41003 #endif
41004 GLOBAL_EXTERN atomic_t smBufAllocCount;
41005 GLOBAL_EXTERN atomic_t midCount;
41006diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41007index 6b0e064..94e6c3c 100644
41008--- a/fs/cifs/link.c
41009+++ b/fs/cifs/link.c
41010@@ -600,7 +600,7 @@ symlink_exit:
41011
41012 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41013 {
41014- char *p = nd_get_link(nd);
41015+ const char *p = nd_get_link(nd);
41016 if (!IS_ERR(p))
41017 kfree(p);
41018 }
41019diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41020index 703ef5c..2a44ed5 100644
41021--- a/fs/cifs/misc.c
41022+++ b/fs/cifs/misc.c
41023@@ -156,7 +156,7 @@ cifs_buf_get(void)
41024 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41025 atomic_inc(&bufAllocCount);
41026 #ifdef CONFIG_CIFS_STATS2
41027- atomic_inc(&totBufAllocCount);
41028+ atomic_inc_unchecked(&totBufAllocCount);
41029 #endif /* CONFIG_CIFS_STATS2 */
41030 }
41031
41032@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41033 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41034 atomic_inc(&smBufAllocCount);
41035 #ifdef CONFIG_CIFS_STATS2
41036- atomic_inc(&totSmBufAllocCount);
41037+ atomic_inc_unchecked(&totSmBufAllocCount);
41038 #endif /* CONFIG_CIFS_STATS2 */
41039
41040 }
41041diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41042index 6901578..d402eb5 100644
41043--- a/fs/coda/cache.c
41044+++ b/fs/coda/cache.c
41045@@ -24,7 +24,7 @@
41046 #include "coda_linux.h"
41047 #include "coda_cache.h"
41048
41049-static atomic_t permission_epoch = ATOMIC_INIT(0);
41050+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41051
41052 /* replace or extend an acl cache hit */
41053 void coda_cache_enter(struct inode *inode, int mask)
41054@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41055 struct coda_inode_info *cii = ITOC(inode);
41056
41057 spin_lock(&cii->c_lock);
41058- cii->c_cached_epoch = atomic_read(&permission_epoch);
41059+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41060 if (cii->c_uid != current_fsuid()) {
41061 cii->c_uid = current_fsuid();
41062 cii->c_cached_perm = mask;
41063@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41064 {
41065 struct coda_inode_info *cii = ITOC(inode);
41066 spin_lock(&cii->c_lock);
41067- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41068+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41069 spin_unlock(&cii->c_lock);
41070 }
41071
41072 /* remove all acl caches */
41073 void coda_cache_clear_all(struct super_block *sb)
41074 {
41075- atomic_inc(&permission_epoch);
41076+ atomic_inc_unchecked(&permission_epoch);
41077 }
41078
41079
41080@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41081 spin_lock(&cii->c_lock);
41082 hit = (mask & cii->c_cached_perm) == mask &&
41083 cii->c_uid == current_fsuid() &&
41084- cii->c_cached_epoch == atomic_read(&permission_epoch);
41085+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41086 spin_unlock(&cii->c_lock);
41087
41088 return hit;
41089diff --git a/fs/compat.c b/fs/compat.c
41090index c987875..08771ca 100644
41091--- a/fs/compat.c
41092+++ b/fs/compat.c
41093@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41094 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41095 {
41096 compat_ino_t ino = stat->ino;
41097- typeof(ubuf->st_uid) uid = 0;
41098- typeof(ubuf->st_gid) gid = 0;
41099+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41100+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41101 int err;
41102
41103 SET_UID(uid, stat->uid);
41104@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41105
41106 set_fs(KERNEL_DS);
41107 /* The __user pointer cast is valid because of the set_fs() */
41108- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41109+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41110 set_fs(oldfs);
41111 /* truncating is ok because it's a user address */
41112 if (!ret)
41113@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41114 goto out;
41115
41116 ret = -EINVAL;
41117- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41118+ if (nr_segs > UIO_MAXIOV)
41119 goto out;
41120 if (nr_segs > fast_segs) {
41121 ret = -ENOMEM;
41122@@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41123
41124 struct compat_readdir_callback {
41125 struct compat_old_linux_dirent __user *dirent;
41126+ struct file * file;
41127 int result;
41128 };
41129
41130@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41131 buf->result = -EOVERFLOW;
41132 return -EOVERFLOW;
41133 }
41134+
41135+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41136+ return 0;
41137+
41138 buf->result++;
41139 dirent = buf->dirent;
41140 if (!access_ok(VERIFY_WRITE, dirent,
41141@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41142
41143 buf.result = 0;
41144 buf.dirent = dirent;
41145+ buf.file = file;
41146
41147 error = vfs_readdir(file, compat_fillonedir, &buf);
41148 if (buf.result)
41149@@ -914,6 +920,7 @@ struct compat_linux_dirent {
41150 struct compat_getdents_callback {
41151 struct compat_linux_dirent __user *current_dir;
41152 struct compat_linux_dirent __user *previous;
41153+ struct file * file;
41154 int count;
41155 int error;
41156 };
41157@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41158 buf->error = -EOVERFLOW;
41159 return -EOVERFLOW;
41160 }
41161+
41162+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41163+ return 0;
41164+
41165 dirent = buf->previous;
41166 if (dirent) {
41167 if (__put_user(offset, &dirent->d_off))
41168@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41169 buf.previous = NULL;
41170 buf.count = count;
41171 buf.error = 0;
41172+ buf.file = file;
41173
41174 error = vfs_readdir(file, compat_filldir, &buf);
41175 if (error >= 0)
41176@@ -1003,6 +1015,7 @@ out:
41177 struct compat_getdents_callback64 {
41178 struct linux_dirent64 __user *current_dir;
41179 struct linux_dirent64 __user *previous;
41180+ struct file * file;
41181 int count;
41182 int error;
41183 };
41184@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41185 buf->error = -EINVAL; /* only used if we fail.. */
41186 if (reclen > buf->count)
41187 return -EINVAL;
41188+
41189+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41190+ return 0;
41191+
41192 dirent = buf->previous;
41193
41194 if (dirent) {
41195@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41196 buf.previous = NULL;
41197 buf.count = count;
41198 buf.error = 0;
41199+ buf.file = file;
41200
41201 error = vfs_readdir(file, compat_filldir64, &buf);
41202 if (error >= 0)
41203 error = buf.error;
41204 lastdirent = buf.previous;
41205 if (lastdirent) {
41206- typeof(lastdirent->d_off) d_off = file->f_pos;
41207+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41208 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41209 error = -EFAULT;
41210 else
41211diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41212index 112e45a..b59845b 100644
41213--- a/fs/compat_binfmt_elf.c
41214+++ b/fs/compat_binfmt_elf.c
41215@@ -30,11 +30,13 @@
41216 #undef elf_phdr
41217 #undef elf_shdr
41218 #undef elf_note
41219+#undef elf_dyn
41220 #undef elf_addr_t
41221 #define elfhdr elf32_hdr
41222 #define elf_phdr elf32_phdr
41223 #define elf_shdr elf32_shdr
41224 #define elf_note elf32_note
41225+#define elf_dyn Elf32_Dyn
41226 #define elf_addr_t Elf32_Addr
41227
41228 /*
41229diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41230index 51352de..93292ff 100644
41231--- a/fs/compat_ioctl.c
41232+++ b/fs/compat_ioctl.c
41233@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41234
41235 err = get_user(palp, &up->palette);
41236 err |= get_user(length, &up->length);
41237+ if (err)
41238+ return -EFAULT;
41239
41240 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41241 err = put_user(compat_ptr(palp), &up_native->palette);
41242@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41243 return -EFAULT;
41244 if (__get_user(udata, &ss32->iomem_base))
41245 return -EFAULT;
41246- ss.iomem_base = compat_ptr(udata);
41247+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41248 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41249 __get_user(ss.port_high, &ss32->port_high))
41250 return -EFAULT;
41251@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41252 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41253 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41254 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41255- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41256+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41257 return -EFAULT;
41258
41259 return ioctl_preallocate(file, p);
41260@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41261 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41262 {
41263 unsigned int a, b;
41264- a = *(unsigned int *)p;
41265- b = *(unsigned int *)q;
41266+ a = *(const unsigned int *)p;
41267+ b = *(const unsigned int *)q;
41268 if (a > b)
41269 return 1;
41270 if (a < b)
41271diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41272index 9a37a9b..35792b6 100644
41273--- a/fs/configfs/dir.c
41274+++ b/fs/configfs/dir.c
41275@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41276 }
41277 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41278 struct configfs_dirent *next;
41279- const char * name;
41280+ const unsigned char * name;
41281+ char d_name[sizeof(next->s_dentry->d_iname)];
41282 int len;
41283 struct inode *inode = NULL;
41284
41285@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41286 continue;
41287
41288 name = configfs_get_name(next);
41289- len = strlen(name);
41290+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41291+ len = next->s_dentry->d_name.len;
41292+ memcpy(d_name, name, len);
41293+ name = d_name;
41294+ } else
41295+ len = strlen(name);
41296
41297 /*
41298 * We'll have a dentry and an inode for
41299diff --git a/fs/dcache.c b/fs/dcache.c
41300index f7908ae..920a680 100644
41301--- a/fs/dcache.c
41302+++ b/fs/dcache.c
41303@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41304 mempages -= reserve;
41305
41306 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41307- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41308+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41309
41310 dcache_init();
41311 inode_init();
41312diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41313index f3a257d..715ac0f 100644
41314--- a/fs/debugfs/inode.c
41315+++ b/fs/debugfs/inode.c
41316@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41317 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41318 {
41319 return debugfs_create_file(name,
41320+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41321+ S_IFDIR | S_IRWXU,
41322+#else
41323 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41324+#endif
41325 parent, NULL, NULL);
41326 }
41327 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41328diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41329index d2039ca..a766407 100644
41330--- a/fs/ecryptfs/inode.c
41331+++ b/fs/ecryptfs/inode.c
41332@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41333 old_fs = get_fs();
41334 set_fs(get_ds());
41335 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41336- (char __user *)lower_buf,
41337+ (char __force_user *)lower_buf,
41338 lower_bufsiz);
41339 set_fs(old_fs);
41340 if (rc < 0)
41341@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41342 }
41343 old_fs = get_fs();
41344 set_fs(get_ds());
41345- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41346+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41347 set_fs(old_fs);
41348 if (rc < 0) {
41349 kfree(buf);
41350@@ -752,7 +752,7 @@ out:
41351 static void
41352 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41353 {
41354- char *buf = nd_get_link(nd);
41355+ const char *buf = nd_get_link(nd);
41356 if (!IS_ERR(buf)) {
41357 /* Free the char* */
41358 kfree(buf);
41359diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41360index 0dc5a3d..d3cdeea 100644
41361--- a/fs/ecryptfs/miscdev.c
41362+++ b/fs/ecryptfs/miscdev.c
41363@@ -328,7 +328,7 @@ check_list:
41364 goto out_unlock_msg_ctx;
41365 i = 5;
41366 if (msg_ctx->msg) {
41367- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41368+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41369 goto out_unlock_msg_ctx;
41370 i += packet_length_size;
41371 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41372diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41373index 54eb14c..e51b453 100644
41374--- a/fs/ecryptfs/read_write.c
41375+++ b/fs/ecryptfs/read_write.c
41376@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41377 return -EIO;
41378 fs_save = get_fs();
41379 set_fs(get_ds());
41380- rc = vfs_write(lower_file, data, size, &offset);
41381+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41382 set_fs(fs_save);
41383 mark_inode_dirty_sync(ecryptfs_inode);
41384 return rc;
41385@@ -130,7 +130,12 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41386 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
41387 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
41388 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
41389- size_t total_remaining_bytes = ((offset + size) - pos);
41390+ loff_t total_remaining_bytes = ((offset + size) - pos);
41391+
41392+ if (fatal_signal_pending(current)) {
41393+ rc = -EINTR;
41394+ break;
41395+ }
41396
41397 if (fatal_signal_pending(current)) {
41398 rc = -EINTR;
41399@@ -141,7 +146,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41400 num_bytes = total_remaining_bytes;
41401 if (pos < offset) {
41402 /* remaining zeros to write, up to destination offset */
41403- size_t total_remaining_zeros = (offset - pos);
41404+ loff_t total_remaining_zeros = (offset - pos);
41405
41406 if (num_bytes > total_remaining_zeros)
41407 num_bytes = total_remaining_zeros;
41408@@ -244,7 +249,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41409 return -EIO;
41410 fs_save = get_fs();
41411 set_fs(get_ds());
41412- rc = vfs_read(lower_file, data, size, &offset);
41413+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41414 set_fs(fs_save);
41415 return rc;
41416 }
41417diff --git a/fs/exec.c b/fs/exec.c
41418index 3625464..fac01f4 100644
41419--- a/fs/exec.c
41420+++ b/fs/exec.c
41421@@ -55,12 +55,28 @@
41422 #include <linux/pipe_fs_i.h>
41423 #include <linux/oom.h>
41424 #include <linux/compat.h>
41425+#include <linux/random.h>
41426+#include <linux/seq_file.h>
41427+
41428+#ifdef CONFIG_PAX_REFCOUNT
41429+#include <linux/kallsyms.h>
41430+#include <linux/kdebug.h>
41431+#endif
41432
41433 #include <asm/uaccess.h>
41434 #include <asm/mmu_context.h>
41435 #include <asm/tlb.h>
41436 #include "internal.h"
41437
41438+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41439+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41440+#endif
41441+
41442+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41443+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41444+EXPORT_SYMBOL(pax_set_initial_flags_func);
41445+#endif
41446+
41447 int core_uses_pid;
41448 char core_pattern[CORENAME_MAX_SIZE] = "core";
41449 unsigned int core_pipe_limit;
41450@@ -70,7 +86,7 @@ struct core_name {
41451 char *corename;
41452 int used, size;
41453 };
41454-static atomic_t call_count = ATOMIC_INIT(1);
41455+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41456
41457 /* The maximal length of core_pattern is also specified in sysctl.c */
41458
41459@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41460 int write)
41461 {
41462 struct page *page;
41463- int ret;
41464
41465-#ifdef CONFIG_STACK_GROWSUP
41466- if (write) {
41467- ret = expand_downwards(bprm->vma, pos);
41468- if (ret < 0)
41469- return NULL;
41470- }
41471-#endif
41472- ret = get_user_pages(current, bprm->mm, pos,
41473- 1, write, 1, &page, NULL);
41474- if (ret <= 0)
41475+ if (0 > expand_downwards(bprm->vma, pos))
41476+ return NULL;
41477+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41478 return NULL;
41479
41480 if (write) {
41481@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41482 vma->vm_end = STACK_TOP_MAX;
41483 vma->vm_start = vma->vm_end - PAGE_SIZE;
41484 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41485+
41486+#ifdef CONFIG_PAX_SEGMEXEC
41487+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41488+#endif
41489+
41490 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41491 INIT_LIST_HEAD(&vma->anon_vma_chain);
41492
41493@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41494 mm->stack_vm = mm->total_vm = 1;
41495 up_write(&mm->mmap_sem);
41496 bprm->p = vma->vm_end - sizeof(void *);
41497+
41498+#ifdef CONFIG_PAX_RANDUSTACK
41499+ if (randomize_va_space)
41500+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41501+#endif
41502+
41503 return 0;
41504 err:
41505 up_write(&mm->mmap_sem);
41506@@ -396,19 +415,7 @@ err:
41507 return err;
41508 }
41509
41510-struct user_arg_ptr {
41511-#ifdef CONFIG_COMPAT
41512- bool is_compat;
41513-#endif
41514- union {
41515- const char __user *const __user *native;
41516-#ifdef CONFIG_COMPAT
41517- compat_uptr_t __user *compat;
41518-#endif
41519- } ptr;
41520-};
41521-
41522-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41523+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41524 {
41525 const char __user *native;
41526
41527@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41528 compat_uptr_t compat;
41529
41530 if (get_user(compat, argv.ptr.compat + nr))
41531- return ERR_PTR(-EFAULT);
41532+ return (const char __force_user *)ERR_PTR(-EFAULT);
41533
41534 return compat_ptr(compat);
41535 }
41536 #endif
41537
41538 if (get_user(native, argv.ptr.native + nr))
41539- return ERR_PTR(-EFAULT);
41540+ return (const char __force_user *)ERR_PTR(-EFAULT);
41541
41542 return native;
41543 }
41544@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
41545 if (!p)
41546 break;
41547
41548- if (IS_ERR(p))
41549+ if (IS_ERR((const char __force_kernel *)p))
41550 return -EFAULT;
41551
41552 if (i++ >= max)
41553@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41554
41555 ret = -EFAULT;
41556 str = get_user_arg_ptr(argv, argc);
41557- if (IS_ERR(str))
41558+ if (IS_ERR((const char __force_kernel *)str))
41559 goto out;
41560
41561 len = strnlen_user(str, MAX_ARG_STRLEN);
41562@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41563 int r;
41564 mm_segment_t oldfs = get_fs();
41565 struct user_arg_ptr argv = {
41566- .ptr.native = (const char __user *const __user *)__argv,
41567+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41568 };
41569
41570 set_fs(KERNEL_DS);
41571@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41572 unsigned long new_end = old_end - shift;
41573 struct mmu_gather tlb;
41574
41575- BUG_ON(new_start > new_end);
41576+ if (new_start >= new_end || new_start < mmap_min_addr)
41577+ return -ENOMEM;
41578
41579 /*
41580 * ensure there are no vmas between where we want to go
41581@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41582 if (vma != find_vma(mm, new_start))
41583 return -EFAULT;
41584
41585+#ifdef CONFIG_PAX_SEGMEXEC
41586+ BUG_ON(pax_find_mirror_vma(vma));
41587+#endif
41588+
41589 /*
41590 * cover the whole range: [new_start, old_end)
41591 */
41592@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41593 stack_top = arch_align_stack(stack_top);
41594 stack_top = PAGE_ALIGN(stack_top);
41595
41596- if (unlikely(stack_top < mmap_min_addr) ||
41597- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41598- return -ENOMEM;
41599-
41600 stack_shift = vma->vm_end - stack_top;
41601
41602 bprm->p -= stack_shift;
41603@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41604 bprm->exec -= stack_shift;
41605
41606 down_write(&mm->mmap_sem);
41607+
41608+ /* Move stack pages down in memory. */
41609+ if (stack_shift) {
41610+ ret = shift_arg_pages(vma, stack_shift);
41611+ if (ret)
41612+ goto out_unlock;
41613+ }
41614+
41615 vm_flags = VM_STACK_FLAGS;
41616
41617+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41618+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41619+ vm_flags &= ~VM_EXEC;
41620+
41621+#ifdef CONFIG_PAX_MPROTECT
41622+ if (mm->pax_flags & MF_PAX_MPROTECT)
41623+ vm_flags &= ~VM_MAYEXEC;
41624+#endif
41625+
41626+ }
41627+#endif
41628+
41629 /*
41630 * Adjust stack execute permissions; explicitly enable for
41631 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41632@@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41633 goto out_unlock;
41634 BUG_ON(prev != vma);
41635
41636- /* Move stack pages down in memory. */
41637- if (stack_shift) {
41638- ret = shift_arg_pages(vma, stack_shift);
41639- if (ret)
41640- goto out_unlock;
41641- }
41642-
41643 /* mprotect_fixup is overkill to remove the temporary stack flags */
41644 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41645
41646@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
41647 old_fs = get_fs();
41648 set_fs(get_ds());
41649 /* The cast to a user pointer is valid due to the set_fs() */
41650- result = vfs_read(file, (void __user *)addr, count, &pos);
41651+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41652 set_fs(old_fs);
41653 return result;
41654 }
41655@@ -1247,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41656 }
41657 rcu_read_unlock();
41658
41659- if (p->fs->users > n_fs) {
41660+ if (atomic_read(&p->fs->users) > n_fs) {
41661 bprm->unsafe |= LSM_UNSAFE_SHARE;
41662 } else {
41663 res = -EAGAIN;
41664@@ -1450,6 +1471,11 @@ static int do_execve_common(const char *filename,
41665 struct user_arg_ptr envp,
41666 struct pt_regs *regs)
41667 {
41668+#ifdef CONFIG_GRKERNSEC
41669+ struct file *old_exec_file;
41670+ struct acl_subject_label *old_acl;
41671+ struct rlimit old_rlim[RLIM_NLIMITS];
41672+#endif
41673 struct linux_binprm *bprm;
41674 struct file *file;
41675 struct files_struct *displaced;
41676@@ -1457,6 +1483,8 @@ static int do_execve_common(const char *filename,
41677 int retval;
41678 const struct cred *cred = current_cred();
41679
41680+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41681+
41682 /*
41683 * We move the actual failure in case of RLIMIT_NPROC excess from
41684 * set*uid() to execve() because too many poorly written programs
41685@@ -1497,12 +1525,27 @@ static int do_execve_common(const char *filename,
41686 if (IS_ERR(file))
41687 goto out_unmark;
41688
41689+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
41690+ retval = -EPERM;
41691+ goto out_file;
41692+ }
41693+
41694 sched_exec();
41695
41696 bprm->file = file;
41697 bprm->filename = filename;
41698 bprm->interp = filename;
41699
41700+ if (gr_process_user_ban()) {
41701+ retval = -EPERM;
41702+ goto out_file;
41703+ }
41704+
41705+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41706+ retval = -EACCES;
41707+ goto out_file;
41708+ }
41709+
41710 retval = bprm_mm_init(bprm);
41711 if (retval)
41712 goto out_file;
41713@@ -1532,9 +1575,40 @@ static int do_execve_common(const char *filename,
41714 if (retval < 0)
41715 goto out;
41716
41717+ if (!gr_tpe_allow(file)) {
41718+ retval = -EACCES;
41719+ goto out;
41720+ }
41721+
41722+ if (gr_check_crash_exec(file)) {
41723+ retval = -EACCES;
41724+ goto out;
41725+ }
41726+
41727+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41728+
41729+ gr_handle_exec_args(bprm, argv);
41730+
41731+#ifdef CONFIG_GRKERNSEC
41732+ old_acl = current->acl;
41733+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41734+ old_exec_file = current->exec_file;
41735+ get_file(file);
41736+ current->exec_file = file;
41737+#endif
41738+
41739+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41740+ bprm->unsafe);
41741+ if (retval < 0)
41742+ goto out_fail;
41743+
41744 retval = search_binary_handler(bprm,regs);
41745 if (retval < 0)
41746- goto out;
41747+ goto out_fail;
41748+#ifdef CONFIG_GRKERNSEC
41749+ if (old_exec_file)
41750+ fput(old_exec_file);
41751+#endif
41752
41753 /* execve succeeded */
41754 current->fs->in_exec = 0;
41755@@ -1545,6 +1619,14 @@ static int do_execve_common(const char *filename,
41756 put_files_struct(displaced);
41757 return retval;
41758
41759+out_fail:
41760+#ifdef CONFIG_GRKERNSEC
41761+ current->acl = old_acl;
41762+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41763+ fput(current->exec_file);
41764+ current->exec_file = old_exec_file;
41765+#endif
41766+
41767 out:
41768 if (bprm->mm) {
41769 acct_arg_size(bprm, 0);
41770@@ -1618,7 +1700,7 @@ static int expand_corename(struct core_name *cn)
41771 {
41772 char *old_corename = cn->corename;
41773
41774- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41775+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41776 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41777
41778 if (!cn->corename) {
41779@@ -1715,7 +1797,7 @@ static int format_corename(struct core_name *cn, long signr)
41780 int pid_in_pattern = 0;
41781 int err = 0;
41782
41783- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41784+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41785 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41786 cn->used = 0;
41787
41788@@ -1812,6 +1894,218 @@ out:
41789 return ispipe;
41790 }
41791
41792+int pax_check_flags(unsigned long *flags)
41793+{
41794+ int retval = 0;
41795+
41796+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41797+ if (*flags & MF_PAX_SEGMEXEC)
41798+ {
41799+ *flags &= ~MF_PAX_SEGMEXEC;
41800+ retval = -EINVAL;
41801+ }
41802+#endif
41803+
41804+ if ((*flags & MF_PAX_PAGEEXEC)
41805+
41806+#ifdef CONFIG_PAX_PAGEEXEC
41807+ && (*flags & MF_PAX_SEGMEXEC)
41808+#endif
41809+
41810+ )
41811+ {
41812+ *flags &= ~MF_PAX_PAGEEXEC;
41813+ retval = -EINVAL;
41814+ }
41815+
41816+ if ((*flags & MF_PAX_MPROTECT)
41817+
41818+#ifdef CONFIG_PAX_MPROTECT
41819+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41820+#endif
41821+
41822+ )
41823+ {
41824+ *flags &= ~MF_PAX_MPROTECT;
41825+ retval = -EINVAL;
41826+ }
41827+
41828+ if ((*flags & MF_PAX_EMUTRAMP)
41829+
41830+#ifdef CONFIG_PAX_EMUTRAMP
41831+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41832+#endif
41833+
41834+ )
41835+ {
41836+ *flags &= ~MF_PAX_EMUTRAMP;
41837+ retval = -EINVAL;
41838+ }
41839+
41840+ return retval;
41841+}
41842+
41843+EXPORT_SYMBOL(pax_check_flags);
41844+
41845+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41846+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41847+{
41848+ struct task_struct *tsk = current;
41849+ struct mm_struct *mm = current->mm;
41850+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41851+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41852+ char *path_exec = NULL;
41853+ char *path_fault = NULL;
41854+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41855+
41856+ if (buffer_exec && buffer_fault) {
41857+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41858+
41859+ down_read(&mm->mmap_sem);
41860+ vma = mm->mmap;
41861+ while (vma && (!vma_exec || !vma_fault)) {
41862+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41863+ vma_exec = vma;
41864+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41865+ vma_fault = vma;
41866+ vma = vma->vm_next;
41867+ }
41868+ if (vma_exec) {
41869+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41870+ if (IS_ERR(path_exec))
41871+ path_exec = "<path too long>";
41872+ else {
41873+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41874+ if (path_exec) {
41875+ *path_exec = 0;
41876+ path_exec = buffer_exec;
41877+ } else
41878+ path_exec = "<path too long>";
41879+ }
41880+ }
41881+ if (vma_fault) {
41882+ start = vma_fault->vm_start;
41883+ end = vma_fault->vm_end;
41884+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41885+ if (vma_fault->vm_file) {
41886+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41887+ if (IS_ERR(path_fault))
41888+ path_fault = "<path too long>";
41889+ else {
41890+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41891+ if (path_fault) {
41892+ *path_fault = 0;
41893+ path_fault = buffer_fault;
41894+ } else
41895+ path_fault = "<path too long>";
41896+ }
41897+ } else
41898+ path_fault = "<anonymous mapping>";
41899+ }
41900+ up_read(&mm->mmap_sem);
41901+ }
41902+ if (tsk->signal->curr_ip)
41903+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41904+ else
41905+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41906+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41907+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41908+ task_uid(tsk), task_euid(tsk), pc, sp);
41909+ free_page((unsigned long)buffer_exec);
41910+ free_page((unsigned long)buffer_fault);
41911+ pax_report_insns(regs, pc, sp);
41912+ do_coredump(SIGKILL, SIGKILL, regs);
41913+}
41914+#endif
41915+
41916+#ifdef CONFIG_PAX_REFCOUNT
41917+void pax_report_refcount_overflow(struct pt_regs *regs)
41918+{
41919+ if (current->signal->curr_ip)
41920+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41921+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41922+ else
41923+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41924+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41925+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41926+ show_regs(regs);
41927+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41928+}
41929+#endif
41930+
41931+#ifdef CONFIG_PAX_USERCOPY
41932+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41933+int object_is_on_stack(const void *obj, unsigned long len)
41934+{
41935+ const void * const stack = task_stack_page(current);
41936+ const void * const stackend = stack + THREAD_SIZE;
41937+
41938+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41939+ const void *frame = NULL;
41940+ const void *oldframe;
41941+#endif
41942+
41943+ if (obj + len < obj)
41944+ return -1;
41945+
41946+ if (obj + len <= stack || stackend <= obj)
41947+ return 0;
41948+
41949+ if (obj < stack || stackend < obj + len)
41950+ return -1;
41951+
41952+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41953+ oldframe = __builtin_frame_address(1);
41954+ if (oldframe)
41955+ frame = __builtin_frame_address(2);
41956+ /*
41957+ low ----------------------------------------------> high
41958+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41959+ ^----------------^
41960+ allow copies only within here
41961+ */
41962+ while (stack <= frame && frame < stackend) {
41963+ /* if obj + len extends past the last frame, this
41964+ check won't pass and the next frame will be 0,
41965+ causing us to bail out and correctly report
41966+ the copy as invalid
41967+ */
41968+ if (obj + len <= frame)
41969+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41970+ oldframe = frame;
41971+ frame = *(const void * const *)frame;
41972+ }
41973+ return -1;
41974+#else
41975+ return 1;
41976+#endif
41977+}
41978+
41979+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41980+{
41981+ if (current->signal->curr_ip)
41982+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41983+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41984+ else
41985+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41986+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41987+ dump_stack();
41988+ gr_handle_kernel_exploit();
41989+ do_group_exit(SIGKILL);
41990+}
41991+#endif
41992+
41993+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41994+void pax_track_stack(void)
41995+{
41996+ unsigned long sp = (unsigned long)&sp;
41997+ if (sp < current_thread_info()->lowest_stack &&
41998+ sp > (unsigned long)task_stack_page(current))
41999+ current_thread_info()->lowest_stack = sp;
42000+}
42001+EXPORT_SYMBOL(pax_track_stack);
42002+#endif
42003+
42004 static int zap_process(struct task_struct *start, int exit_code)
42005 {
42006 struct task_struct *t;
42007@@ -2023,17 +2317,17 @@ static void wait_for_dump_helpers(struct file *file)
42008 pipe = file->f_path.dentry->d_inode->i_pipe;
42009
42010 pipe_lock(pipe);
42011- pipe->readers++;
42012- pipe->writers--;
42013+ atomic_inc(&pipe->readers);
42014+ atomic_dec(&pipe->writers);
42015
42016- while ((pipe->readers > 1) && (!signal_pending(current))) {
42017+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42018 wake_up_interruptible_sync(&pipe->wait);
42019 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42020 pipe_wait(pipe);
42021 }
42022
42023- pipe->readers--;
42024- pipe->writers++;
42025+ atomic_dec(&pipe->readers);
42026+ atomic_inc(&pipe->writers);
42027 pipe_unlock(pipe);
42028
42029 }
42030@@ -2094,7 +2388,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42031 int retval = 0;
42032 int flag = 0;
42033 int ispipe;
42034- static atomic_t core_dump_count = ATOMIC_INIT(0);
42035+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42036 struct coredump_params cprm = {
42037 .signr = signr,
42038 .regs = regs,
42039@@ -2109,6 +2403,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42040
42041 audit_core_dumps(signr);
42042
42043+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42044+ gr_handle_brute_attach(current, cprm.mm_flags);
42045+
42046 binfmt = mm->binfmt;
42047 if (!binfmt || !binfmt->core_dump)
42048 goto fail;
42049@@ -2176,7 +2473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42050 }
42051 cprm.limit = RLIM_INFINITY;
42052
42053- dump_count = atomic_inc_return(&core_dump_count);
42054+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42055 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42056 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42057 task_tgid_vnr(current), current->comm);
42058@@ -2203,6 +2500,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42059 } else {
42060 struct inode *inode;
42061
42062+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42063+
42064 if (cprm.limit < binfmt->min_coredump)
42065 goto fail_unlock;
42066
42067@@ -2246,7 +2545,7 @@ close_fail:
42068 filp_close(cprm.file, NULL);
42069 fail_dropcount:
42070 if (ispipe)
42071- atomic_dec(&core_dump_count);
42072+ atomic_dec_unchecked(&core_dump_count);
42073 fail_unlock:
42074 kfree(cn.corename);
42075 fail_corename:
42076@@ -2265,7 +2564,7 @@ fail:
42077 */
42078 int dump_write(struct file *file, const void *addr, int nr)
42079 {
42080- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42081+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42082 }
42083 EXPORT_SYMBOL(dump_write);
42084
42085diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42086index a8cbe1b..fed04cb 100644
42087--- a/fs/ext2/balloc.c
42088+++ b/fs/ext2/balloc.c
42089@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42090
42091 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42092 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42093- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42094+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42095 sbi->s_resuid != current_fsuid() &&
42096 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42097 return 0;
42098diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42099index a203892..4e64db5 100644
42100--- a/fs/ext3/balloc.c
42101+++ b/fs/ext3/balloc.c
42102@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42103
42104 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42105 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42106- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42107+ if (free_blocks < root_blocks + 1 &&
42108 !use_reservation && sbi->s_resuid != current_fsuid() &&
42109- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42110+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42111+ !capable_nolog(CAP_SYS_RESOURCE)) {
42112 return 0;
42113 }
42114 return 1;
42115diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42116index 12ccacd..a6035fce0 100644
42117--- a/fs/ext4/balloc.c
42118+++ b/fs/ext4/balloc.c
42119@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42120 /* Hm, nope. Are (enough) root reserved clusters available? */
42121 if (sbi->s_resuid == current_fsuid() ||
42122 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42123- capable(CAP_SYS_RESOURCE) ||
42124- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42125+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42126+ capable_nolog(CAP_SYS_RESOURCE)) {
42127
42128 if (free_clusters >= (nclusters + dirty_clusters))
42129 return 1;
42130diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42131index 5b0e26a..0aa002d 100644
42132--- a/fs/ext4/ext4.h
42133+++ b/fs/ext4/ext4.h
42134@@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42135 unsigned long s_mb_last_start;
42136
42137 /* stats for buddy allocator */
42138- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42139- atomic_t s_bal_success; /* we found long enough chunks */
42140- atomic_t s_bal_allocated; /* in blocks */
42141- atomic_t s_bal_ex_scanned; /* total extents scanned */
42142- atomic_t s_bal_goals; /* goal hits */
42143- atomic_t s_bal_breaks; /* too long searches */
42144- atomic_t s_bal_2orders; /* 2^order hits */
42145+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42146+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42147+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42148+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42149+ atomic_unchecked_t s_bal_goals; /* goal hits */
42150+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42151+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42152 spinlock_t s_bal_lock;
42153 unsigned long s_mb_buddies_generated;
42154 unsigned long long s_mb_generation_time;
42155- atomic_t s_mb_lost_chunks;
42156- atomic_t s_mb_preallocated;
42157- atomic_t s_mb_discarded;
42158+ atomic_unchecked_t s_mb_lost_chunks;
42159+ atomic_unchecked_t s_mb_preallocated;
42160+ atomic_unchecked_t s_mb_discarded;
42161 atomic_t s_lock_busy;
42162
42163 /* locality groups */
42164diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42165index e2d8be8..c7f0ce9 100644
42166--- a/fs/ext4/mballoc.c
42167+++ b/fs/ext4/mballoc.c
42168@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42169 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42170
42171 if (EXT4_SB(sb)->s_mb_stats)
42172- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42173+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42174
42175 break;
42176 }
42177@@ -2088,7 +2088,7 @@ repeat:
42178 ac->ac_status = AC_STATUS_CONTINUE;
42179 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42180 cr = 3;
42181- atomic_inc(&sbi->s_mb_lost_chunks);
42182+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42183 goto repeat;
42184 }
42185 }
42186@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42187 if (sbi->s_mb_stats) {
42188 ext4_msg(sb, KERN_INFO,
42189 "mballoc: %u blocks %u reqs (%u success)",
42190- atomic_read(&sbi->s_bal_allocated),
42191- atomic_read(&sbi->s_bal_reqs),
42192- atomic_read(&sbi->s_bal_success));
42193+ atomic_read_unchecked(&sbi->s_bal_allocated),
42194+ atomic_read_unchecked(&sbi->s_bal_reqs),
42195+ atomic_read_unchecked(&sbi->s_bal_success));
42196 ext4_msg(sb, KERN_INFO,
42197 "mballoc: %u extents scanned, %u goal hits, "
42198 "%u 2^N hits, %u breaks, %u lost",
42199- atomic_read(&sbi->s_bal_ex_scanned),
42200- atomic_read(&sbi->s_bal_goals),
42201- atomic_read(&sbi->s_bal_2orders),
42202- atomic_read(&sbi->s_bal_breaks),
42203- atomic_read(&sbi->s_mb_lost_chunks));
42204+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42205+ atomic_read_unchecked(&sbi->s_bal_goals),
42206+ atomic_read_unchecked(&sbi->s_bal_2orders),
42207+ atomic_read_unchecked(&sbi->s_bal_breaks),
42208+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42209 ext4_msg(sb, KERN_INFO,
42210 "mballoc: %lu generated and it took %Lu",
42211 sbi->s_mb_buddies_generated,
42212 sbi->s_mb_generation_time);
42213 ext4_msg(sb, KERN_INFO,
42214 "mballoc: %u preallocated, %u discarded",
42215- atomic_read(&sbi->s_mb_preallocated),
42216- atomic_read(&sbi->s_mb_discarded));
42217+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42218+ atomic_read_unchecked(&sbi->s_mb_discarded));
42219 }
42220
42221 free_percpu(sbi->s_locality_groups);
42222@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42223 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42224
42225 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42226- atomic_inc(&sbi->s_bal_reqs);
42227- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42228+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42229+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42230 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42231- atomic_inc(&sbi->s_bal_success);
42232- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42233+ atomic_inc_unchecked(&sbi->s_bal_success);
42234+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42235 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42236 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42237- atomic_inc(&sbi->s_bal_goals);
42238+ atomic_inc_unchecked(&sbi->s_bal_goals);
42239 if (ac->ac_found > sbi->s_mb_max_to_scan)
42240- atomic_inc(&sbi->s_bal_breaks);
42241+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42242 }
42243
42244 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42245@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42246 trace_ext4_mb_new_inode_pa(ac, pa);
42247
42248 ext4_mb_use_inode_pa(ac, pa);
42249- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42250+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42251
42252 ei = EXT4_I(ac->ac_inode);
42253 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42254@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42255 trace_ext4_mb_new_group_pa(ac, pa);
42256
42257 ext4_mb_use_group_pa(ac, pa);
42258- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42259+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42260
42261 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42262 lg = ac->ac_lg;
42263@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42264 * from the bitmap and continue.
42265 */
42266 }
42267- atomic_add(free, &sbi->s_mb_discarded);
42268+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42269
42270 return err;
42271 }
42272@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42273 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42274 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42275 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42276- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42277+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42278 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42279
42280 return 0;
42281diff --git a/fs/fcntl.c b/fs/fcntl.c
42282index 22764c7..86372c9 100644
42283--- a/fs/fcntl.c
42284+++ b/fs/fcntl.c
42285@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42286 if (err)
42287 return err;
42288
42289+ if (gr_handle_chroot_fowner(pid, type))
42290+ return -ENOENT;
42291+ if (gr_check_protected_task_fowner(pid, type))
42292+ return -EACCES;
42293+
42294 f_modown(filp, pid, type, force);
42295 return 0;
42296 }
42297@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42298
42299 static int f_setown_ex(struct file *filp, unsigned long arg)
42300 {
42301- struct f_owner_ex * __user owner_p = (void * __user)arg;
42302+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42303 struct f_owner_ex owner;
42304 struct pid *pid;
42305 int type;
42306@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42307
42308 static int f_getown_ex(struct file *filp, unsigned long arg)
42309 {
42310- struct f_owner_ex * __user owner_p = (void * __user)arg;
42311+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42312 struct f_owner_ex owner;
42313 int ret = 0;
42314
42315@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42316 switch (cmd) {
42317 case F_DUPFD:
42318 case F_DUPFD_CLOEXEC:
42319+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42320 if (arg >= rlimit(RLIMIT_NOFILE))
42321 break;
42322 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42323diff --git a/fs/fifo.c b/fs/fifo.c
42324index b1a524d..4ee270e 100644
42325--- a/fs/fifo.c
42326+++ b/fs/fifo.c
42327@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42328 */
42329 filp->f_op = &read_pipefifo_fops;
42330 pipe->r_counter++;
42331- if (pipe->readers++ == 0)
42332+ if (atomic_inc_return(&pipe->readers) == 1)
42333 wake_up_partner(inode);
42334
42335- if (!pipe->writers) {
42336+ if (!atomic_read(&pipe->writers)) {
42337 if ((filp->f_flags & O_NONBLOCK)) {
42338 /* suppress POLLHUP until we have
42339 * seen a writer */
42340@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42341 * errno=ENXIO when there is no process reading the FIFO.
42342 */
42343 ret = -ENXIO;
42344- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42345+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42346 goto err;
42347
42348 filp->f_op = &write_pipefifo_fops;
42349 pipe->w_counter++;
42350- if (!pipe->writers++)
42351+ if (atomic_inc_return(&pipe->writers) == 1)
42352 wake_up_partner(inode);
42353
42354- if (!pipe->readers) {
42355+ if (!atomic_read(&pipe->readers)) {
42356 wait_for_partner(inode, &pipe->r_counter);
42357 if (signal_pending(current))
42358 goto err_wr;
42359@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42360 */
42361 filp->f_op = &rdwr_pipefifo_fops;
42362
42363- pipe->readers++;
42364- pipe->writers++;
42365+ atomic_inc(&pipe->readers);
42366+ atomic_inc(&pipe->writers);
42367 pipe->r_counter++;
42368 pipe->w_counter++;
42369- if (pipe->readers == 1 || pipe->writers == 1)
42370+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42371 wake_up_partner(inode);
42372 break;
42373
42374@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42375 return 0;
42376
42377 err_rd:
42378- if (!--pipe->readers)
42379+ if (atomic_dec_and_test(&pipe->readers))
42380 wake_up_interruptible(&pipe->wait);
42381 ret = -ERESTARTSYS;
42382 goto err;
42383
42384 err_wr:
42385- if (!--pipe->writers)
42386+ if (atomic_dec_and_test(&pipe->writers))
42387 wake_up_interruptible(&pipe->wait);
42388 ret = -ERESTARTSYS;
42389 goto err;
42390
42391 err:
42392- if (!pipe->readers && !pipe->writers)
42393+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42394 free_pipe_info(inode);
42395
42396 err_nocleanup:
42397diff --git a/fs/file.c b/fs/file.c
42398index 4c6992d..104cdea 100644
42399--- a/fs/file.c
42400+++ b/fs/file.c
42401@@ -15,6 +15,7 @@
42402 #include <linux/slab.h>
42403 #include <linux/vmalloc.h>
42404 #include <linux/file.h>
42405+#include <linux/security.h>
42406 #include <linux/fdtable.h>
42407 #include <linux/bitops.h>
42408 #include <linux/interrupt.h>
42409@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42410 * N.B. For clone tasks sharing a files structure, this test
42411 * will limit the total number of files that can be opened.
42412 */
42413+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42414 if (nr >= rlimit(RLIMIT_NOFILE))
42415 return -EMFILE;
42416
42417diff --git a/fs/filesystems.c b/fs/filesystems.c
42418index 0845f84..7b4ebef 100644
42419--- a/fs/filesystems.c
42420+++ b/fs/filesystems.c
42421@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42422 int len = dot ? dot - name : strlen(name);
42423
42424 fs = __get_fs_type(name, len);
42425+
42426+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42427+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42428+#else
42429 if (!fs && (request_module("%.*s", len, name) == 0))
42430+#endif
42431 fs = __get_fs_type(name, len);
42432
42433 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42434diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42435index 78b519c..212c0d0 100644
42436--- a/fs/fs_struct.c
42437+++ b/fs/fs_struct.c
42438@@ -4,6 +4,7 @@
42439 #include <linux/path.h>
42440 #include <linux/slab.h>
42441 #include <linux/fs_struct.h>
42442+#include <linux/grsecurity.h>
42443 #include "internal.h"
42444
42445 static inline void path_get_longterm(struct path *path)
42446@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42447 old_root = fs->root;
42448 fs->root = *path;
42449 path_get_longterm(path);
42450+ gr_set_chroot_entries(current, path);
42451 write_seqcount_end(&fs->seq);
42452 spin_unlock(&fs->lock);
42453 if (old_root.dentry)
42454@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42455 && fs->root.mnt == old_root->mnt) {
42456 path_get_longterm(new_root);
42457 fs->root = *new_root;
42458+ gr_set_chroot_entries(p, new_root);
42459 count++;
42460 }
42461 if (fs->pwd.dentry == old_root->dentry
42462@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42463 spin_lock(&fs->lock);
42464 write_seqcount_begin(&fs->seq);
42465 tsk->fs = NULL;
42466- kill = !--fs->users;
42467+ gr_clear_chroot_entries(tsk);
42468+ kill = !atomic_dec_return(&fs->users);
42469 write_seqcount_end(&fs->seq);
42470 spin_unlock(&fs->lock);
42471 task_unlock(tsk);
42472@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42473 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42474 /* We don't need to lock fs - think why ;-) */
42475 if (fs) {
42476- fs->users = 1;
42477+ atomic_set(&fs->users, 1);
42478 fs->in_exec = 0;
42479 spin_lock_init(&fs->lock);
42480 seqcount_init(&fs->seq);
42481@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42482 spin_lock(&old->lock);
42483 fs->root = old->root;
42484 path_get_longterm(&fs->root);
42485+ /* instead of calling gr_set_chroot_entries here,
42486+ we call it from every caller of this function
42487+ */
42488 fs->pwd = old->pwd;
42489 path_get_longterm(&fs->pwd);
42490 spin_unlock(&old->lock);
42491@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42492
42493 task_lock(current);
42494 spin_lock(&fs->lock);
42495- kill = !--fs->users;
42496+ kill = !atomic_dec_return(&fs->users);
42497 current->fs = new_fs;
42498+ gr_set_chroot_entries(current, &new_fs->root);
42499 spin_unlock(&fs->lock);
42500 task_unlock(current);
42501
42502@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42503
42504 /* to be mentioned only in INIT_TASK */
42505 struct fs_struct init_fs = {
42506- .users = 1,
42507+ .users = ATOMIC_INIT(1),
42508 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42509 .seq = SEQCNT_ZERO,
42510 .umask = 0022,
42511@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42512 task_lock(current);
42513
42514 spin_lock(&init_fs.lock);
42515- init_fs.users++;
42516+ atomic_inc(&init_fs.users);
42517 spin_unlock(&init_fs.lock);
42518
42519 spin_lock(&fs->lock);
42520 current->fs = &init_fs;
42521- kill = !--fs->users;
42522+ gr_set_chroot_entries(current, &current->fs->root);
42523+ kill = !atomic_dec_return(&fs->users);
42524 spin_unlock(&fs->lock);
42525
42526 task_unlock(current);
42527diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42528index 9905350..02eaec4 100644
42529--- a/fs/fscache/cookie.c
42530+++ b/fs/fscache/cookie.c
42531@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42532 parent ? (char *) parent->def->name : "<no-parent>",
42533 def->name, netfs_data);
42534
42535- fscache_stat(&fscache_n_acquires);
42536+ fscache_stat_unchecked(&fscache_n_acquires);
42537
42538 /* if there's no parent cookie, then we don't create one here either */
42539 if (!parent) {
42540- fscache_stat(&fscache_n_acquires_null);
42541+ fscache_stat_unchecked(&fscache_n_acquires_null);
42542 _leave(" [no parent]");
42543 return NULL;
42544 }
42545@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42546 /* allocate and initialise a cookie */
42547 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42548 if (!cookie) {
42549- fscache_stat(&fscache_n_acquires_oom);
42550+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42551 _leave(" [ENOMEM]");
42552 return NULL;
42553 }
42554@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42555
42556 switch (cookie->def->type) {
42557 case FSCACHE_COOKIE_TYPE_INDEX:
42558- fscache_stat(&fscache_n_cookie_index);
42559+ fscache_stat_unchecked(&fscache_n_cookie_index);
42560 break;
42561 case FSCACHE_COOKIE_TYPE_DATAFILE:
42562- fscache_stat(&fscache_n_cookie_data);
42563+ fscache_stat_unchecked(&fscache_n_cookie_data);
42564 break;
42565 default:
42566- fscache_stat(&fscache_n_cookie_special);
42567+ fscache_stat_unchecked(&fscache_n_cookie_special);
42568 break;
42569 }
42570
42571@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42572 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42573 atomic_dec(&parent->n_children);
42574 __fscache_cookie_put(cookie);
42575- fscache_stat(&fscache_n_acquires_nobufs);
42576+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42577 _leave(" = NULL");
42578 return NULL;
42579 }
42580 }
42581
42582- fscache_stat(&fscache_n_acquires_ok);
42583+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42584 _leave(" = %p", cookie);
42585 return cookie;
42586 }
42587@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42588 cache = fscache_select_cache_for_object(cookie->parent);
42589 if (!cache) {
42590 up_read(&fscache_addremove_sem);
42591- fscache_stat(&fscache_n_acquires_no_cache);
42592+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42593 _leave(" = -ENOMEDIUM [no cache]");
42594 return -ENOMEDIUM;
42595 }
42596@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42597 object = cache->ops->alloc_object(cache, cookie);
42598 fscache_stat_d(&fscache_n_cop_alloc_object);
42599 if (IS_ERR(object)) {
42600- fscache_stat(&fscache_n_object_no_alloc);
42601+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42602 ret = PTR_ERR(object);
42603 goto error;
42604 }
42605
42606- fscache_stat(&fscache_n_object_alloc);
42607+ fscache_stat_unchecked(&fscache_n_object_alloc);
42608
42609 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42610
42611@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42612 struct fscache_object *object;
42613 struct hlist_node *_p;
42614
42615- fscache_stat(&fscache_n_updates);
42616+ fscache_stat_unchecked(&fscache_n_updates);
42617
42618 if (!cookie) {
42619- fscache_stat(&fscache_n_updates_null);
42620+ fscache_stat_unchecked(&fscache_n_updates_null);
42621 _leave(" [no cookie]");
42622 return;
42623 }
42624@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42625 struct fscache_object *object;
42626 unsigned long event;
42627
42628- fscache_stat(&fscache_n_relinquishes);
42629+ fscache_stat_unchecked(&fscache_n_relinquishes);
42630 if (retire)
42631- fscache_stat(&fscache_n_relinquishes_retire);
42632+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42633
42634 if (!cookie) {
42635- fscache_stat(&fscache_n_relinquishes_null);
42636+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42637 _leave(" [no cookie]");
42638 return;
42639 }
42640@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42641
42642 /* wait for the cookie to finish being instantiated (or to fail) */
42643 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42644- fscache_stat(&fscache_n_relinquishes_waitcrt);
42645+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42646 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42647 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42648 }
42649diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42650index f6aad48..88dcf26 100644
42651--- a/fs/fscache/internal.h
42652+++ b/fs/fscache/internal.h
42653@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42654 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42655 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42656
42657-extern atomic_t fscache_n_op_pend;
42658-extern atomic_t fscache_n_op_run;
42659-extern atomic_t fscache_n_op_enqueue;
42660-extern atomic_t fscache_n_op_deferred_release;
42661-extern atomic_t fscache_n_op_release;
42662-extern atomic_t fscache_n_op_gc;
42663-extern atomic_t fscache_n_op_cancelled;
42664-extern atomic_t fscache_n_op_rejected;
42665+extern atomic_unchecked_t fscache_n_op_pend;
42666+extern atomic_unchecked_t fscache_n_op_run;
42667+extern atomic_unchecked_t fscache_n_op_enqueue;
42668+extern atomic_unchecked_t fscache_n_op_deferred_release;
42669+extern atomic_unchecked_t fscache_n_op_release;
42670+extern atomic_unchecked_t fscache_n_op_gc;
42671+extern atomic_unchecked_t fscache_n_op_cancelled;
42672+extern atomic_unchecked_t fscache_n_op_rejected;
42673
42674-extern atomic_t fscache_n_attr_changed;
42675-extern atomic_t fscache_n_attr_changed_ok;
42676-extern atomic_t fscache_n_attr_changed_nobufs;
42677-extern atomic_t fscache_n_attr_changed_nomem;
42678-extern atomic_t fscache_n_attr_changed_calls;
42679+extern atomic_unchecked_t fscache_n_attr_changed;
42680+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42681+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42682+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42683+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42684
42685-extern atomic_t fscache_n_allocs;
42686-extern atomic_t fscache_n_allocs_ok;
42687-extern atomic_t fscache_n_allocs_wait;
42688-extern atomic_t fscache_n_allocs_nobufs;
42689-extern atomic_t fscache_n_allocs_intr;
42690-extern atomic_t fscache_n_allocs_object_dead;
42691-extern atomic_t fscache_n_alloc_ops;
42692-extern atomic_t fscache_n_alloc_op_waits;
42693+extern atomic_unchecked_t fscache_n_allocs;
42694+extern atomic_unchecked_t fscache_n_allocs_ok;
42695+extern atomic_unchecked_t fscache_n_allocs_wait;
42696+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42697+extern atomic_unchecked_t fscache_n_allocs_intr;
42698+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42699+extern atomic_unchecked_t fscache_n_alloc_ops;
42700+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42701
42702-extern atomic_t fscache_n_retrievals;
42703-extern atomic_t fscache_n_retrievals_ok;
42704-extern atomic_t fscache_n_retrievals_wait;
42705-extern atomic_t fscache_n_retrievals_nodata;
42706-extern atomic_t fscache_n_retrievals_nobufs;
42707-extern atomic_t fscache_n_retrievals_intr;
42708-extern atomic_t fscache_n_retrievals_nomem;
42709-extern atomic_t fscache_n_retrievals_object_dead;
42710-extern atomic_t fscache_n_retrieval_ops;
42711-extern atomic_t fscache_n_retrieval_op_waits;
42712+extern atomic_unchecked_t fscache_n_retrievals;
42713+extern atomic_unchecked_t fscache_n_retrievals_ok;
42714+extern atomic_unchecked_t fscache_n_retrievals_wait;
42715+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42716+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42717+extern atomic_unchecked_t fscache_n_retrievals_intr;
42718+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42719+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42720+extern atomic_unchecked_t fscache_n_retrieval_ops;
42721+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42722
42723-extern atomic_t fscache_n_stores;
42724-extern atomic_t fscache_n_stores_ok;
42725-extern atomic_t fscache_n_stores_again;
42726-extern atomic_t fscache_n_stores_nobufs;
42727-extern atomic_t fscache_n_stores_oom;
42728-extern atomic_t fscache_n_store_ops;
42729-extern atomic_t fscache_n_store_calls;
42730-extern atomic_t fscache_n_store_pages;
42731-extern atomic_t fscache_n_store_radix_deletes;
42732-extern atomic_t fscache_n_store_pages_over_limit;
42733+extern atomic_unchecked_t fscache_n_stores;
42734+extern atomic_unchecked_t fscache_n_stores_ok;
42735+extern atomic_unchecked_t fscache_n_stores_again;
42736+extern atomic_unchecked_t fscache_n_stores_nobufs;
42737+extern atomic_unchecked_t fscache_n_stores_oom;
42738+extern atomic_unchecked_t fscache_n_store_ops;
42739+extern atomic_unchecked_t fscache_n_store_calls;
42740+extern atomic_unchecked_t fscache_n_store_pages;
42741+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42742+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42743
42744-extern atomic_t fscache_n_store_vmscan_not_storing;
42745-extern atomic_t fscache_n_store_vmscan_gone;
42746-extern atomic_t fscache_n_store_vmscan_busy;
42747-extern atomic_t fscache_n_store_vmscan_cancelled;
42748+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42749+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42750+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42751+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42752
42753-extern atomic_t fscache_n_marks;
42754-extern atomic_t fscache_n_uncaches;
42755+extern atomic_unchecked_t fscache_n_marks;
42756+extern atomic_unchecked_t fscache_n_uncaches;
42757
42758-extern atomic_t fscache_n_acquires;
42759-extern atomic_t fscache_n_acquires_null;
42760-extern atomic_t fscache_n_acquires_no_cache;
42761-extern atomic_t fscache_n_acquires_ok;
42762-extern atomic_t fscache_n_acquires_nobufs;
42763-extern atomic_t fscache_n_acquires_oom;
42764+extern atomic_unchecked_t fscache_n_acquires;
42765+extern atomic_unchecked_t fscache_n_acquires_null;
42766+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42767+extern atomic_unchecked_t fscache_n_acquires_ok;
42768+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42769+extern atomic_unchecked_t fscache_n_acquires_oom;
42770
42771-extern atomic_t fscache_n_updates;
42772-extern atomic_t fscache_n_updates_null;
42773-extern atomic_t fscache_n_updates_run;
42774+extern atomic_unchecked_t fscache_n_updates;
42775+extern atomic_unchecked_t fscache_n_updates_null;
42776+extern atomic_unchecked_t fscache_n_updates_run;
42777
42778-extern atomic_t fscache_n_relinquishes;
42779-extern atomic_t fscache_n_relinquishes_null;
42780-extern atomic_t fscache_n_relinquishes_waitcrt;
42781-extern atomic_t fscache_n_relinquishes_retire;
42782+extern atomic_unchecked_t fscache_n_relinquishes;
42783+extern atomic_unchecked_t fscache_n_relinquishes_null;
42784+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42785+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42786
42787-extern atomic_t fscache_n_cookie_index;
42788-extern atomic_t fscache_n_cookie_data;
42789-extern atomic_t fscache_n_cookie_special;
42790+extern atomic_unchecked_t fscache_n_cookie_index;
42791+extern atomic_unchecked_t fscache_n_cookie_data;
42792+extern atomic_unchecked_t fscache_n_cookie_special;
42793
42794-extern atomic_t fscache_n_object_alloc;
42795-extern atomic_t fscache_n_object_no_alloc;
42796-extern atomic_t fscache_n_object_lookups;
42797-extern atomic_t fscache_n_object_lookups_negative;
42798-extern atomic_t fscache_n_object_lookups_positive;
42799-extern atomic_t fscache_n_object_lookups_timed_out;
42800-extern atomic_t fscache_n_object_created;
42801-extern atomic_t fscache_n_object_avail;
42802-extern atomic_t fscache_n_object_dead;
42803+extern atomic_unchecked_t fscache_n_object_alloc;
42804+extern atomic_unchecked_t fscache_n_object_no_alloc;
42805+extern atomic_unchecked_t fscache_n_object_lookups;
42806+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42807+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42808+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42809+extern atomic_unchecked_t fscache_n_object_created;
42810+extern atomic_unchecked_t fscache_n_object_avail;
42811+extern atomic_unchecked_t fscache_n_object_dead;
42812
42813-extern atomic_t fscache_n_checkaux_none;
42814-extern atomic_t fscache_n_checkaux_okay;
42815-extern atomic_t fscache_n_checkaux_update;
42816-extern atomic_t fscache_n_checkaux_obsolete;
42817+extern atomic_unchecked_t fscache_n_checkaux_none;
42818+extern atomic_unchecked_t fscache_n_checkaux_okay;
42819+extern atomic_unchecked_t fscache_n_checkaux_update;
42820+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42821
42822 extern atomic_t fscache_n_cop_alloc_object;
42823 extern atomic_t fscache_n_cop_lookup_object;
42824@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
42825 atomic_inc(stat);
42826 }
42827
42828+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42829+{
42830+ atomic_inc_unchecked(stat);
42831+}
42832+
42833 static inline void fscache_stat_d(atomic_t *stat)
42834 {
42835 atomic_dec(stat);
42836@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
42837
42838 #define __fscache_stat(stat) (NULL)
42839 #define fscache_stat(stat) do {} while (0)
42840+#define fscache_stat_unchecked(stat) do {} while (0)
42841 #define fscache_stat_d(stat) do {} while (0)
42842 #endif
42843
42844diff --git a/fs/fscache/object.c b/fs/fscache/object.c
42845index b6b897c..0ffff9c 100644
42846--- a/fs/fscache/object.c
42847+++ b/fs/fscache/object.c
42848@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42849 /* update the object metadata on disk */
42850 case FSCACHE_OBJECT_UPDATING:
42851 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42852- fscache_stat(&fscache_n_updates_run);
42853+ fscache_stat_unchecked(&fscache_n_updates_run);
42854 fscache_stat(&fscache_n_cop_update_object);
42855 object->cache->ops->update_object(object);
42856 fscache_stat_d(&fscache_n_cop_update_object);
42857@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42858 spin_lock(&object->lock);
42859 object->state = FSCACHE_OBJECT_DEAD;
42860 spin_unlock(&object->lock);
42861- fscache_stat(&fscache_n_object_dead);
42862+ fscache_stat_unchecked(&fscache_n_object_dead);
42863 goto terminal_transit;
42864
42865 /* handle the parent cache of this object being withdrawn from
42866@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42867 spin_lock(&object->lock);
42868 object->state = FSCACHE_OBJECT_DEAD;
42869 spin_unlock(&object->lock);
42870- fscache_stat(&fscache_n_object_dead);
42871+ fscache_stat_unchecked(&fscache_n_object_dead);
42872 goto terminal_transit;
42873
42874 /* complain about the object being woken up once it is
42875@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
42876 parent->cookie->def->name, cookie->def->name,
42877 object->cache->tag->name);
42878
42879- fscache_stat(&fscache_n_object_lookups);
42880+ fscache_stat_unchecked(&fscache_n_object_lookups);
42881 fscache_stat(&fscache_n_cop_lookup_object);
42882 ret = object->cache->ops->lookup_object(object);
42883 fscache_stat_d(&fscache_n_cop_lookup_object);
42884@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
42885 if (ret == -ETIMEDOUT) {
42886 /* probably stuck behind another object, so move this one to
42887 * the back of the queue */
42888- fscache_stat(&fscache_n_object_lookups_timed_out);
42889+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42890 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42891 }
42892
42893@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
42894
42895 spin_lock(&object->lock);
42896 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42897- fscache_stat(&fscache_n_object_lookups_negative);
42898+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42899
42900 /* transit here to allow write requests to begin stacking up
42901 * and read requests to begin returning ENODATA */
42902@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
42903 * result, in which case there may be data available */
42904 spin_lock(&object->lock);
42905 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42906- fscache_stat(&fscache_n_object_lookups_positive);
42907+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42908
42909 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42910
42911@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
42912 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42913 } else {
42914 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42915- fscache_stat(&fscache_n_object_created);
42916+ fscache_stat_unchecked(&fscache_n_object_created);
42917
42918 object->state = FSCACHE_OBJECT_AVAILABLE;
42919 spin_unlock(&object->lock);
42920@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
42921 fscache_enqueue_dependents(object);
42922
42923 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42924- fscache_stat(&fscache_n_object_avail);
42925+ fscache_stat_unchecked(&fscache_n_object_avail);
42926
42927 _leave("");
42928 }
42929@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
42930 enum fscache_checkaux result;
42931
42932 if (!object->cookie->def->check_aux) {
42933- fscache_stat(&fscache_n_checkaux_none);
42934+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42935 return FSCACHE_CHECKAUX_OKAY;
42936 }
42937
42938@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
42939 switch (result) {
42940 /* entry okay as is */
42941 case FSCACHE_CHECKAUX_OKAY:
42942- fscache_stat(&fscache_n_checkaux_okay);
42943+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42944 break;
42945
42946 /* entry requires update */
42947 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42948- fscache_stat(&fscache_n_checkaux_update);
42949+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42950 break;
42951
42952 /* entry requires deletion */
42953 case FSCACHE_CHECKAUX_OBSOLETE:
42954- fscache_stat(&fscache_n_checkaux_obsolete);
42955+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42956 break;
42957
42958 default:
42959diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
42960index 30afdfa..2256596 100644
42961--- a/fs/fscache/operation.c
42962+++ b/fs/fscache/operation.c
42963@@ -17,7 +17,7 @@
42964 #include <linux/slab.h>
42965 #include "internal.h"
42966
42967-atomic_t fscache_op_debug_id;
42968+atomic_unchecked_t fscache_op_debug_id;
42969 EXPORT_SYMBOL(fscache_op_debug_id);
42970
42971 /**
42972@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
42973 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42974 ASSERTCMP(atomic_read(&op->usage), >, 0);
42975
42976- fscache_stat(&fscache_n_op_enqueue);
42977+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42978 switch (op->flags & FSCACHE_OP_TYPE) {
42979 case FSCACHE_OP_ASYNC:
42980 _debug("queue async");
42981@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
42982 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42983 if (op->processor)
42984 fscache_enqueue_operation(op);
42985- fscache_stat(&fscache_n_op_run);
42986+ fscache_stat_unchecked(&fscache_n_op_run);
42987 }
42988
42989 /*
42990@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
42991 if (object->n_ops > 1) {
42992 atomic_inc(&op->usage);
42993 list_add_tail(&op->pend_link, &object->pending_ops);
42994- fscache_stat(&fscache_n_op_pend);
42995+ fscache_stat_unchecked(&fscache_n_op_pend);
42996 } else if (!list_empty(&object->pending_ops)) {
42997 atomic_inc(&op->usage);
42998 list_add_tail(&op->pend_link, &object->pending_ops);
42999- fscache_stat(&fscache_n_op_pend);
43000+ fscache_stat_unchecked(&fscache_n_op_pend);
43001 fscache_start_operations(object);
43002 } else {
43003 ASSERTCMP(object->n_in_progress, ==, 0);
43004@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43005 object->n_exclusive++; /* reads and writes must wait */
43006 atomic_inc(&op->usage);
43007 list_add_tail(&op->pend_link, &object->pending_ops);
43008- fscache_stat(&fscache_n_op_pend);
43009+ fscache_stat_unchecked(&fscache_n_op_pend);
43010 ret = 0;
43011 } else {
43012 /* not allowed to submit ops in any other state */
43013@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43014 if (object->n_exclusive > 0) {
43015 atomic_inc(&op->usage);
43016 list_add_tail(&op->pend_link, &object->pending_ops);
43017- fscache_stat(&fscache_n_op_pend);
43018+ fscache_stat_unchecked(&fscache_n_op_pend);
43019 } else if (!list_empty(&object->pending_ops)) {
43020 atomic_inc(&op->usage);
43021 list_add_tail(&op->pend_link, &object->pending_ops);
43022- fscache_stat(&fscache_n_op_pend);
43023+ fscache_stat_unchecked(&fscache_n_op_pend);
43024 fscache_start_operations(object);
43025 } else {
43026 ASSERTCMP(object->n_exclusive, ==, 0);
43027@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43028 object->n_ops++;
43029 atomic_inc(&op->usage);
43030 list_add_tail(&op->pend_link, &object->pending_ops);
43031- fscache_stat(&fscache_n_op_pend);
43032+ fscache_stat_unchecked(&fscache_n_op_pend);
43033 ret = 0;
43034 } else if (object->state == FSCACHE_OBJECT_DYING ||
43035 object->state == FSCACHE_OBJECT_LC_DYING ||
43036 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43037- fscache_stat(&fscache_n_op_rejected);
43038+ fscache_stat_unchecked(&fscache_n_op_rejected);
43039 ret = -ENOBUFS;
43040 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43041 fscache_report_unexpected_submission(object, op, ostate);
43042@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43043
43044 ret = -EBUSY;
43045 if (!list_empty(&op->pend_link)) {
43046- fscache_stat(&fscache_n_op_cancelled);
43047+ fscache_stat_unchecked(&fscache_n_op_cancelled);
43048 list_del_init(&op->pend_link);
43049 object->n_ops--;
43050 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43051@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43052 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43053 BUG();
43054
43055- fscache_stat(&fscache_n_op_release);
43056+ fscache_stat_unchecked(&fscache_n_op_release);
43057
43058 if (op->release) {
43059 op->release(op);
43060@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43061 * lock, and defer it otherwise */
43062 if (!spin_trylock(&object->lock)) {
43063 _debug("defer put");
43064- fscache_stat(&fscache_n_op_deferred_release);
43065+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43066
43067 cache = object->cache;
43068 spin_lock(&cache->op_gc_list_lock);
43069@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43070
43071 _debug("GC DEFERRED REL OBJ%x OP%x",
43072 object->debug_id, op->debug_id);
43073- fscache_stat(&fscache_n_op_gc);
43074+ fscache_stat_unchecked(&fscache_n_op_gc);
43075
43076 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43077
43078diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43079index 3f7a59b..cf196cc 100644
43080--- a/fs/fscache/page.c
43081+++ b/fs/fscache/page.c
43082@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43083 val = radix_tree_lookup(&cookie->stores, page->index);
43084 if (!val) {
43085 rcu_read_unlock();
43086- fscache_stat(&fscache_n_store_vmscan_not_storing);
43087+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43088 __fscache_uncache_page(cookie, page);
43089 return true;
43090 }
43091@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43092 spin_unlock(&cookie->stores_lock);
43093
43094 if (xpage) {
43095- fscache_stat(&fscache_n_store_vmscan_cancelled);
43096- fscache_stat(&fscache_n_store_radix_deletes);
43097+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43098+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43099 ASSERTCMP(xpage, ==, page);
43100 } else {
43101- fscache_stat(&fscache_n_store_vmscan_gone);
43102+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43103 }
43104
43105 wake_up_bit(&cookie->flags, 0);
43106@@ -107,7 +107,7 @@ page_busy:
43107 /* we might want to wait here, but that could deadlock the allocator as
43108 * the work threads writing to the cache may all end up sleeping
43109 * on memory allocation */
43110- fscache_stat(&fscache_n_store_vmscan_busy);
43111+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43112 return false;
43113 }
43114 EXPORT_SYMBOL(__fscache_maybe_release_page);
43115@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43116 FSCACHE_COOKIE_STORING_TAG);
43117 if (!radix_tree_tag_get(&cookie->stores, page->index,
43118 FSCACHE_COOKIE_PENDING_TAG)) {
43119- fscache_stat(&fscache_n_store_radix_deletes);
43120+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43121 xpage = radix_tree_delete(&cookie->stores, page->index);
43122 }
43123 spin_unlock(&cookie->stores_lock);
43124@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43125
43126 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43127
43128- fscache_stat(&fscache_n_attr_changed_calls);
43129+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43130
43131 if (fscache_object_is_active(object)) {
43132 fscache_stat(&fscache_n_cop_attr_changed);
43133@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43134
43135 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43136
43137- fscache_stat(&fscache_n_attr_changed);
43138+ fscache_stat_unchecked(&fscache_n_attr_changed);
43139
43140 op = kzalloc(sizeof(*op), GFP_KERNEL);
43141 if (!op) {
43142- fscache_stat(&fscache_n_attr_changed_nomem);
43143+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43144 _leave(" = -ENOMEM");
43145 return -ENOMEM;
43146 }
43147@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43148 if (fscache_submit_exclusive_op(object, op) < 0)
43149 goto nobufs;
43150 spin_unlock(&cookie->lock);
43151- fscache_stat(&fscache_n_attr_changed_ok);
43152+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43153 fscache_put_operation(op);
43154 _leave(" = 0");
43155 return 0;
43156@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43157 nobufs:
43158 spin_unlock(&cookie->lock);
43159 kfree(op);
43160- fscache_stat(&fscache_n_attr_changed_nobufs);
43161+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43162 _leave(" = %d", -ENOBUFS);
43163 return -ENOBUFS;
43164 }
43165@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43166 /* allocate a retrieval operation and attempt to submit it */
43167 op = kzalloc(sizeof(*op), GFP_NOIO);
43168 if (!op) {
43169- fscache_stat(&fscache_n_retrievals_nomem);
43170+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43171 return NULL;
43172 }
43173
43174@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43175 return 0;
43176 }
43177
43178- fscache_stat(&fscache_n_retrievals_wait);
43179+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43180
43181 jif = jiffies;
43182 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43183 fscache_wait_bit_interruptible,
43184 TASK_INTERRUPTIBLE) != 0) {
43185- fscache_stat(&fscache_n_retrievals_intr);
43186+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43187 _leave(" = -ERESTARTSYS");
43188 return -ERESTARTSYS;
43189 }
43190@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43191 */
43192 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43193 struct fscache_retrieval *op,
43194- atomic_t *stat_op_waits,
43195- atomic_t *stat_object_dead)
43196+ atomic_unchecked_t *stat_op_waits,
43197+ atomic_unchecked_t *stat_object_dead)
43198 {
43199 int ret;
43200
43201@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43202 goto check_if_dead;
43203
43204 _debug(">>> WT");
43205- fscache_stat(stat_op_waits);
43206+ fscache_stat_unchecked(stat_op_waits);
43207 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43208 fscache_wait_bit_interruptible,
43209 TASK_INTERRUPTIBLE) < 0) {
43210@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43211
43212 check_if_dead:
43213 if (unlikely(fscache_object_is_dead(object))) {
43214- fscache_stat(stat_object_dead);
43215+ fscache_stat_unchecked(stat_object_dead);
43216 return -ENOBUFS;
43217 }
43218 return 0;
43219@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43220
43221 _enter("%p,%p,,,", cookie, page);
43222
43223- fscache_stat(&fscache_n_retrievals);
43224+ fscache_stat_unchecked(&fscache_n_retrievals);
43225
43226 if (hlist_empty(&cookie->backing_objects))
43227 goto nobufs;
43228@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43229 goto nobufs_unlock;
43230 spin_unlock(&cookie->lock);
43231
43232- fscache_stat(&fscache_n_retrieval_ops);
43233+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43234
43235 /* pin the netfs read context in case we need to do the actual netfs
43236 * read because we've encountered a cache read failure */
43237@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43238
43239 error:
43240 if (ret == -ENOMEM)
43241- fscache_stat(&fscache_n_retrievals_nomem);
43242+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43243 else if (ret == -ERESTARTSYS)
43244- fscache_stat(&fscache_n_retrievals_intr);
43245+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43246 else if (ret == -ENODATA)
43247- fscache_stat(&fscache_n_retrievals_nodata);
43248+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43249 else if (ret < 0)
43250- fscache_stat(&fscache_n_retrievals_nobufs);
43251+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43252 else
43253- fscache_stat(&fscache_n_retrievals_ok);
43254+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43255
43256 fscache_put_retrieval(op);
43257 _leave(" = %d", ret);
43258@@ -429,7 +429,7 @@ nobufs_unlock:
43259 spin_unlock(&cookie->lock);
43260 kfree(op);
43261 nobufs:
43262- fscache_stat(&fscache_n_retrievals_nobufs);
43263+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43264 _leave(" = -ENOBUFS");
43265 return -ENOBUFS;
43266 }
43267@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43268
43269 _enter("%p,,%d,,,", cookie, *nr_pages);
43270
43271- fscache_stat(&fscache_n_retrievals);
43272+ fscache_stat_unchecked(&fscache_n_retrievals);
43273
43274 if (hlist_empty(&cookie->backing_objects))
43275 goto nobufs;
43276@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43277 goto nobufs_unlock;
43278 spin_unlock(&cookie->lock);
43279
43280- fscache_stat(&fscache_n_retrieval_ops);
43281+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43282
43283 /* pin the netfs read context in case we need to do the actual netfs
43284 * read because we've encountered a cache read failure */
43285@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43286
43287 error:
43288 if (ret == -ENOMEM)
43289- fscache_stat(&fscache_n_retrievals_nomem);
43290+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43291 else if (ret == -ERESTARTSYS)
43292- fscache_stat(&fscache_n_retrievals_intr);
43293+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43294 else if (ret == -ENODATA)
43295- fscache_stat(&fscache_n_retrievals_nodata);
43296+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43297 else if (ret < 0)
43298- fscache_stat(&fscache_n_retrievals_nobufs);
43299+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43300 else
43301- fscache_stat(&fscache_n_retrievals_ok);
43302+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43303
43304 fscache_put_retrieval(op);
43305 _leave(" = %d", ret);
43306@@ -545,7 +545,7 @@ nobufs_unlock:
43307 spin_unlock(&cookie->lock);
43308 kfree(op);
43309 nobufs:
43310- fscache_stat(&fscache_n_retrievals_nobufs);
43311+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43312 _leave(" = -ENOBUFS");
43313 return -ENOBUFS;
43314 }
43315@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43316
43317 _enter("%p,%p,,,", cookie, page);
43318
43319- fscache_stat(&fscache_n_allocs);
43320+ fscache_stat_unchecked(&fscache_n_allocs);
43321
43322 if (hlist_empty(&cookie->backing_objects))
43323 goto nobufs;
43324@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43325 goto nobufs_unlock;
43326 spin_unlock(&cookie->lock);
43327
43328- fscache_stat(&fscache_n_alloc_ops);
43329+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43330
43331 ret = fscache_wait_for_retrieval_activation(
43332 object, op,
43333@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43334
43335 error:
43336 if (ret == -ERESTARTSYS)
43337- fscache_stat(&fscache_n_allocs_intr);
43338+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43339 else if (ret < 0)
43340- fscache_stat(&fscache_n_allocs_nobufs);
43341+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43342 else
43343- fscache_stat(&fscache_n_allocs_ok);
43344+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43345
43346 fscache_put_retrieval(op);
43347 _leave(" = %d", ret);
43348@@ -625,7 +625,7 @@ nobufs_unlock:
43349 spin_unlock(&cookie->lock);
43350 kfree(op);
43351 nobufs:
43352- fscache_stat(&fscache_n_allocs_nobufs);
43353+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43354 _leave(" = -ENOBUFS");
43355 return -ENOBUFS;
43356 }
43357@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43358
43359 spin_lock(&cookie->stores_lock);
43360
43361- fscache_stat(&fscache_n_store_calls);
43362+ fscache_stat_unchecked(&fscache_n_store_calls);
43363
43364 /* find a page to store */
43365 page = NULL;
43366@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43367 page = results[0];
43368 _debug("gang %d [%lx]", n, page->index);
43369 if (page->index > op->store_limit) {
43370- fscache_stat(&fscache_n_store_pages_over_limit);
43371+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43372 goto superseded;
43373 }
43374
43375@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43376 spin_unlock(&cookie->stores_lock);
43377 spin_unlock(&object->lock);
43378
43379- fscache_stat(&fscache_n_store_pages);
43380+ fscache_stat_unchecked(&fscache_n_store_pages);
43381 fscache_stat(&fscache_n_cop_write_page);
43382 ret = object->cache->ops->write_page(op, page);
43383 fscache_stat_d(&fscache_n_cop_write_page);
43384@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43385 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43386 ASSERT(PageFsCache(page));
43387
43388- fscache_stat(&fscache_n_stores);
43389+ fscache_stat_unchecked(&fscache_n_stores);
43390
43391 op = kzalloc(sizeof(*op), GFP_NOIO);
43392 if (!op)
43393@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43394 spin_unlock(&cookie->stores_lock);
43395 spin_unlock(&object->lock);
43396
43397- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43398+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43399 op->store_limit = object->store_limit;
43400
43401 if (fscache_submit_op(object, &op->op) < 0)
43402@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43403
43404 spin_unlock(&cookie->lock);
43405 radix_tree_preload_end();
43406- fscache_stat(&fscache_n_store_ops);
43407- fscache_stat(&fscache_n_stores_ok);
43408+ fscache_stat_unchecked(&fscache_n_store_ops);
43409+ fscache_stat_unchecked(&fscache_n_stores_ok);
43410
43411 /* the work queue now carries its own ref on the object */
43412 fscache_put_operation(&op->op);
43413@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43414 return 0;
43415
43416 already_queued:
43417- fscache_stat(&fscache_n_stores_again);
43418+ fscache_stat_unchecked(&fscache_n_stores_again);
43419 already_pending:
43420 spin_unlock(&cookie->stores_lock);
43421 spin_unlock(&object->lock);
43422 spin_unlock(&cookie->lock);
43423 radix_tree_preload_end();
43424 kfree(op);
43425- fscache_stat(&fscache_n_stores_ok);
43426+ fscache_stat_unchecked(&fscache_n_stores_ok);
43427 _leave(" = 0");
43428 return 0;
43429
43430@@ -851,14 +851,14 @@ nobufs:
43431 spin_unlock(&cookie->lock);
43432 radix_tree_preload_end();
43433 kfree(op);
43434- fscache_stat(&fscache_n_stores_nobufs);
43435+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43436 _leave(" = -ENOBUFS");
43437 return -ENOBUFS;
43438
43439 nomem_free:
43440 kfree(op);
43441 nomem:
43442- fscache_stat(&fscache_n_stores_oom);
43443+ fscache_stat_unchecked(&fscache_n_stores_oom);
43444 _leave(" = -ENOMEM");
43445 return -ENOMEM;
43446 }
43447@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43448 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43449 ASSERTCMP(page, !=, NULL);
43450
43451- fscache_stat(&fscache_n_uncaches);
43452+ fscache_stat_unchecked(&fscache_n_uncaches);
43453
43454 /* cache withdrawal may beat us to it */
43455 if (!PageFsCache(page))
43456@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43457 unsigned long loop;
43458
43459 #ifdef CONFIG_FSCACHE_STATS
43460- atomic_add(pagevec->nr, &fscache_n_marks);
43461+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43462 #endif
43463
43464 for (loop = 0; loop < pagevec->nr; loop++) {
43465diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43466index 4765190..2a067f2 100644
43467--- a/fs/fscache/stats.c
43468+++ b/fs/fscache/stats.c
43469@@ -18,95 +18,95 @@
43470 /*
43471 * operation counters
43472 */
43473-atomic_t fscache_n_op_pend;
43474-atomic_t fscache_n_op_run;
43475-atomic_t fscache_n_op_enqueue;
43476-atomic_t fscache_n_op_requeue;
43477-atomic_t fscache_n_op_deferred_release;
43478-atomic_t fscache_n_op_release;
43479-atomic_t fscache_n_op_gc;
43480-atomic_t fscache_n_op_cancelled;
43481-atomic_t fscache_n_op_rejected;
43482+atomic_unchecked_t fscache_n_op_pend;
43483+atomic_unchecked_t fscache_n_op_run;
43484+atomic_unchecked_t fscache_n_op_enqueue;
43485+atomic_unchecked_t fscache_n_op_requeue;
43486+atomic_unchecked_t fscache_n_op_deferred_release;
43487+atomic_unchecked_t fscache_n_op_release;
43488+atomic_unchecked_t fscache_n_op_gc;
43489+atomic_unchecked_t fscache_n_op_cancelled;
43490+atomic_unchecked_t fscache_n_op_rejected;
43491
43492-atomic_t fscache_n_attr_changed;
43493-atomic_t fscache_n_attr_changed_ok;
43494-atomic_t fscache_n_attr_changed_nobufs;
43495-atomic_t fscache_n_attr_changed_nomem;
43496-atomic_t fscache_n_attr_changed_calls;
43497+atomic_unchecked_t fscache_n_attr_changed;
43498+atomic_unchecked_t fscache_n_attr_changed_ok;
43499+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43500+atomic_unchecked_t fscache_n_attr_changed_nomem;
43501+atomic_unchecked_t fscache_n_attr_changed_calls;
43502
43503-atomic_t fscache_n_allocs;
43504-atomic_t fscache_n_allocs_ok;
43505-atomic_t fscache_n_allocs_wait;
43506-atomic_t fscache_n_allocs_nobufs;
43507-atomic_t fscache_n_allocs_intr;
43508-atomic_t fscache_n_allocs_object_dead;
43509-atomic_t fscache_n_alloc_ops;
43510-atomic_t fscache_n_alloc_op_waits;
43511+atomic_unchecked_t fscache_n_allocs;
43512+atomic_unchecked_t fscache_n_allocs_ok;
43513+atomic_unchecked_t fscache_n_allocs_wait;
43514+atomic_unchecked_t fscache_n_allocs_nobufs;
43515+atomic_unchecked_t fscache_n_allocs_intr;
43516+atomic_unchecked_t fscache_n_allocs_object_dead;
43517+atomic_unchecked_t fscache_n_alloc_ops;
43518+atomic_unchecked_t fscache_n_alloc_op_waits;
43519
43520-atomic_t fscache_n_retrievals;
43521-atomic_t fscache_n_retrievals_ok;
43522-atomic_t fscache_n_retrievals_wait;
43523-atomic_t fscache_n_retrievals_nodata;
43524-atomic_t fscache_n_retrievals_nobufs;
43525-atomic_t fscache_n_retrievals_intr;
43526-atomic_t fscache_n_retrievals_nomem;
43527-atomic_t fscache_n_retrievals_object_dead;
43528-atomic_t fscache_n_retrieval_ops;
43529-atomic_t fscache_n_retrieval_op_waits;
43530+atomic_unchecked_t fscache_n_retrievals;
43531+atomic_unchecked_t fscache_n_retrievals_ok;
43532+atomic_unchecked_t fscache_n_retrievals_wait;
43533+atomic_unchecked_t fscache_n_retrievals_nodata;
43534+atomic_unchecked_t fscache_n_retrievals_nobufs;
43535+atomic_unchecked_t fscache_n_retrievals_intr;
43536+atomic_unchecked_t fscache_n_retrievals_nomem;
43537+atomic_unchecked_t fscache_n_retrievals_object_dead;
43538+atomic_unchecked_t fscache_n_retrieval_ops;
43539+atomic_unchecked_t fscache_n_retrieval_op_waits;
43540
43541-atomic_t fscache_n_stores;
43542-atomic_t fscache_n_stores_ok;
43543-atomic_t fscache_n_stores_again;
43544-atomic_t fscache_n_stores_nobufs;
43545-atomic_t fscache_n_stores_oom;
43546-atomic_t fscache_n_store_ops;
43547-atomic_t fscache_n_store_calls;
43548-atomic_t fscache_n_store_pages;
43549-atomic_t fscache_n_store_radix_deletes;
43550-atomic_t fscache_n_store_pages_over_limit;
43551+atomic_unchecked_t fscache_n_stores;
43552+atomic_unchecked_t fscache_n_stores_ok;
43553+atomic_unchecked_t fscache_n_stores_again;
43554+atomic_unchecked_t fscache_n_stores_nobufs;
43555+atomic_unchecked_t fscache_n_stores_oom;
43556+atomic_unchecked_t fscache_n_store_ops;
43557+atomic_unchecked_t fscache_n_store_calls;
43558+atomic_unchecked_t fscache_n_store_pages;
43559+atomic_unchecked_t fscache_n_store_radix_deletes;
43560+atomic_unchecked_t fscache_n_store_pages_over_limit;
43561
43562-atomic_t fscache_n_store_vmscan_not_storing;
43563-atomic_t fscache_n_store_vmscan_gone;
43564-atomic_t fscache_n_store_vmscan_busy;
43565-atomic_t fscache_n_store_vmscan_cancelled;
43566+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43567+atomic_unchecked_t fscache_n_store_vmscan_gone;
43568+atomic_unchecked_t fscache_n_store_vmscan_busy;
43569+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43570
43571-atomic_t fscache_n_marks;
43572-atomic_t fscache_n_uncaches;
43573+atomic_unchecked_t fscache_n_marks;
43574+atomic_unchecked_t fscache_n_uncaches;
43575
43576-atomic_t fscache_n_acquires;
43577-atomic_t fscache_n_acquires_null;
43578-atomic_t fscache_n_acquires_no_cache;
43579-atomic_t fscache_n_acquires_ok;
43580-atomic_t fscache_n_acquires_nobufs;
43581-atomic_t fscache_n_acquires_oom;
43582+atomic_unchecked_t fscache_n_acquires;
43583+atomic_unchecked_t fscache_n_acquires_null;
43584+atomic_unchecked_t fscache_n_acquires_no_cache;
43585+atomic_unchecked_t fscache_n_acquires_ok;
43586+atomic_unchecked_t fscache_n_acquires_nobufs;
43587+atomic_unchecked_t fscache_n_acquires_oom;
43588
43589-atomic_t fscache_n_updates;
43590-atomic_t fscache_n_updates_null;
43591-atomic_t fscache_n_updates_run;
43592+atomic_unchecked_t fscache_n_updates;
43593+atomic_unchecked_t fscache_n_updates_null;
43594+atomic_unchecked_t fscache_n_updates_run;
43595
43596-atomic_t fscache_n_relinquishes;
43597-atomic_t fscache_n_relinquishes_null;
43598-atomic_t fscache_n_relinquishes_waitcrt;
43599-atomic_t fscache_n_relinquishes_retire;
43600+atomic_unchecked_t fscache_n_relinquishes;
43601+atomic_unchecked_t fscache_n_relinquishes_null;
43602+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43603+atomic_unchecked_t fscache_n_relinquishes_retire;
43604
43605-atomic_t fscache_n_cookie_index;
43606-atomic_t fscache_n_cookie_data;
43607-atomic_t fscache_n_cookie_special;
43608+atomic_unchecked_t fscache_n_cookie_index;
43609+atomic_unchecked_t fscache_n_cookie_data;
43610+atomic_unchecked_t fscache_n_cookie_special;
43611
43612-atomic_t fscache_n_object_alloc;
43613-atomic_t fscache_n_object_no_alloc;
43614-atomic_t fscache_n_object_lookups;
43615-atomic_t fscache_n_object_lookups_negative;
43616-atomic_t fscache_n_object_lookups_positive;
43617-atomic_t fscache_n_object_lookups_timed_out;
43618-atomic_t fscache_n_object_created;
43619-atomic_t fscache_n_object_avail;
43620-atomic_t fscache_n_object_dead;
43621+atomic_unchecked_t fscache_n_object_alloc;
43622+atomic_unchecked_t fscache_n_object_no_alloc;
43623+atomic_unchecked_t fscache_n_object_lookups;
43624+atomic_unchecked_t fscache_n_object_lookups_negative;
43625+atomic_unchecked_t fscache_n_object_lookups_positive;
43626+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43627+atomic_unchecked_t fscache_n_object_created;
43628+atomic_unchecked_t fscache_n_object_avail;
43629+atomic_unchecked_t fscache_n_object_dead;
43630
43631-atomic_t fscache_n_checkaux_none;
43632-atomic_t fscache_n_checkaux_okay;
43633-atomic_t fscache_n_checkaux_update;
43634-atomic_t fscache_n_checkaux_obsolete;
43635+atomic_unchecked_t fscache_n_checkaux_none;
43636+atomic_unchecked_t fscache_n_checkaux_okay;
43637+atomic_unchecked_t fscache_n_checkaux_update;
43638+atomic_unchecked_t fscache_n_checkaux_obsolete;
43639
43640 atomic_t fscache_n_cop_alloc_object;
43641 atomic_t fscache_n_cop_lookup_object;
43642@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43643 seq_puts(m, "FS-Cache statistics\n");
43644
43645 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43646- atomic_read(&fscache_n_cookie_index),
43647- atomic_read(&fscache_n_cookie_data),
43648- atomic_read(&fscache_n_cookie_special));
43649+ atomic_read_unchecked(&fscache_n_cookie_index),
43650+ atomic_read_unchecked(&fscache_n_cookie_data),
43651+ atomic_read_unchecked(&fscache_n_cookie_special));
43652
43653 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43654- atomic_read(&fscache_n_object_alloc),
43655- atomic_read(&fscache_n_object_no_alloc),
43656- atomic_read(&fscache_n_object_avail),
43657- atomic_read(&fscache_n_object_dead));
43658+ atomic_read_unchecked(&fscache_n_object_alloc),
43659+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43660+ atomic_read_unchecked(&fscache_n_object_avail),
43661+ atomic_read_unchecked(&fscache_n_object_dead));
43662 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43663- atomic_read(&fscache_n_checkaux_none),
43664- atomic_read(&fscache_n_checkaux_okay),
43665- atomic_read(&fscache_n_checkaux_update),
43666- atomic_read(&fscache_n_checkaux_obsolete));
43667+ atomic_read_unchecked(&fscache_n_checkaux_none),
43668+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43669+ atomic_read_unchecked(&fscache_n_checkaux_update),
43670+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43671
43672 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43673- atomic_read(&fscache_n_marks),
43674- atomic_read(&fscache_n_uncaches));
43675+ atomic_read_unchecked(&fscache_n_marks),
43676+ atomic_read_unchecked(&fscache_n_uncaches));
43677
43678 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43679 " oom=%u\n",
43680- atomic_read(&fscache_n_acquires),
43681- atomic_read(&fscache_n_acquires_null),
43682- atomic_read(&fscache_n_acquires_no_cache),
43683- atomic_read(&fscache_n_acquires_ok),
43684- atomic_read(&fscache_n_acquires_nobufs),
43685- atomic_read(&fscache_n_acquires_oom));
43686+ atomic_read_unchecked(&fscache_n_acquires),
43687+ atomic_read_unchecked(&fscache_n_acquires_null),
43688+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43689+ atomic_read_unchecked(&fscache_n_acquires_ok),
43690+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43691+ atomic_read_unchecked(&fscache_n_acquires_oom));
43692
43693 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43694- atomic_read(&fscache_n_object_lookups),
43695- atomic_read(&fscache_n_object_lookups_negative),
43696- atomic_read(&fscache_n_object_lookups_positive),
43697- atomic_read(&fscache_n_object_created),
43698- atomic_read(&fscache_n_object_lookups_timed_out));
43699+ atomic_read_unchecked(&fscache_n_object_lookups),
43700+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43701+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43702+ atomic_read_unchecked(&fscache_n_object_created),
43703+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43704
43705 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43706- atomic_read(&fscache_n_updates),
43707- atomic_read(&fscache_n_updates_null),
43708- atomic_read(&fscache_n_updates_run));
43709+ atomic_read_unchecked(&fscache_n_updates),
43710+ atomic_read_unchecked(&fscache_n_updates_null),
43711+ atomic_read_unchecked(&fscache_n_updates_run));
43712
43713 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43714- atomic_read(&fscache_n_relinquishes),
43715- atomic_read(&fscache_n_relinquishes_null),
43716- atomic_read(&fscache_n_relinquishes_waitcrt),
43717- atomic_read(&fscache_n_relinquishes_retire));
43718+ atomic_read_unchecked(&fscache_n_relinquishes),
43719+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43720+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43721+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43722
43723 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43724- atomic_read(&fscache_n_attr_changed),
43725- atomic_read(&fscache_n_attr_changed_ok),
43726- atomic_read(&fscache_n_attr_changed_nobufs),
43727- atomic_read(&fscache_n_attr_changed_nomem),
43728- atomic_read(&fscache_n_attr_changed_calls));
43729+ atomic_read_unchecked(&fscache_n_attr_changed),
43730+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43731+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43732+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43733+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43734
43735 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43736- atomic_read(&fscache_n_allocs),
43737- atomic_read(&fscache_n_allocs_ok),
43738- atomic_read(&fscache_n_allocs_wait),
43739- atomic_read(&fscache_n_allocs_nobufs),
43740- atomic_read(&fscache_n_allocs_intr));
43741+ atomic_read_unchecked(&fscache_n_allocs),
43742+ atomic_read_unchecked(&fscache_n_allocs_ok),
43743+ atomic_read_unchecked(&fscache_n_allocs_wait),
43744+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43745+ atomic_read_unchecked(&fscache_n_allocs_intr));
43746 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43747- atomic_read(&fscache_n_alloc_ops),
43748- atomic_read(&fscache_n_alloc_op_waits),
43749- atomic_read(&fscache_n_allocs_object_dead));
43750+ atomic_read_unchecked(&fscache_n_alloc_ops),
43751+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43752+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43753
43754 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43755 " int=%u oom=%u\n",
43756- atomic_read(&fscache_n_retrievals),
43757- atomic_read(&fscache_n_retrievals_ok),
43758- atomic_read(&fscache_n_retrievals_wait),
43759- atomic_read(&fscache_n_retrievals_nodata),
43760- atomic_read(&fscache_n_retrievals_nobufs),
43761- atomic_read(&fscache_n_retrievals_intr),
43762- atomic_read(&fscache_n_retrievals_nomem));
43763+ atomic_read_unchecked(&fscache_n_retrievals),
43764+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43765+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43766+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43767+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43768+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43769+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43770 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43771- atomic_read(&fscache_n_retrieval_ops),
43772- atomic_read(&fscache_n_retrieval_op_waits),
43773- atomic_read(&fscache_n_retrievals_object_dead));
43774+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43775+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43776+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43777
43778 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43779- atomic_read(&fscache_n_stores),
43780- atomic_read(&fscache_n_stores_ok),
43781- atomic_read(&fscache_n_stores_again),
43782- atomic_read(&fscache_n_stores_nobufs),
43783- atomic_read(&fscache_n_stores_oom));
43784+ atomic_read_unchecked(&fscache_n_stores),
43785+ atomic_read_unchecked(&fscache_n_stores_ok),
43786+ atomic_read_unchecked(&fscache_n_stores_again),
43787+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43788+ atomic_read_unchecked(&fscache_n_stores_oom));
43789 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43790- atomic_read(&fscache_n_store_ops),
43791- atomic_read(&fscache_n_store_calls),
43792- atomic_read(&fscache_n_store_pages),
43793- atomic_read(&fscache_n_store_radix_deletes),
43794- atomic_read(&fscache_n_store_pages_over_limit));
43795+ atomic_read_unchecked(&fscache_n_store_ops),
43796+ atomic_read_unchecked(&fscache_n_store_calls),
43797+ atomic_read_unchecked(&fscache_n_store_pages),
43798+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43799+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43800
43801 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43802- atomic_read(&fscache_n_store_vmscan_not_storing),
43803- atomic_read(&fscache_n_store_vmscan_gone),
43804- atomic_read(&fscache_n_store_vmscan_busy),
43805- atomic_read(&fscache_n_store_vmscan_cancelled));
43806+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43807+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43808+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43809+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43810
43811 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43812- atomic_read(&fscache_n_op_pend),
43813- atomic_read(&fscache_n_op_run),
43814- atomic_read(&fscache_n_op_enqueue),
43815- atomic_read(&fscache_n_op_cancelled),
43816- atomic_read(&fscache_n_op_rejected));
43817+ atomic_read_unchecked(&fscache_n_op_pend),
43818+ atomic_read_unchecked(&fscache_n_op_run),
43819+ atomic_read_unchecked(&fscache_n_op_enqueue),
43820+ atomic_read_unchecked(&fscache_n_op_cancelled),
43821+ atomic_read_unchecked(&fscache_n_op_rejected));
43822 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43823- atomic_read(&fscache_n_op_deferred_release),
43824- atomic_read(&fscache_n_op_release),
43825- atomic_read(&fscache_n_op_gc));
43826+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43827+ atomic_read_unchecked(&fscache_n_op_release),
43828+ atomic_read_unchecked(&fscache_n_op_gc));
43829
43830 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43831 atomic_read(&fscache_n_cop_alloc_object),
43832diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
43833index 3426521..3b75162 100644
43834--- a/fs/fuse/cuse.c
43835+++ b/fs/fuse/cuse.c
43836@@ -587,10 +587,12 @@ static int __init cuse_init(void)
43837 INIT_LIST_HEAD(&cuse_conntbl[i]);
43838
43839 /* inherit and extend fuse_dev_operations */
43840- cuse_channel_fops = fuse_dev_operations;
43841- cuse_channel_fops.owner = THIS_MODULE;
43842- cuse_channel_fops.open = cuse_channel_open;
43843- cuse_channel_fops.release = cuse_channel_release;
43844+ pax_open_kernel();
43845+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43846+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43847+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43848+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43849+ pax_close_kernel();
43850
43851 cuse_class = class_create(THIS_MODULE, "cuse");
43852 if (IS_ERR(cuse_class))
43853diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
43854index 2aaf3ea..8e50863 100644
43855--- a/fs/fuse/dev.c
43856+++ b/fs/fuse/dev.c
43857@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
43858 ret = 0;
43859 pipe_lock(pipe);
43860
43861- if (!pipe->readers) {
43862+ if (!atomic_read(&pipe->readers)) {
43863 send_sig(SIGPIPE, current, 0);
43864 if (!ret)
43865 ret = -EPIPE;
43866diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
43867index 9f63e49..d8a64c0 100644
43868--- a/fs/fuse/dir.c
43869+++ b/fs/fuse/dir.c
43870@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
43871 return link;
43872 }
43873
43874-static void free_link(char *link)
43875+static void free_link(const char *link)
43876 {
43877 if (!IS_ERR(link))
43878 free_page((unsigned long) link);
43879diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
43880index cfd4959..a780959 100644
43881--- a/fs/gfs2/inode.c
43882+++ b/fs/gfs2/inode.c
43883@@ -1490,7 +1490,7 @@ out:
43884
43885 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43886 {
43887- char *s = nd_get_link(nd);
43888+ const char *s = nd_get_link(nd);
43889 if (!IS_ERR(s))
43890 kfree(s);
43891 }
43892diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
43893index 0be5a78..9cfb853 100644
43894--- a/fs/hugetlbfs/inode.c
43895+++ b/fs/hugetlbfs/inode.c
43896@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
43897 .kill_sb = kill_litter_super,
43898 };
43899
43900-static struct vfsmount *hugetlbfs_vfsmount;
43901+struct vfsmount *hugetlbfs_vfsmount;
43902
43903 static int can_do_hugetlb_shm(void)
43904 {
43905diff --git a/fs/inode.c b/fs/inode.c
43906index ee4e66b..0451521 100644
43907--- a/fs/inode.c
43908+++ b/fs/inode.c
43909@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
43910
43911 #ifdef CONFIG_SMP
43912 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43913- static atomic_t shared_last_ino;
43914- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43915+ static atomic_unchecked_t shared_last_ino;
43916+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43917
43918 res = next - LAST_INO_BATCH;
43919 }
43920diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
43921index e513f19..2ab1351 100644
43922--- a/fs/jffs2/erase.c
43923+++ b/fs/jffs2/erase.c
43924@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
43925 struct jffs2_unknown_node marker = {
43926 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43927 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43928- .totlen = cpu_to_je32(c->cleanmarker_size)
43929+ .totlen = cpu_to_je32(c->cleanmarker_size),
43930+ .hdr_crc = cpu_to_je32(0)
43931 };
43932
43933 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43934diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
43935index b09e51d..e482afa 100644
43936--- a/fs/jffs2/wbuf.c
43937+++ b/fs/jffs2/wbuf.c
43938@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
43939 {
43940 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43941 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43942- .totlen = constant_cpu_to_je32(8)
43943+ .totlen = constant_cpu_to_je32(8),
43944+ .hdr_crc = constant_cpu_to_je32(0)
43945 };
43946
43947 /*
43948diff --git a/fs/jfs/super.c b/fs/jfs/super.c
43949index a44eff0..462e07d 100644
43950--- a/fs/jfs/super.c
43951+++ b/fs/jfs/super.c
43952@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
43953
43954 jfs_inode_cachep =
43955 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43956- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43957+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43958 init_once);
43959 if (jfs_inode_cachep == NULL)
43960 return -ENOMEM;
43961diff --git a/fs/libfs.c b/fs/libfs.c
43962index f6d411e..e82a08d 100644
43963--- a/fs/libfs.c
43964+++ b/fs/libfs.c
43965@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
43966
43967 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43968 struct dentry *next;
43969+ char d_name[sizeof(next->d_iname)];
43970+ const unsigned char *name;
43971+
43972 next = list_entry(p, struct dentry, d_u.d_child);
43973 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43974 if (!simple_positive(next)) {
43975@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
43976
43977 spin_unlock(&next->d_lock);
43978 spin_unlock(&dentry->d_lock);
43979- if (filldir(dirent, next->d_name.name,
43980+ name = next->d_name.name;
43981+ if (name == next->d_iname) {
43982+ memcpy(d_name, name, next->d_name.len);
43983+ name = d_name;
43984+ }
43985+ if (filldir(dirent, name,
43986 next->d_name.len, filp->f_pos,
43987 next->d_inode->i_ino,
43988 dt_type(next->d_inode)) < 0)
43989diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
43990index 8392cb8..80d6193 100644
43991--- a/fs/lockd/clntproc.c
43992+++ b/fs/lockd/clntproc.c
43993@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
43994 /*
43995 * Cookie counter for NLM requests
43996 */
43997-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43998+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43999
44000 void nlmclnt_next_cookie(struct nlm_cookie *c)
44001 {
44002- u32 cookie = atomic_inc_return(&nlm_cookie);
44003+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44004
44005 memcpy(c->data, &cookie, 4);
44006 c->len=4;
44007diff --git a/fs/locks.c b/fs/locks.c
44008index 637694b..f84a121 100644
44009--- a/fs/locks.c
44010+++ b/fs/locks.c
44011@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44012 return;
44013
44014 if (filp->f_op && filp->f_op->flock) {
44015- struct file_lock fl = {
44016+ struct file_lock flock = {
44017 .fl_pid = current->tgid,
44018 .fl_file = filp,
44019 .fl_flags = FL_FLOCK,
44020 .fl_type = F_UNLCK,
44021 .fl_end = OFFSET_MAX,
44022 };
44023- filp->f_op->flock(filp, F_SETLKW, &fl);
44024- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44025- fl.fl_ops->fl_release_private(&fl);
44026+ filp->f_op->flock(filp, F_SETLKW, &flock);
44027+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44028+ flock.fl_ops->fl_release_private(&flock);
44029 }
44030
44031 lock_flocks();
44032diff --git a/fs/namei.c b/fs/namei.c
44033index 5008f01..90328a7 100644
44034--- a/fs/namei.c
44035+++ b/fs/namei.c
44036@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44037 if (ret != -EACCES)
44038 return ret;
44039
44040+#ifdef CONFIG_GRKERNSEC
44041+ /* we'll block if we have to log due to a denied capability use */
44042+ if (mask & MAY_NOT_BLOCK)
44043+ return -ECHILD;
44044+#endif
44045+
44046 if (S_ISDIR(inode->i_mode)) {
44047 /* DACs are overridable for directories */
44048- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44049- return 0;
44050 if (!(mask & MAY_WRITE))
44051- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44052+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44053+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44054 return 0;
44055+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44056+ return 0;
44057 return -EACCES;
44058 }
44059 /*
44060+ * Searching includes executable on directories, else just read.
44061+ */
44062+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44063+ if (mask == MAY_READ)
44064+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44065+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44066+ return 0;
44067+
44068+ /*
44069 * Read/write DACs are always overridable.
44070 * Executable DACs are overridable when there is
44071 * at least one exec bit set.
44072@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44073 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44074 return 0;
44075
44076- /*
44077- * Searching includes executable on directories, else just read.
44078- */
44079- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44080- if (mask == MAY_READ)
44081- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44082- return 0;
44083-
44084 return -EACCES;
44085 }
44086
44087@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44088 return error;
44089 }
44090
44091+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44092+ dentry->d_inode, dentry, nd->path.mnt)) {
44093+ error = -EACCES;
44094+ *p = ERR_PTR(error); /* no ->put_link(), please */
44095+ path_put(&nd->path);
44096+ return error;
44097+ }
44098+
44099 nd->last_type = LAST_BIND;
44100 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44101 error = PTR_ERR(*p);
44102 if (!IS_ERR(*p)) {
44103- char *s = nd_get_link(nd);
44104+ const char *s = nd_get_link(nd);
44105 error = 0;
44106 if (s)
44107 error = __vfs_follow_link(nd, s);
44108@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44109 if (!err)
44110 err = complete_walk(nd);
44111
44112+ if (!(nd->flags & LOOKUP_PARENT)) {
44113+#ifdef CONFIG_GRKERNSEC
44114+ if (flags & LOOKUP_RCU) {
44115+ if (!err)
44116+ path_put(&nd->path);
44117+ err = -ECHILD;
44118+ } else
44119+#endif
44120+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44121+ if (!err)
44122+ path_put(&nd->path);
44123+ err = -ENOENT;
44124+ }
44125+ }
44126+
44127 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44128 if (!nd->inode->i_op->lookup) {
44129 path_put(&nd->path);
44130@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44131 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44132
44133 if (likely(!retval)) {
44134+ if (*name != '/' && nd->path.dentry && nd->inode) {
44135+#ifdef CONFIG_GRKERNSEC
44136+ if (flags & LOOKUP_RCU)
44137+ return -ECHILD;
44138+#endif
44139+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44140+ return -ENOENT;
44141+ }
44142+
44143 if (unlikely(!audit_dummy_context())) {
44144 if (nd->path.dentry && nd->inode)
44145 audit_inode(name, nd->path.dentry);
44146@@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44147 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44148 return -EPERM;
44149
44150+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44151+ return -EPERM;
44152+ if (gr_handle_rawio(inode))
44153+ return -EPERM;
44154+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44155+ return -EACCES;
44156+
44157 return 0;
44158 }
44159
44160@@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44161 error = complete_walk(nd);
44162 if (error)
44163 return ERR_PTR(error);
44164+#ifdef CONFIG_GRKERNSEC
44165+ if (nd->flags & LOOKUP_RCU) {
44166+ error = -ECHILD;
44167+ goto exit;
44168+ }
44169+#endif
44170+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44171+ error = -ENOENT;
44172+ goto exit;
44173+ }
44174 audit_inode(pathname, nd->path.dentry);
44175 if (open_flag & O_CREAT) {
44176 error = -EISDIR;
44177@@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44178 error = complete_walk(nd);
44179 if (error)
44180 return ERR_PTR(error);
44181+#ifdef CONFIG_GRKERNSEC
44182+ if (nd->flags & LOOKUP_RCU) {
44183+ error = -ECHILD;
44184+ goto exit;
44185+ }
44186+#endif
44187+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44188+ error = -ENOENT;
44189+ goto exit;
44190+ }
44191 audit_inode(pathname, dir);
44192 goto ok;
44193 }
44194@@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44195 error = complete_walk(nd);
44196 if (error)
44197 return ERR_PTR(-ECHILD);
44198+#ifdef CONFIG_GRKERNSEC
44199+ if (nd->flags & LOOKUP_RCU) {
44200+ error = -ECHILD;
44201+ goto exit;
44202+ }
44203+#endif
44204+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44205+ error = -ENOENT;
44206+ goto exit;
44207+ }
44208
44209 error = -ENOTDIR;
44210 if (nd->flags & LOOKUP_DIRECTORY) {
44211@@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44212 /* Negative dentry, just create the file */
44213 if (!dentry->d_inode) {
44214 int mode = op->mode;
44215+
44216+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44217+ error = -EACCES;
44218+ goto exit_mutex_unlock;
44219+ }
44220+
44221 if (!IS_POSIXACL(dir->d_inode))
44222 mode &= ~current_umask();
44223 /*
44224@@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44225 error = vfs_create(dir->d_inode, dentry, mode, nd);
44226 if (error)
44227 goto exit_mutex_unlock;
44228+ else
44229+ gr_handle_create(path->dentry, path->mnt);
44230 mutex_unlock(&dir->d_inode->i_mutex);
44231 dput(nd->path.dentry);
44232 nd->path.dentry = dentry;
44233@@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44234 /*
44235 * It already exists.
44236 */
44237+
44238+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44239+ error = -ENOENT;
44240+ goto exit_mutex_unlock;
44241+ }
44242+
44243+ /* only check if O_CREAT is specified, all other checks need to go
44244+ into may_open */
44245+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44246+ error = -EACCES;
44247+ goto exit_mutex_unlock;
44248+ }
44249+
44250 mutex_unlock(&dir->d_inode->i_mutex);
44251 audit_inode(pathname, path->dentry);
44252
44253@@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44254 *path = nd.path;
44255 return dentry;
44256 eexist:
44257+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44258+ dput(dentry);
44259+ dentry = ERR_PTR(-ENOENT);
44260+ goto fail;
44261+ }
44262 dput(dentry);
44263 dentry = ERR_PTR(-EEXIST);
44264 fail:
44265@@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44266 }
44267 EXPORT_SYMBOL(user_path_create);
44268
44269+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44270+{
44271+ char *tmp = getname(pathname);
44272+ struct dentry *res;
44273+ if (IS_ERR(tmp))
44274+ return ERR_CAST(tmp);
44275+ res = kern_path_create(dfd, tmp, path, is_dir);
44276+ if (IS_ERR(res))
44277+ putname(tmp);
44278+ else
44279+ *to = tmp;
44280+ return res;
44281+}
44282+
44283 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44284 {
44285 int error = may_create(dir, dentry);
44286@@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44287 error = mnt_want_write(path.mnt);
44288 if (error)
44289 goto out_dput;
44290+
44291+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44292+ error = -EPERM;
44293+ goto out_drop_write;
44294+ }
44295+
44296+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44297+ error = -EACCES;
44298+ goto out_drop_write;
44299+ }
44300+
44301 error = security_path_mknod(&path, dentry, mode, dev);
44302 if (error)
44303 goto out_drop_write;
44304@@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44305 }
44306 out_drop_write:
44307 mnt_drop_write(path.mnt);
44308+
44309+ if (!error)
44310+ gr_handle_create(dentry, path.mnt);
44311 out_dput:
44312 dput(dentry);
44313 mutex_unlock(&path.dentry->d_inode->i_mutex);
44314@@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44315 error = mnt_want_write(path.mnt);
44316 if (error)
44317 goto out_dput;
44318+
44319+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44320+ error = -EACCES;
44321+ goto out_drop_write;
44322+ }
44323+
44324 error = security_path_mkdir(&path, dentry, mode);
44325 if (error)
44326 goto out_drop_write;
44327 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44328 out_drop_write:
44329 mnt_drop_write(path.mnt);
44330+
44331+ if (!error)
44332+ gr_handle_create(dentry, path.mnt);
44333 out_dput:
44334 dput(dentry);
44335 mutex_unlock(&path.dentry->d_inode->i_mutex);
44336@@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44337 char * name;
44338 struct dentry *dentry;
44339 struct nameidata nd;
44340+ ino_t saved_ino = 0;
44341+ dev_t saved_dev = 0;
44342
44343 error = user_path_parent(dfd, pathname, &nd, &name);
44344 if (error)
44345@@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44346 error = -ENOENT;
44347 goto exit3;
44348 }
44349+
44350+ saved_ino = dentry->d_inode->i_ino;
44351+ saved_dev = gr_get_dev_from_dentry(dentry);
44352+
44353+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44354+ error = -EACCES;
44355+ goto exit3;
44356+ }
44357+
44358 error = mnt_want_write(nd.path.mnt);
44359 if (error)
44360 goto exit3;
44361@@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44362 if (error)
44363 goto exit4;
44364 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44365+ if (!error && (saved_dev || saved_ino))
44366+ gr_handle_delete(saved_ino, saved_dev);
44367 exit4:
44368 mnt_drop_write(nd.path.mnt);
44369 exit3:
44370@@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44371 struct dentry *dentry;
44372 struct nameidata nd;
44373 struct inode *inode = NULL;
44374+ ino_t saved_ino = 0;
44375+ dev_t saved_dev = 0;
44376
44377 error = user_path_parent(dfd, pathname, &nd, &name);
44378 if (error)
44379@@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44380 if (!inode)
44381 goto slashes;
44382 ihold(inode);
44383+
44384+ if (inode->i_nlink <= 1) {
44385+ saved_ino = inode->i_ino;
44386+ saved_dev = gr_get_dev_from_dentry(dentry);
44387+ }
44388+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44389+ error = -EACCES;
44390+ goto exit2;
44391+ }
44392+
44393 error = mnt_want_write(nd.path.mnt);
44394 if (error)
44395 goto exit2;
44396@@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44397 if (error)
44398 goto exit3;
44399 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44400+ if (!error && (saved_ino || saved_dev))
44401+ gr_handle_delete(saved_ino, saved_dev);
44402 exit3:
44403 mnt_drop_write(nd.path.mnt);
44404 exit2:
44405@@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44406 error = mnt_want_write(path.mnt);
44407 if (error)
44408 goto out_dput;
44409+
44410+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44411+ error = -EACCES;
44412+ goto out_drop_write;
44413+ }
44414+
44415 error = security_path_symlink(&path, dentry, from);
44416 if (error)
44417 goto out_drop_write;
44418 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44419+ if (!error)
44420+ gr_handle_create(dentry, path.mnt);
44421 out_drop_write:
44422 mnt_drop_write(path.mnt);
44423 out_dput:
44424@@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44425 {
44426 struct dentry *new_dentry;
44427 struct path old_path, new_path;
44428+ char *to = NULL;
44429 int how = 0;
44430 int error;
44431
44432@@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44433 if (error)
44434 return error;
44435
44436- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44437+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44438 error = PTR_ERR(new_dentry);
44439 if (IS_ERR(new_dentry))
44440 goto out;
44441@@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44442 error = mnt_want_write(new_path.mnt);
44443 if (error)
44444 goto out_dput;
44445+
44446+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44447+ old_path.dentry->d_inode,
44448+ old_path.dentry->d_inode->i_mode, to)) {
44449+ error = -EACCES;
44450+ goto out_drop_write;
44451+ }
44452+
44453+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44454+ old_path.dentry, old_path.mnt, to)) {
44455+ error = -EACCES;
44456+ goto out_drop_write;
44457+ }
44458+
44459 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44460 if (error)
44461 goto out_drop_write;
44462 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44463+ if (!error)
44464+ gr_handle_create(new_dentry, new_path.mnt);
44465 out_drop_write:
44466 mnt_drop_write(new_path.mnt);
44467 out_dput:
44468+ putname(to);
44469 dput(new_dentry);
44470 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44471 path_put(&new_path);
44472@@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44473 if (new_dentry == trap)
44474 goto exit5;
44475
44476+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44477+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44478+ to);
44479+ if (error)
44480+ goto exit5;
44481+
44482 error = mnt_want_write(oldnd.path.mnt);
44483 if (error)
44484 goto exit5;
44485@@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44486 goto exit6;
44487 error = vfs_rename(old_dir->d_inode, old_dentry,
44488 new_dir->d_inode, new_dentry);
44489+ if (!error)
44490+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44491+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44492 exit6:
44493 mnt_drop_write(oldnd.path.mnt);
44494 exit5:
44495@@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44496
44497 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44498 {
44499+ char tmpbuf[64];
44500+ const char *newlink;
44501 int len;
44502
44503 len = PTR_ERR(link);
44504@@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44505 len = strlen(link);
44506 if (len > (unsigned) buflen)
44507 len = buflen;
44508- if (copy_to_user(buffer, link, len))
44509+
44510+ if (len < sizeof(tmpbuf)) {
44511+ memcpy(tmpbuf, link, len);
44512+ newlink = tmpbuf;
44513+ } else
44514+ newlink = link;
44515+
44516+ if (copy_to_user(buffer, newlink, len))
44517 len = -EFAULT;
44518 out:
44519 return len;
44520diff --git a/fs/namespace.c b/fs/namespace.c
44521index cfc6d44..b4632a5 100644
44522--- a/fs/namespace.c
44523+++ b/fs/namespace.c
44524@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44525 if (!(sb->s_flags & MS_RDONLY))
44526 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44527 up_write(&sb->s_umount);
44528+
44529+ gr_log_remount(mnt->mnt_devname, retval);
44530+
44531 return retval;
44532 }
44533
44534@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44535 br_write_unlock(vfsmount_lock);
44536 up_write(&namespace_sem);
44537 release_mounts(&umount_list);
44538+
44539+ gr_log_unmount(mnt->mnt_devname, retval);
44540+
44541 return retval;
44542 }
44543
44544@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44545 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44546 MS_STRICTATIME);
44547
44548+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44549+ retval = -EPERM;
44550+ goto dput_out;
44551+ }
44552+
44553+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44554+ retval = -EPERM;
44555+ goto dput_out;
44556+ }
44557+
44558 if (flags & MS_REMOUNT)
44559 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44560 data_page);
44561@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44562 dev_name, data_page);
44563 dput_out:
44564 path_put(&path);
44565+
44566+ gr_log_mount(dev_name, dir_name, retval);
44567+
44568 return retval;
44569 }
44570
44571@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44572 if (error)
44573 goto out2;
44574
44575+ if (gr_handle_chroot_pivot()) {
44576+ error = -EPERM;
44577+ goto out2;
44578+ }
44579+
44580 get_fs_root(current->fs, &root);
44581 error = lock_mount(&old);
44582 if (error)
44583diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44584index 3db6b82..a57597e 100644
44585--- a/fs/nfs/blocklayout/blocklayout.c
44586+++ b/fs/nfs/blocklayout/blocklayout.c
44587@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44588 */
44589 struct parallel_io {
44590 struct kref refcnt;
44591- struct rpc_call_ops call_ops;
44592+ rpc_call_ops_no_const call_ops;
44593 void (*pnfs_callback) (void *data);
44594 void *data;
44595 };
44596diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44597index 50a15fa..ca113f9 100644
44598--- a/fs/nfs/inode.c
44599+++ b/fs/nfs/inode.c
44600@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44601 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44602 nfsi->attrtimeo_timestamp = jiffies;
44603
44604- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44605+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44606 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44607 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44608 else
44609@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44610 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44611 }
44612
44613-static atomic_long_t nfs_attr_generation_counter;
44614+static atomic_long_unchecked_t nfs_attr_generation_counter;
44615
44616 static unsigned long nfs_read_attr_generation_counter(void)
44617 {
44618- return atomic_long_read(&nfs_attr_generation_counter);
44619+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44620 }
44621
44622 unsigned long nfs_inc_attr_generation_counter(void)
44623 {
44624- return atomic_long_inc_return(&nfs_attr_generation_counter);
44625+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44626 }
44627
44628 void nfs_fattr_init(struct nfs_fattr *fattr)
44629diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44630index 7a2e442..8e544cc 100644
44631--- a/fs/nfsd/vfs.c
44632+++ b/fs/nfsd/vfs.c
44633@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44634 } else {
44635 oldfs = get_fs();
44636 set_fs(KERNEL_DS);
44637- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44638+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44639 set_fs(oldfs);
44640 }
44641
44642@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44643
44644 /* Write the data. */
44645 oldfs = get_fs(); set_fs(KERNEL_DS);
44646- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44647+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44648 set_fs(oldfs);
44649 if (host_err < 0)
44650 goto out_nfserr;
44651@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44652 */
44653
44654 oldfs = get_fs(); set_fs(KERNEL_DS);
44655- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44656+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44657 set_fs(oldfs);
44658
44659 if (host_err < 0)
44660diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44661index 9fde1c0..14e8827 100644
44662--- a/fs/notify/fanotify/fanotify_user.c
44663+++ b/fs/notify/fanotify/fanotify_user.c
44664@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44665 goto out_close_fd;
44666
44667 ret = -EFAULT;
44668- if (copy_to_user(buf, &fanotify_event_metadata,
44669+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44670+ copy_to_user(buf, &fanotify_event_metadata,
44671 fanotify_event_metadata.event_len))
44672 goto out_kill_access_response;
44673
44674diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44675index ee18815..7aa5d01 100644
44676--- a/fs/notify/notification.c
44677+++ b/fs/notify/notification.c
44678@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
44679 * get set to 0 so it will never get 'freed'
44680 */
44681 static struct fsnotify_event *q_overflow_event;
44682-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44683+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44684
44685 /**
44686 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44687@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44688 */
44689 u32 fsnotify_get_cookie(void)
44690 {
44691- return atomic_inc_return(&fsnotify_sync_cookie);
44692+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44693 }
44694 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44695
44696diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
44697index 99e3610..02c1068 100644
44698--- a/fs/ntfs/dir.c
44699+++ b/fs/ntfs/dir.c
44700@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44701 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44702 ~(s64)(ndir->itype.index.block_size - 1)));
44703 /* Bounds checks. */
44704- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44705+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44706 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44707 "inode 0x%lx or driver bug.", vdir->i_ino);
44708 goto err_out;
44709diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
44710index c587e2d..3641eaa 100644
44711--- a/fs/ntfs/file.c
44712+++ b/fs/ntfs/file.c
44713@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
44714 #endif /* NTFS_RW */
44715 };
44716
44717-const struct file_operations ntfs_empty_file_ops = {};
44718+const struct file_operations ntfs_empty_file_ops __read_only;
44719
44720-const struct inode_operations ntfs_empty_inode_ops = {};
44721+const struct inode_operations ntfs_empty_inode_ops __read_only;
44722diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
44723index 210c352..a174f83 100644
44724--- a/fs/ocfs2/localalloc.c
44725+++ b/fs/ocfs2/localalloc.c
44726@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
44727 goto bail;
44728 }
44729
44730- atomic_inc(&osb->alloc_stats.moves);
44731+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44732
44733 bail:
44734 if (handle)
44735diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
44736index d355e6e..578d905 100644
44737--- a/fs/ocfs2/ocfs2.h
44738+++ b/fs/ocfs2/ocfs2.h
44739@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44740
44741 struct ocfs2_alloc_stats
44742 {
44743- atomic_t moves;
44744- atomic_t local_data;
44745- atomic_t bitmap_data;
44746- atomic_t bg_allocs;
44747- atomic_t bg_extends;
44748+ atomic_unchecked_t moves;
44749+ atomic_unchecked_t local_data;
44750+ atomic_unchecked_t bitmap_data;
44751+ atomic_unchecked_t bg_allocs;
44752+ atomic_unchecked_t bg_extends;
44753 };
44754
44755 enum ocfs2_local_alloc_state
44756diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
44757index ba5d97e..c77db25 100644
44758--- a/fs/ocfs2/suballoc.c
44759+++ b/fs/ocfs2/suballoc.c
44760@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
44761 mlog_errno(status);
44762 goto bail;
44763 }
44764- atomic_inc(&osb->alloc_stats.bg_extends);
44765+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44766
44767 /* You should never ask for this much metadata */
44768 BUG_ON(bits_wanted >
44769@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
44770 mlog_errno(status);
44771 goto bail;
44772 }
44773- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44774+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44775
44776 *suballoc_loc = res.sr_bg_blkno;
44777 *suballoc_bit_start = res.sr_bit_offset;
44778@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
44779 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44780 res->sr_bits);
44781
44782- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44783+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44784
44785 BUG_ON(res->sr_bits != 1);
44786
44787@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
44788 mlog_errno(status);
44789 goto bail;
44790 }
44791- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44792+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44793
44794 BUG_ON(res.sr_bits != 1);
44795
44796@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44797 cluster_start,
44798 num_clusters);
44799 if (!status)
44800- atomic_inc(&osb->alloc_stats.local_data);
44801+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44802 } else {
44803 if (min_clusters > (osb->bitmap_cpg - 1)) {
44804 /* The only paths asking for contiguousness
44805@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44806 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44807 res.sr_bg_blkno,
44808 res.sr_bit_offset);
44809- atomic_inc(&osb->alloc_stats.bitmap_data);
44810+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44811 *num_clusters = res.sr_bits;
44812 }
44813 }
44814diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
44815index 4994f8b..eaab8eb 100644
44816--- a/fs/ocfs2/super.c
44817+++ b/fs/ocfs2/super.c
44818@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
44819 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44820 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44821 "Stats",
44822- atomic_read(&osb->alloc_stats.bitmap_data),
44823- atomic_read(&osb->alloc_stats.local_data),
44824- atomic_read(&osb->alloc_stats.bg_allocs),
44825- atomic_read(&osb->alloc_stats.moves),
44826- atomic_read(&osb->alloc_stats.bg_extends));
44827+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44828+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44829+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44830+ atomic_read_unchecked(&osb->alloc_stats.moves),
44831+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44832
44833 out += snprintf(buf + out, len - out,
44834 "%10s => State: %u Descriptor: %llu Size: %u bits "
44835@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
44836 spin_lock_init(&osb->osb_xattr_lock);
44837 ocfs2_init_steal_slots(osb);
44838
44839- atomic_set(&osb->alloc_stats.moves, 0);
44840- atomic_set(&osb->alloc_stats.local_data, 0);
44841- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44842- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44843- atomic_set(&osb->alloc_stats.bg_extends, 0);
44844+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44845+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44846+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44847+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44848+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44849
44850 /* Copy the blockcheck stats from the superblock probe */
44851 osb->osb_ecc_stats = *stats;
44852diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
44853index 5d22872..523db20 100644
44854--- a/fs/ocfs2/symlink.c
44855+++ b/fs/ocfs2/symlink.c
44856@@ -142,7 +142,7 @@ bail:
44857
44858 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44859 {
44860- char *link = nd_get_link(nd);
44861+ const char *link = nd_get_link(nd);
44862 if (!IS_ERR(link))
44863 kfree(link);
44864 }
44865diff --git a/fs/open.c b/fs/open.c
44866index 22c41b5..695cb17 100644
44867--- a/fs/open.c
44868+++ b/fs/open.c
44869@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
44870 error = locks_verify_truncate(inode, NULL, length);
44871 if (!error)
44872 error = security_path_truncate(&path);
44873+
44874+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44875+ error = -EACCES;
44876+
44877 if (!error)
44878 error = do_truncate(path.dentry, length, 0, NULL);
44879
44880@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
44881 if (__mnt_is_readonly(path.mnt))
44882 res = -EROFS;
44883
44884+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44885+ res = -EACCES;
44886+
44887 out_path_release:
44888 path_put(&path);
44889 out:
44890@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
44891 if (error)
44892 goto dput_and_out;
44893
44894+ gr_log_chdir(path.dentry, path.mnt);
44895+
44896 set_fs_pwd(current->fs, &path);
44897
44898 dput_and_out:
44899@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
44900 goto out_putf;
44901
44902 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44903+
44904+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44905+ error = -EPERM;
44906+
44907+ if (!error)
44908+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44909+
44910 if (!error)
44911 set_fs_pwd(current->fs, &file->f_path);
44912 out_putf:
44913@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
44914 if (error)
44915 goto dput_and_out;
44916
44917+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44918+ goto dput_and_out;
44919+
44920 set_fs_root(current->fs, &path);
44921+
44922+ gr_handle_chroot_chdir(&path);
44923+
44924 error = 0;
44925 dput_and_out:
44926 path_put(&path);
44927@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
44928 if (error)
44929 return error;
44930 mutex_lock(&inode->i_mutex);
44931+
44932+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
44933+ error = -EACCES;
44934+ goto out_unlock;
44935+ }
44936+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
44937+ error = -EACCES;
44938+ goto out_unlock;
44939+ }
44940+
44941 error = security_path_chmod(path->dentry, path->mnt, mode);
44942 if (error)
44943 goto out_unlock;
44944@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
44945 int error;
44946 struct iattr newattrs;
44947
44948+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
44949+ return -EACCES;
44950+
44951 newattrs.ia_valid = ATTR_CTIME;
44952 if (user != (uid_t) -1) {
44953 newattrs.ia_valid |= ATTR_UID;
44954diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
44955index 6296b40..417c00f 100644
44956--- a/fs/partitions/efi.c
44957+++ b/fs/partitions/efi.c
44958@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
44959 if (!gpt)
44960 return NULL;
44961
44962+ if (!le32_to_cpu(gpt->num_partition_entries))
44963+ return NULL;
44964+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
44965+ if (!pte)
44966+ return NULL;
44967+
44968 count = le32_to_cpu(gpt->num_partition_entries) *
44969 le32_to_cpu(gpt->sizeof_partition_entry);
44970- if (!count)
44971- return NULL;
44972- pte = kzalloc(count, GFP_KERNEL);
44973- if (!pte)
44974- return NULL;
44975-
44976 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
44977 (u8 *) pte,
44978 count) < count) {
44979diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
44980index bd8ae78..539d250 100644
44981--- a/fs/partitions/ldm.c
44982+++ b/fs/partitions/ldm.c
44983@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
44984 goto found;
44985 }
44986
44987- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44988+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44989 if (!f) {
44990 ldm_crit ("Out of memory.");
44991 return false;
44992diff --git a/fs/pipe.c b/fs/pipe.c
44993index 4065f07..68c0706 100644
44994--- a/fs/pipe.c
44995+++ b/fs/pipe.c
44996@@ -420,9 +420,9 @@ redo:
44997 }
44998 if (bufs) /* More to do? */
44999 continue;
45000- if (!pipe->writers)
45001+ if (!atomic_read(&pipe->writers))
45002 break;
45003- if (!pipe->waiting_writers) {
45004+ if (!atomic_read(&pipe->waiting_writers)) {
45005 /* syscall merging: Usually we must not sleep
45006 * if O_NONBLOCK is set, or if we got some data.
45007 * But if a writer sleeps in kernel space, then
45008@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45009 mutex_lock(&inode->i_mutex);
45010 pipe = inode->i_pipe;
45011
45012- if (!pipe->readers) {
45013+ if (!atomic_read(&pipe->readers)) {
45014 send_sig(SIGPIPE, current, 0);
45015 ret = -EPIPE;
45016 goto out;
45017@@ -530,7 +530,7 @@ redo1:
45018 for (;;) {
45019 int bufs;
45020
45021- if (!pipe->readers) {
45022+ if (!atomic_read(&pipe->readers)) {
45023 send_sig(SIGPIPE, current, 0);
45024 if (!ret)
45025 ret = -EPIPE;
45026@@ -616,9 +616,9 @@ redo2:
45027 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45028 do_wakeup = 0;
45029 }
45030- pipe->waiting_writers++;
45031+ atomic_inc(&pipe->waiting_writers);
45032 pipe_wait(pipe);
45033- pipe->waiting_writers--;
45034+ atomic_dec(&pipe->waiting_writers);
45035 }
45036 out:
45037 mutex_unlock(&inode->i_mutex);
45038@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45039 mask = 0;
45040 if (filp->f_mode & FMODE_READ) {
45041 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45042- if (!pipe->writers && filp->f_version != pipe->w_counter)
45043+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45044 mask |= POLLHUP;
45045 }
45046
45047@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45048 * Most Unices do not set POLLERR for FIFOs but on Linux they
45049 * behave exactly like pipes for poll().
45050 */
45051- if (!pipe->readers)
45052+ if (!atomic_read(&pipe->readers))
45053 mask |= POLLERR;
45054 }
45055
45056@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45057
45058 mutex_lock(&inode->i_mutex);
45059 pipe = inode->i_pipe;
45060- pipe->readers -= decr;
45061- pipe->writers -= decw;
45062+ atomic_sub(decr, &pipe->readers);
45063+ atomic_sub(decw, &pipe->writers);
45064
45065- if (!pipe->readers && !pipe->writers) {
45066+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45067 free_pipe_info(inode);
45068 } else {
45069 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45070@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45071
45072 if (inode->i_pipe) {
45073 ret = 0;
45074- inode->i_pipe->readers++;
45075+ atomic_inc(&inode->i_pipe->readers);
45076 }
45077
45078 mutex_unlock(&inode->i_mutex);
45079@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45080
45081 if (inode->i_pipe) {
45082 ret = 0;
45083- inode->i_pipe->writers++;
45084+ atomic_inc(&inode->i_pipe->writers);
45085 }
45086
45087 mutex_unlock(&inode->i_mutex);
45088@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45089 if (inode->i_pipe) {
45090 ret = 0;
45091 if (filp->f_mode & FMODE_READ)
45092- inode->i_pipe->readers++;
45093+ atomic_inc(&inode->i_pipe->readers);
45094 if (filp->f_mode & FMODE_WRITE)
45095- inode->i_pipe->writers++;
45096+ atomic_inc(&inode->i_pipe->writers);
45097 }
45098
45099 mutex_unlock(&inode->i_mutex);
45100@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45101 inode->i_pipe = NULL;
45102 }
45103
45104-static struct vfsmount *pipe_mnt __read_mostly;
45105+struct vfsmount *pipe_mnt __read_mostly;
45106
45107 /*
45108 * pipefs_dname() is called from d_path().
45109@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45110 goto fail_iput;
45111 inode->i_pipe = pipe;
45112
45113- pipe->readers = pipe->writers = 1;
45114+ atomic_set(&pipe->readers, 1);
45115+ atomic_set(&pipe->writers, 1);
45116 inode->i_fop = &rdwr_pipefifo_fops;
45117
45118 /*
45119diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45120index 15af622..0e9f4467 100644
45121--- a/fs/proc/Kconfig
45122+++ b/fs/proc/Kconfig
45123@@ -30,12 +30,12 @@ config PROC_FS
45124
45125 config PROC_KCORE
45126 bool "/proc/kcore support" if !ARM
45127- depends on PROC_FS && MMU
45128+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45129
45130 config PROC_VMCORE
45131 bool "/proc/vmcore support"
45132- depends on PROC_FS && CRASH_DUMP
45133- default y
45134+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45135+ default n
45136 help
45137 Exports the dump image of crashed kernel in ELF format.
45138
45139@@ -59,8 +59,8 @@ config PROC_SYSCTL
45140 limited in memory.
45141
45142 config PROC_PAGE_MONITOR
45143- default y
45144- depends on PROC_FS && MMU
45145+ default n
45146+ depends on PROC_FS && MMU && !GRKERNSEC
45147 bool "Enable /proc page monitoring" if EXPERT
45148 help
45149 Various /proc files exist to monitor process memory utilization:
45150diff --git a/fs/proc/array.c b/fs/proc/array.c
45151index 3a1dafd..d41fc37 100644
45152--- a/fs/proc/array.c
45153+++ b/fs/proc/array.c
45154@@ -60,6 +60,7 @@
45155 #include <linux/tty.h>
45156 #include <linux/string.h>
45157 #include <linux/mman.h>
45158+#include <linux/grsecurity.h>
45159 #include <linux/proc_fs.h>
45160 #include <linux/ioport.h>
45161 #include <linux/uaccess.h>
45162@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45163 seq_putc(m, '\n');
45164 }
45165
45166+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45167+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45168+{
45169+ if (p->mm)
45170+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45171+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45172+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45173+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45174+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45175+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45176+ else
45177+ seq_printf(m, "PaX:\t-----\n");
45178+}
45179+#endif
45180+
45181 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45182 struct pid *pid, struct task_struct *task)
45183 {
45184@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45185 task_cpus_allowed(m, task);
45186 cpuset_task_status_allowed(m, task);
45187 task_context_switch_counts(m, task);
45188+
45189+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45190+ task_pax(m, task);
45191+#endif
45192+
45193+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45194+ task_grsec_rbac(m, task);
45195+#endif
45196+
45197 return 0;
45198 }
45199
45200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45201+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45202+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45203+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45204+#endif
45205+
45206 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45207 struct pid *pid, struct task_struct *task, int whole)
45208 {
45209@@ -449,6 +480,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45210 gtime = task->gtime;
45211 }
45212
45213+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45214+ if (PAX_RAND_FLAGS(mm)) {
45215+ eip = 0;
45216+ esp = 0;
45217+ wchan = 0;
45218+ }
45219+#endif
45220+#ifdef CONFIG_GRKERNSEC_HIDESYM
45221+ wchan = 0;
45222+ eip =0;
45223+ esp =0;
45224+#endif
45225+
45226 /* scale priority and nice values from timeslices to -20..20 */
45227 /* to make it look like a "normal" Unix priority/nice value */
45228 priority = task_prio(task);
45229@@ -489,9 +533,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45230 vsize,
45231 mm ? get_mm_rss(mm) : 0,
45232 rsslim,
45233+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45234+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45235+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45236+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45237+#else
45238 mm ? (permitted ? mm->start_code : 1) : 0,
45239 mm ? (permitted ? mm->end_code : 1) : 0,
45240 (permitted && mm) ? mm->start_stack : 0,
45241+#endif
45242 esp,
45243 eip,
45244 /* The signal information here is obsolete.
45245@@ -544,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45246
45247 return 0;
45248 }
45249+
45250+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45251+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45252+{
45253+ u32 curr_ip = 0;
45254+ unsigned long flags;
45255+
45256+ if (lock_task_sighand(task, &flags)) {
45257+ curr_ip = task->signal->curr_ip;
45258+ unlock_task_sighand(task, &flags);
45259+ }
45260+
45261+ return sprintf(buffer, "%pI4\n", &curr_ip);
45262+}
45263+#endif
45264diff --git a/fs/proc/base.c b/fs/proc/base.c
45265index 1fc1dca..357b933 100644
45266--- a/fs/proc/base.c
45267+++ b/fs/proc/base.c
45268@@ -107,6 +107,22 @@ struct pid_entry {
45269 union proc_op op;
45270 };
45271
45272+struct getdents_callback {
45273+ struct linux_dirent __user * current_dir;
45274+ struct linux_dirent __user * previous;
45275+ struct file * file;
45276+ int count;
45277+ int error;
45278+};
45279+
45280+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45281+ loff_t offset, u64 ino, unsigned int d_type)
45282+{
45283+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45284+ buf->error = -EINVAL;
45285+ return 0;
45286+}
45287+
45288 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45289 .name = (NAME), \
45290 .len = sizeof(NAME) - 1, \
45291@@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45292 return result;
45293 }
45294
45295-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45296-{
45297- struct mm_struct *mm;
45298- int err;
45299-
45300- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45301- if (err)
45302- return ERR_PTR(err);
45303-
45304- mm = get_task_mm(task);
45305- if (mm && mm != current->mm &&
45306- !ptrace_may_access(task, mode)) {
45307- mmput(mm);
45308- mm = ERR_PTR(-EACCES);
45309- }
45310- mutex_unlock(&task->signal->cred_guard_mutex);
45311-
45312- return mm;
45313-}
45314-
45315 struct mm_struct *mm_for_maps(struct task_struct *task)
45316 {
45317 return mm_access(task, PTRACE_MODE_READ);
45318@@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45319 if (!mm->arg_end)
45320 goto out_mm; /* Shh! No looking before we're done */
45321
45322+ if (gr_acl_handle_procpidmem(task))
45323+ goto out_mm;
45324+
45325 len = mm->arg_end - mm->arg_start;
45326
45327 if (len > PAGE_SIZE)
45328@@ -256,12 +255,28 @@ out:
45329 return res;
45330 }
45331
45332+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45333+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45334+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45335+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45336+#endif
45337+
45338 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45339 {
45340 struct mm_struct *mm = mm_for_maps(task);
45341 int res = PTR_ERR(mm);
45342 if (mm && !IS_ERR(mm)) {
45343 unsigned int nwords = 0;
45344+
45345+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45346+ /* allow if we're currently ptracing this task */
45347+ if (PAX_RAND_FLAGS(mm) &&
45348+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45349+ mmput(mm);
45350+ return 0;
45351+ }
45352+#endif
45353+
45354 do {
45355 nwords += 2;
45356 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45357@@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45358 }
45359
45360
45361-#ifdef CONFIG_KALLSYMS
45362+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45363 /*
45364 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45365 * Returns the resolved symbol. If that fails, simply return the address.
45366@@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45367 mutex_unlock(&task->signal->cred_guard_mutex);
45368 }
45369
45370-#ifdef CONFIG_STACKTRACE
45371+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45372
45373 #define MAX_STACK_TRACE_DEPTH 64
45374
45375@@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45376 return count;
45377 }
45378
45379-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45380+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45381 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45382 {
45383 long nr;
45384@@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45385 /************************************************************************/
45386
45387 /* permission checks */
45388-static int proc_fd_access_allowed(struct inode *inode)
45389+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45390 {
45391 struct task_struct *task;
45392 int allowed = 0;
45393@@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45394 */
45395 task = get_proc_task(inode);
45396 if (task) {
45397- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45398+ if (log)
45399+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45400+ else
45401+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45402 put_task_struct(task);
45403 }
45404 return allowed;
45405@@ -775,6 +793,13 @@ static int mem_open(struct inode* inode, struct file* file)
45406 if (IS_ERR(mm))
45407 return PTR_ERR(mm);
45408
45409+ if (mm) {
45410+ /* ensure this mm_struct can't be freed */
45411+ atomic_inc(&mm->mm_count);
45412+ /* but do not pin its memory */
45413+ mmput(mm);
45414+ }
45415+
45416 /* OK to pass negative loff_t, we can catch out-of-range */
45417 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45418 file->private_data = mm;
45419@@ -782,57 +807,18 @@ static int mem_open(struct inode* inode, struct file* file)
45420 return 0;
45421 }
45422
45423-static ssize_t mem_read(struct file * file, char __user * buf,
45424- size_t count, loff_t *ppos)
45425+static ssize_t mem_rw(struct file *file, char __user *buf,
45426+ size_t count, loff_t *ppos, int write)
45427 {
45428- int ret;
45429- char *page;
45430- unsigned long src = *ppos;
45431 struct mm_struct *mm = file->private_data;
45432-
45433- if (!mm)
45434- return 0;
45435-
45436- page = (char *)__get_free_page(GFP_TEMPORARY);
45437- if (!page)
45438- return -ENOMEM;
45439-
45440- ret = 0;
45441-
45442- while (count > 0) {
45443- int this_len, retval;
45444-
45445- this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
45446- retval = access_remote_vm(mm, src, page, this_len, 0);
45447- if (!retval) {
45448- if (!ret)
45449- ret = -EIO;
45450- break;
45451- }
45452-
45453- if (copy_to_user(buf, page, retval)) {
45454- ret = -EFAULT;
45455- break;
45456- }
45457-
45458- ret += retval;
45459- src += retval;
45460- buf += retval;
45461- count -= retval;
45462- }
45463- *ppos = src;
45464-
45465- free_page((unsigned long) page);
45466- return ret;
45467-}
45468-
45469-static ssize_t mem_write(struct file * file, const char __user *buf,
45470- size_t count, loff_t *ppos)
45471-{
45472- int copied;
45473+ unsigned long addr = *ppos;
45474+ ssize_t copied;
45475 char *page;
45476- unsigned long dst = *ppos;
45477- struct mm_struct *mm = file->private_data;
45478+
45479+#ifdef CONFIG_GRKERNSEC
45480+ if (write)
45481+ return -EPERM;
45482+#endif
45483
45484 if (!mm)
45485 return 0;
45486@@ -842,31 +828,54 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
45487 return -ENOMEM;
45488
45489 copied = 0;
45490+ if (!atomic_inc_not_zero(&mm->mm_users))
45491+ goto free;
45492+
45493 while (count > 0) {
45494- int this_len, retval;
45495+ int this_len = min_t(int, count, PAGE_SIZE);
45496
45497- this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
45498- if (copy_from_user(page, buf, this_len)) {
45499+ if (write && copy_from_user(page, buf, this_len)) {
45500 copied = -EFAULT;
45501 break;
45502 }
45503- retval = access_remote_vm(mm, dst, page, this_len, 1);
45504- if (!retval) {
45505+
45506+ this_len = access_remote_vm(mm, addr, page, this_len, write);
45507+ if (!this_len) {
45508 if (!copied)
45509 copied = -EIO;
45510 break;
45511 }
45512- copied += retval;
45513- buf += retval;
45514- dst += retval;
45515- count -= retval;
45516+
45517+ if (!write && copy_to_user(buf, page, this_len)) {
45518+ copied = -EFAULT;
45519+ break;
45520+ }
45521+
45522+ buf += this_len;
45523+ addr += this_len;
45524+ copied += this_len;
45525+ count -= this_len;
45526 }
45527- *ppos = dst;
45528+ *ppos = addr;
45529
45530+ mmput(mm);
45531+free:
45532 free_page((unsigned long) page);
45533 return copied;
45534 }
45535
45536+static ssize_t mem_read(struct file *file, char __user *buf,
45537+ size_t count, loff_t *ppos)
45538+{
45539+ return mem_rw(file, buf, count, ppos, 0);
45540+}
45541+
45542+static ssize_t mem_write(struct file *file, const char __user *buf,
45543+ size_t count, loff_t *ppos)
45544+{
45545+ return mem_rw(file, (char __user*)buf, count, ppos, 1);
45546+}
45547+
45548 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45549 {
45550 switch (orig) {
45551@@ -886,8 +895,8 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45552 static int mem_release(struct inode *inode, struct file *file)
45553 {
45554 struct mm_struct *mm = file->private_data;
45555-
45556- mmput(mm);
45557+ if (mm)
45558+ mmdrop(mm);
45559 return 0;
45560 }
45561
45562@@ -911,6 +920,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45563 if (!task)
45564 goto out_no_task;
45565
45566+ if (gr_acl_handle_procpidmem(task))
45567+ goto out;
45568+
45569 ret = -ENOMEM;
45570 page = (char *)__get_free_page(GFP_TEMPORARY);
45571 if (!page)
45572@@ -1533,7 +1545,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45573 path_put(&nd->path);
45574
45575 /* Are we allowed to snoop on the tasks file descriptors? */
45576- if (!proc_fd_access_allowed(inode))
45577+ if (!proc_fd_access_allowed(inode,0))
45578 goto out;
45579
45580 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45581@@ -1572,8 +1584,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45582 struct path path;
45583
45584 /* Are we allowed to snoop on the tasks file descriptors? */
45585- if (!proc_fd_access_allowed(inode))
45586- goto out;
45587+ /* logging this is needed for learning on chromium to work properly,
45588+ but we don't want to flood the logs from 'ps' which does a readlink
45589+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45590+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45591+ */
45592+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45593+ if (!proc_fd_access_allowed(inode,0))
45594+ goto out;
45595+ } else {
45596+ if (!proc_fd_access_allowed(inode,1))
45597+ goto out;
45598+ }
45599
45600 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45601 if (error)
45602@@ -1638,7 +1660,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45603 rcu_read_lock();
45604 cred = __task_cred(task);
45605 inode->i_uid = cred->euid;
45606+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45607+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45608+#else
45609 inode->i_gid = cred->egid;
45610+#endif
45611 rcu_read_unlock();
45612 }
45613 security_task_to_inode(task, inode);
45614@@ -1656,6 +1682,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45615 struct inode *inode = dentry->d_inode;
45616 struct task_struct *task;
45617 const struct cred *cred;
45618+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45619+ const struct cred *tmpcred = current_cred();
45620+#endif
45621
45622 generic_fillattr(inode, stat);
45623
45624@@ -1663,13 +1692,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45625 stat->uid = 0;
45626 stat->gid = 0;
45627 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45628+
45629+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45630+ rcu_read_unlock();
45631+ return -ENOENT;
45632+ }
45633+
45634 if (task) {
45635+ cred = __task_cred(task);
45636+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45637+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45638+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45639+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45640+#endif
45641+ ) {
45642+#endif
45643 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45644+#ifdef CONFIG_GRKERNSEC_PROC_USER
45645+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45646+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45647+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45648+#endif
45649 task_dumpable(task)) {
45650- cred = __task_cred(task);
45651 stat->uid = cred->euid;
45652+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45653+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45654+#else
45655 stat->gid = cred->egid;
45656+#endif
45657 }
45658+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45659+ } else {
45660+ rcu_read_unlock();
45661+ return -ENOENT;
45662+ }
45663+#endif
45664 }
45665 rcu_read_unlock();
45666 return 0;
45667@@ -1706,11 +1763,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45668
45669 if (task) {
45670 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45671+#ifdef CONFIG_GRKERNSEC_PROC_USER
45672+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45673+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45674+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45675+#endif
45676 task_dumpable(task)) {
45677 rcu_read_lock();
45678 cred = __task_cred(task);
45679 inode->i_uid = cred->euid;
45680+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45681+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45682+#else
45683 inode->i_gid = cred->egid;
45684+#endif
45685 rcu_read_unlock();
45686 } else {
45687 inode->i_uid = 0;
45688@@ -1828,7 +1894,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45689 int fd = proc_fd(inode);
45690
45691 if (task) {
45692- files = get_files_struct(task);
45693+ if (!gr_acl_handle_procpidmem(task))
45694+ files = get_files_struct(task);
45695 put_task_struct(task);
45696 }
45697 if (files) {
45698@@ -2096,11 +2163,21 @@ static const struct file_operations proc_fd_operations = {
45699 */
45700 static int proc_fd_permission(struct inode *inode, int mask)
45701 {
45702+ struct task_struct *task;
45703 int rv = generic_permission(inode, mask);
45704- if (rv == 0)
45705- return 0;
45706+
45707 if (task_pid(current) == proc_pid(inode))
45708 rv = 0;
45709+
45710+ task = get_proc_task(inode);
45711+ if (task == NULL)
45712+ return rv;
45713+
45714+ if (gr_acl_handle_procpidmem(task))
45715+ rv = -EACCES;
45716+
45717+ put_task_struct(task);
45718+
45719 return rv;
45720 }
45721
45722@@ -2210,6 +2287,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45723 if (!task)
45724 goto out_no_task;
45725
45726+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45727+ goto out;
45728+
45729 /*
45730 * Yes, it does not scale. And it should not. Don't add
45731 * new entries into /proc/<tgid>/ without very good reasons.
45732@@ -2254,6 +2334,9 @@ static int proc_pident_readdir(struct file *filp,
45733 if (!task)
45734 goto out_no_task;
45735
45736+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45737+ goto out;
45738+
45739 ret = 0;
45740 i = filp->f_pos;
45741 switch (i) {
45742@@ -2524,7 +2607,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45743 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45744 void *cookie)
45745 {
45746- char *s = nd_get_link(nd);
45747+ const char *s = nd_get_link(nd);
45748 if (!IS_ERR(s))
45749 __putname(s);
45750 }
45751@@ -2722,7 +2805,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45752 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45753 #endif
45754 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45755-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45756+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45757 INF("syscall", S_IRUGO, proc_pid_syscall),
45758 #endif
45759 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45760@@ -2747,10 +2830,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45761 #ifdef CONFIG_SECURITY
45762 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45763 #endif
45764-#ifdef CONFIG_KALLSYMS
45765+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45766 INF("wchan", S_IRUGO, proc_pid_wchan),
45767 #endif
45768-#ifdef CONFIG_STACKTRACE
45769+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45770 ONE("stack", S_IRUGO, proc_pid_stack),
45771 #endif
45772 #ifdef CONFIG_SCHEDSTATS
45773@@ -2784,6 +2867,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45774 #ifdef CONFIG_HARDWALL
45775 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45776 #endif
45777+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45778+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45779+#endif
45780 };
45781
45782 static int proc_tgid_base_readdir(struct file * filp,
45783@@ -2909,7 +2995,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45784 if (!inode)
45785 goto out;
45786
45787+#ifdef CONFIG_GRKERNSEC_PROC_USER
45788+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45789+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45790+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45791+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45792+#else
45793 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45794+#endif
45795 inode->i_op = &proc_tgid_base_inode_operations;
45796 inode->i_fop = &proc_tgid_base_operations;
45797 inode->i_flags|=S_IMMUTABLE;
45798@@ -2951,7 +3044,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45799 if (!task)
45800 goto out;
45801
45802+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45803+ goto out_put_task;
45804+
45805 result = proc_pid_instantiate(dir, dentry, task, NULL);
45806+out_put_task:
45807 put_task_struct(task);
45808 out:
45809 return result;
45810@@ -3016,6 +3113,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45811 {
45812 unsigned int nr;
45813 struct task_struct *reaper;
45814+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45815+ const struct cred *tmpcred = current_cred();
45816+ const struct cred *itercred;
45817+#endif
45818+ filldir_t __filldir = filldir;
45819 struct tgid_iter iter;
45820 struct pid_namespace *ns;
45821
45822@@ -3039,8 +3141,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45823 for (iter = next_tgid(ns, iter);
45824 iter.task;
45825 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45826+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45827+ rcu_read_lock();
45828+ itercred = __task_cred(iter.task);
45829+#endif
45830+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45831+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45832+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45833+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45834+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45835+#endif
45836+ )
45837+#endif
45838+ )
45839+ __filldir = &gr_fake_filldir;
45840+ else
45841+ __filldir = filldir;
45842+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45843+ rcu_read_unlock();
45844+#endif
45845 filp->f_pos = iter.tgid + TGID_OFFSET;
45846- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45847+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45848 put_task_struct(iter.task);
45849 goto out;
45850 }
45851@@ -3068,7 +3189,7 @@ static const struct pid_entry tid_base_stuff[] = {
45852 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45853 #endif
45854 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45855-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45856+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45857 INF("syscall", S_IRUGO, proc_pid_syscall),
45858 #endif
45859 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45860@@ -3092,10 +3213,10 @@ static const struct pid_entry tid_base_stuff[] = {
45861 #ifdef CONFIG_SECURITY
45862 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45863 #endif
45864-#ifdef CONFIG_KALLSYMS
45865+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45866 INF("wchan", S_IRUGO, proc_pid_wchan),
45867 #endif
45868-#ifdef CONFIG_STACKTRACE
45869+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45870 ONE("stack", S_IRUGO, proc_pid_stack),
45871 #endif
45872 #ifdef CONFIG_SCHEDSTATS
45873diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
45874index 82676e3..5f8518a 100644
45875--- a/fs/proc/cmdline.c
45876+++ b/fs/proc/cmdline.c
45877@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
45878
45879 static int __init proc_cmdline_init(void)
45880 {
45881+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45882+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45883+#else
45884 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45885+#endif
45886 return 0;
45887 }
45888 module_init(proc_cmdline_init);
45889diff --git a/fs/proc/devices.c b/fs/proc/devices.c
45890index b143471..bb105e5 100644
45891--- a/fs/proc/devices.c
45892+++ b/fs/proc/devices.c
45893@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
45894
45895 static int __init proc_devices_init(void)
45896 {
45897+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45898+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45899+#else
45900 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45901+#endif
45902 return 0;
45903 }
45904 module_init(proc_devices_init);
45905diff --git a/fs/proc/inode.c b/fs/proc/inode.c
45906index 7737c54..7172574 100644
45907--- a/fs/proc/inode.c
45908+++ b/fs/proc/inode.c
45909@@ -18,12 +18,18 @@
45910 #include <linux/module.h>
45911 #include <linux/sysctl.h>
45912 #include <linux/slab.h>
45913+#include <linux/grsecurity.h>
45914
45915 #include <asm/system.h>
45916 #include <asm/uaccess.h>
45917
45918 #include "internal.h"
45919
45920+#ifdef CONFIG_PROC_SYSCTL
45921+extern const struct inode_operations proc_sys_inode_operations;
45922+extern const struct inode_operations proc_sys_dir_operations;
45923+#endif
45924+
45925 static void proc_evict_inode(struct inode *inode)
45926 {
45927 struct proc_dir_entry *de;
45928@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
45929 ns_ops = PROC_I(inode)->ns_ops;
45930 if (ns_ops && ns_ops->put)
45931 ns_ops->put(PROC_I(inode)->ns);
45932+
45933+#ifdef CONFIG_PROC_SYSCTL
45934+ if (inode->i_op == &proc_sys_inode_operations ||
45935+ inode->i_op == &proc_sys_dir_operations)
45936+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45937+#endif
45938+
45939 }
45940
45941 static struct kmem_cache * proc_inode_cachep;
45942@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
45943 if (de->mode) {
45944 inode->i_mode = de->mode;
45945 inode->i_uid = de->uid;
45946+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45947+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45948+#else
45949 inode->i_gid = de->gid;
45950+#endif
45951 }
45952 if (de->size)
45953 inode->i_size = de->size;
45954diff --git a/fs/proc/internal.h b/fs/proc/internal.h
45955index 7838e5c..ff92cbc 100644
45956--- a/fs/proc/internal.h
45957+++ b/fs/proc/internal.h
45958@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45959 struct pid *pid, struct task_struct *task);
45960 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45961 struct pid *pid, struct task_struct *task);
45962+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45963+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45964+#endif
45965 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45966
45967 extern const struct file_operations proc_maps_operations;
45968diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
45969index d245cb2..f4e8498 100644
45970--- a/fs/proc/kcore.c
45971+++ b/fs/proc/kcore.c
45972@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45973 * the addresses in the elf_phdr on our list.
45974 */
45975 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45976- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45977+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45978+ if (tsz > buflen)
45979 tsz = buflen;
45980-
45981+
45982 while (buflen) {
45983 struct kcore_list *m;
45984
45985@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45986 kfree(elf_buf);
45987 } else {
45988 if (kern_addr_valid(start)) {
45989- unsigned long n;
45990+ char *elf_buf;
45991+ mm_segment_t oldfs;
45992
45993- n = copy_to_user(buffer, (char *)start, tsz);
45994- /*
45995- * We cannot distingush between fault on source
45996- * and fault on destination. When this happens
45997- * we clear too and hope it will trigger the
45998- * EFAULT again.
45999- */
46000- if (n) {
46001- if (clear_user(buffer + tsz - n,
46002- n))
46003+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46004+ if (!elf_buf)
46005+ return -ENOMEM;
46006+ oldfs = get_fs();
46007+ set_fs(KERNEL_DS);
46008+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46009+ set_fs(oldfs);
46010+ if (copy_to_user(buffer, elf_buf, tsz)) {
46011+ kfree(elf_buf);
46012 return -EFAULT;
46013+ }
46014 }
46015+ set_fs(oldfs);
46016+ kfree(elf_buf);
46017 } else {
46018 if (clear_user(buffer, tsz))
46019 return -EFAULT;
46020@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46021
46022 static int open_kcore(struct inode *inode, struct file *filp)
46023 {
46024+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46025+ return -EPERM;
46026+#endif
46027 if (!capable(CAP_SYS_RAWIO))
46028 return -EPERM;
46029 if (kcore_need_update)
46030diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46031index 80e4645..53e5fcf 100644
46032--- a/fs/proc/meminfo.c
46033+++ b/fs/proc/meminfo.c
46034@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46035 vmi.used >> 10,
46036 vmi.largest_chunk >> 10
46037 #ifdef CONFIG_MEMORY_FAILURE
46038- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46039+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46040 #endif
46041 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46042 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46043diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46044index b1822dd..df622cb 100644
46045--- a/fs/proc/nommu.c
46046+++ b/fs/proc/nommu.c
46047@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46048 if (len < 1)
46049 len = 1;
46050 seq_printf(m, "%*c", len, ' ');
46051- seq_path(m, &file->f_path, "");
46052+ seq_path(m, &file->f_path, "\n\\");
46053 }
46054
46055 seq_putc(m, '\n');
46056diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46057index f738024..876984a 100644
46058--- a/fs/proc/proc_net.c
46059+++ b/fs/proc/proc_net.c
46060@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46061 struct task_struct *task;
46062 struct nsproxy *ns;
46063 struct net *net = NULL;
46064+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46065+ const struct cred *cred = current_cred();
46066+#endif
46067+
46068+#ifdef CONFIG_GRKERNSEC_PROC_USER
46069+ if (cred->fsuid)
46070+ return net;
46071+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46072+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46073+ return net;
46074+#endif
46075
46076 rcu_read_lock();
46077 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46078diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46079index a6b6217..1e0579d 100644
46080--- a/fs/proc/proc_sysctl.c
46081+++ b/fs/proc/proc_sysctl.c
46082@@ -9,11 +9,13 @@
46083 #include <linux/namei.h>
46084 #include "internal.h"
46085
46086+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46087+
46088 static const struct dentry_operations proc_sys_dentry_operations;
46089 static const struct file_operations proc_sys_file_operations;
46090-static const struct inode_operations proc_sys_inode_operations;
46091+const struct inode_operations proc_sys_inode_operations;
46092 static const struct file_operations proc_sys_dir_file_operations;
46093-static const struct inode_operations proc_sys_dir_operations;
46094+const struct inode_operations proc_sys_dir_operations;
46095
46096 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46097 {
46098@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46099
46100 err = NULL;
46101 d_set_d_op(dentry, &proc_sys_dentry_operations);
46102+
46103+ gr_handle_proc_create(dentry, inode);
46104+
46105 d_add(dentry, inode);
46106
46107+ if (gr_handle_sysctl(p, MAY_EXEC))
46108+ err = ERR_PTR(-ENOENT);
46109+
46110 out:
46111 sysctl_head_finish(head);
46112 return err;
46113@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46114 if (!table->proc_handler)
46115 goto out;
46116
46117+#ifdef CONFIG_GRKERNSEC
46118+ error = -EPERM;
46119+ if (write && !capable(CAP_SYS_ADMIN))
46120+ goto out;
46121+#endif
46122+
46123 /* careful: calling conventions are nasty here */
46124 res = count;
46125 error = table->proc_handler(table, write, buf, &res, ppos);
46126@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46127 return -ENOMEM;
46128 } else {
46129 d_set_d_op(child, &proc_sys_dentry_operations);
46130+
46131+ gr_handle_proc_create(child, inode);
46132+
46133 d_add(child, inode);
46134 }
46135 } else {
46136@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46137 if (*pos < file->f_pos)
46138 continue;
46139
46140+ if (gr_handle_sysctl(table, 0))
46141+ continue;
46142+
46143 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46144 if (res)
46145 return res;
46146@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46147 if (IS_ERR(head))
46148 return PTR_ERR(head);
46149
46150+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46151+ return -ENOENT;
46152+
46153 generic_fillattr(inode, stat);
46154 if (table)
46155 stat->mode = (stat->mode & S_IFMT) | table->mode;
46156@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46157 .llseek = generic_file_llseek,
46158 };
46159
46160-static const struct inode_operations proc_sys_inode_operations = {
46161+const struct inode_operations proc_sys_inode_operations = {
46162 .permission = proc_sys_permission,
46163 .setattr = proc_sys_setattr,
46164 .getattr = proc_sys_getattr,
46165 };
46166
46167-static const struct inode_operations proc_sys_dir_operations = {
46168+const struct inode_operations proc_sys_dir_operations = {
46169 .lookup = proc_sys_lookup,
46170 .permission = proc_sys_permission,
46171 .setattr = proc_sys_setattr,
46172diff --git a/fs/proc/root.c b/fs/proc/root.c
46173index 03102d9..4ae347e 100644
46174--- a/fs/proc/root.c
46175+++ b/fs/proc/root.c
46176@@ -121,7 +121,15 @@ void __init proc_root_init(void)
46177 #ifdef CONFIG_PROC_DEVICETREE
46178 proc_device_tree_init();
46179 #endif
46180+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46181+#ifdef CONFIG_GRKERNSEC_PROC_USER
46182+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46183+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46184+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46185+#endif
46186+#else
46187 proc_mkdir("bus", NULL);
46188+#endif
46189 proc_sys_init();
46190 }
46191
46192diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46193index 7dcd2a2..d1d9cb6 100644
46194--- a/fs/proc/task_mmu.c
46195+++ b/fs/proc/task_mmu.c
46196@@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46197 "VmExe:\t%8lu kB\n"
46198 "VmLib:\t%8lu kB\n"
46199 "VmPTE:\t%8lu kB\n"
46200- "VmSwap:\t%8lu kB\n",
46201- hiwater_vm << (PAGE_SHIFT-10),
46202+ "VmSwap:\t%8lu kB\n"
46203+
46204+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46205+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46206+#endif
46207+
46208+ ,hiwater_vm << (PAGE_SHIFT-10),
46209 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46210 mm->locked_vm << (PAGE_SHIFT-10),
46211 mm->pinned_vm << (PAGE_SHIFT-10),
46212@@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46213 data << (PAGE_SHIFT-10),
46214 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46215 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46216- swap << (PAGE_SHIFT-10));
46217+ swap << (PAGE_SHIFT-10)
46218+
46219+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46220+ , mm->context.user_cs_base, mm->context.user_cs_limit
46221+#endif
46222+
46223+ );
46224 }
46225
46226 unsigned long task_vsize(struct mm_struct *mm)
46227@@ -209,6 +220,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46228 return ret;
46229 }
46230
46231+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46232+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46233+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46234+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46235+#endif
46236+
46237 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46238 {
46239 struct mm_struct *mm = vma->vm_mm;
46240@@ -227,13 +244,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46241 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46242 }
46243
46244- /* We don't show the stack guard page in /proc/maps */
46245+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46246+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46247+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46248+#else
46249 start = vma->vm_start;
46250- if (stack_guard_page_start(vma, start))
46251- start += PAGE_SIZE;
46252 end = vma->vm_end;
46253- if (stack_guard_page_end(vma, end))
46254- end -= PAGE_SIZE;
46255+#endif
46256
46257 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46258 start,
46259@@ -242,7 +259,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46260 flags & VM_WRITE ? 'w' : '-',
46261 flags & VM_EXEC ? 'x' : '-',
46262 flags & VM_MAYSHARE ? 's' : 'p',
46263+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46264+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46265+#else
46266 pgoff,
46267+#endif
46268 MAJOR(dev), MINOR(dev), ino, &len);
46269
46270 /*
46271@@ -251,7 +272,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46272 */
46273 if (file) {
46274 pad_len_spaces(m, len);
46275- seq_path(m, &file->f_path, "\n");
46276+ seq_path(m, &file->f_path, "\n\\");
46277 } else {
46278 const char *name = arch_vma_name(vma);
46279 if (!name) {
46280@@ -259,8 +280,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46281 if (vma->vm_start <= mm->brk &&
46282 vma->vm_end >= mm->start_brk) {
46283 name = "[heap]";
46284- } else if (vma->vm_start <= mm->start_stack &&
46285- vma->vm_end >= mm->start_stack) {
46286+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46287+ (vma->vm_start <= mm->start_stack &&
46288+ vma->vm_end >= mm->start_stack)) {
46289 name = "[stack]";
46290 }
46291 } else {
46292@@ -435,11 +457,16 @@ static int show_smap(struct seq_file *m, void *v)
46293 };
46294
46295 memset(&mss, 0, sizeof mss);
46296- mss.vma = vma;
46297- /* mmap_sem is held in m_start */
46298- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46299- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46300-
46301+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46302+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46303+#endif
46304+ mss.vma = vma;
46305+ /* mmap_sem is held in m_start */
46306+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46307+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46308+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46309+ }
46310+#endif
46311 show_map_vma(m, vma);
46312
46313 seq_printf(m,
46314@@ -457,7 +484,11 @@ static int show_smap(struct seq_file *m, void *v)
46315 "KernelPageSize: %8lu kB\n"
46316 "MMUPageSize: %8lu kB\n"
46317 "Locked: %8lu kB\n",
46318+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46319+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46320+#else
46321 (vma->vm_end - vma->vm_start) >> 10,
46322+#endif
46323 mss.resident >> 10,
46324 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46325 mss.shared_clean >> 10,
46326@@ -1036,7 +1067,7 @@ static int show_numa_map(struct seq_file *m, void *v)
46327
46328 if (file) {
46329 seq_printf(m, " file=");
46330- seq_path(m, &file->f_path, "\n\t= ");
46331+ seq_path(m, &file->f_path, "\n\t\\= ");
46332 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46333 seq_printf(m, " heap");
46334 } else if (vma->vm_start <= mm->start_stack &&
46335diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46336index 980de54..2a4db5f 100644
46337--- a/fs/proc/task_nommu.c
46338+++ b/fs/proc/task_nommu.c
46339@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46340 else
46341 bytes += kobjsize(mm);
46342
46343- if (current->fs && current->fs->users > 1)
46344+ if (current->fs && atomic_read(&current->fs->users) > 1)
46345 sbytes += kobjsize(current->fs);
46346 else
46347 bytes += kobjsize(current->fs);
46348@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46349
46350 if (file) {
46351 pad_len_spaces(m, len);
46352- seq_path(m, &file->f_path, "");
46353+ seq_path(m, &file->f_path, "\n\\");
46354 } else if (mm) {
46355 if (vma->vm_start <= mm->start_stack &&
46356 vma->vm_end >= mm->start_stack) {
46357diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46358index d67908b..d13f6a6 100644
46359--- a/fs/quota/netlink.c
46360+++ b/fs/quota/netlink.c
46361@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46362 void quota_send_warning(short type, unsigned int id, dev_t dev,
46363 const char warntype)
46364 {
46365- static atomic_t seq;
46366+ static atomic_unchecked_t seq;
46367 struct sk_buff *skb;
46368 void *msg_head;
46369 int ret;
46370@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46371 "VFS: Not enough memory to send quota warning.\n");
46372 return;
46373 }
46374- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46375+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46376 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46377 if (!msg_head) {
46378 printk(KERN_ERR
46379diff --git a/fs/readdir.c b/fs/readdir.c
46380index 356f715..c918d38 100644
46381--- a/fs/readdir.c
46382+++ b/fs/readdir.c
46383@@ -17,6 +17,7 @@
46384 #include <linux/security.h>
46385 #include <linux/syscalls.h>
46386 #include <linux/unistd.h>
46387+#include <linux/namei.h>
46388
46389 #include <asm/uaccess.h>
46390
46391@@ -67,6 +68,7 @@ struct old_linux_dirent {
46392
46393 struct readdir_callback {
46394 struct old_linux_dirent __user * dirent;
46395+ struct file * file;
46396 int result;
46397 };
46398
46399@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46400 buf->result = -EOVERFLOW;
46401 return -EOVERFLOW;
46402 }
46403+
46404+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46405+ return 0;
46406+
46407 buf->result++;
46408 dirent = buf->dirent;
46409 if (!access_ok(VERIFY_WRITE, dirent,
46410@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46411
46412 buf.result = 0;
46413 buf.dirent = dirent;
46414+ buf.file = file;
46415
46416 error = vfs_readdir(file, fillonedir, &buf);
46417 if (buf.result)
46418@@ -142,6 +149,7 @@ struct linux_dirent {
46419 struct getdents_callback {
46420 struct linux_dirent __user * current_dir;
46421 struct linux_dirent __user * previous;
46422+ struct file * file;
46423 int count;
46424 int error;
46425 };
46426@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46427 buf->error = -EOVERFLOW;
46428 return -EOVERFLOW;
46429 }
46430+
46431+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46432+ return 0;
46433+
46434 dirent = buf->previous;
46435 if (dirent) {
46436 if (__put_user(offset, &dirent->d_off))
46437@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46438 buf.previous = NULL;
46439 buf.count = count;
46440 buf.error = 0;
46441+ buf.file = file;
46442
46443 error = vfs_readdir(file, filldir, &buf);
46444 if (error >= 0)
46445@@ -229,6 +242,7 @@ out:
46446 struct getdents_callback64 {
46447 struct linux_dirent64 __user * current_dir;
46448 struct linux_dirent64 __user * previous;
46449+ struct file *file;
46450 int count;
46451 int error;
46452 };
46453@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46454 buf->error = -EINVAL; /* only used if we fail.. */
46455 if (reclen > buf->count)
46456 return -EINVAL;
46457+
46458+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46459+ return 0;
46460+
46461 dirent = buf->previous;
46462 if (dirent) {
46463 if (__put_user(offset, &dirent->d_off))
46464@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46465
46466 buf.current_dir = dirent;
46467 buf.previous = NULL;
46468+ buf.file = file;
46469 buf.count = count;
46470 buf.error = 0;
46471
46472@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46473 error = buf.error;
46474 lastdirent = buf.previous;
46475 if (lastdirent) {
46476- typeof(lastdirent->d_off) d_off = file->f_pos;
46477+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46478 if (__put_user(d_off, &lastdirent->d_off))
46479 error = -EFAULT;
46480 else
46481diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46482index 60c0804..d814f98 100644
46483--- a/fs/reiserfs/do_balan.c
46484+++ b/fs/reiserfs/do_balan.c
46485@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46486 return;
46487 }
46488
46489- atomic_inc(&(fs_generation(tb->tb_sb)));
46490+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46491 do_balance_starts(tb);
46492
46493 /* balance leaf returns 0 except if combining L R and S into
46494diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46495index 7a99811..a7c96c4 100644
46496--- a/fs/reiserfs/procfs.c
46497+++ b/fs/reiserfs/procfs.c
46498@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46499 "SMALL_TAILS " : "NO_TAILS ",
46500 replay_only(sb) ? "REPLAY_ONLY " : "",
46501 convert_reiserfs(sb) ? "CONV " : "",
46502- atomic_read(&r->s_generation_counter),
46503+ atomic_read_unchecked(&r->s_generation_counter),
46504 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46505 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46506 SF(s_good_search_by_key_reada), SF(s_bmaps),
46507diff --git a/fs/select.c b/fs/select.c
46508index d33418f..2a5345e 100644
46509--- a/fs/select.c
46510+++ b/fs/select.c
46511@@ -20,6 +20,7 @@
46512 #include <linux/module.h>
46513 #include <linux/slab.h>
46514 #include <linux/poll.h>
46515+#include <linux/security.h>
46516 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46517 #include <linux/file.h>
46518 #include <linux/fdtable.h>
46519@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46520 struct poll_list *walk = head;
46521 unsigned long todo = nfds;
46522
46523+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46524 if (nfds > rlimit(RLIMIT_NOFILE))
46525 return -EINVAL;
46526
46527diff --git a/fs/seq_file.c b/fs/seq_file.c
46528index dba43c3..a99fb63 100644
46529--- a/fs/seq_file.c
46530+++ b/fs/seq_file.c
46531@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46532 return 0;
46533 }
46534 if (!m->buf) {
46535- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46536+ m->size = PAGE_SIZE;
46537+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46538 if (!m->buf)
46539 return -ENOMEM;
46540 }
46541@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46542 Eoverflow:
46543 m->op->stop(m, p);
46544 kfree(m->buf);
46545- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46546+ m->size <<= 1;
46547+ m->buf = kmalloc(m->size, GFP_KERNEL);
46548 return !m->buf ? -ENOMEM : -EAGAIN;
46549 }
46550
46551@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46552 m->version = file->f_version;
46553 /* grab buffer if we didn't have one */
46554 if (!m->buf) {
46555- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46556+ m->size = PAGE_SIZE;
46557+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46558 if (!m->buf)
46559 goto Enomem;
46560 }
46561@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46562 goto Fill;
46563 m->op->stop(m, p);
46564 kfree(m->buf);
46565- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46566+ m->size <<= 1;
46567+ m->buf = kmalloc(m->size, GFP_KERNEL);
46568 if (!m->buf)
46569 goto Enomem;
46570 m->count = 0;
46571@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
46572 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46573 void *data)
46574 {
46575- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46576+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46577 int res = -ENOMEM;
46578
46579 if (op) {
46580diff --git a/fs/splice.c b/fs/splice.c
46581index fa2defa..8601650 100644
46582--- a/fs/splice.c
46583+++ b/fs/splice.c
46584@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46585 pipe_lock(pipe);
46586
46587 for (;;) {
46588- if (!pipe->readers) {
46589+ if (!atomic_read(&pipe->readers)) {
46590 send_sig(SIGPIPE, current, 0);
46591 if (!ret)
46592 ret = -EPIPE;
46593@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46594 do_wakeup = 0;
46595 }
46596
46597- pipe->waiting_writers++;
46598+ atomic_inc(&pipe->waiting_writers);
46599 pipe_wait(pipe);
46600- pipe->waiting_writers--;
46601+ atomic_dec(&pipe->waiting_writers);
46602 }
46603
46604 pipe_unlock(pipe);
46605@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46606 old_fs = get_fs();
46607 set_fs(get_ds());
46608 /* The cast to a user pointer is valid due to the set_fs() */
46609- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46610+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46611 set_fs(old_fs);
46612
46613 return res;
46614@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46615 old_fs = get_fs();
46616 set_fs(get_ds());
46617 /* The cast to a user pointer is valid due to the set_fs() */
46618- res = vfs_write(file, (const char __user *)buf, count, &pos);
46619+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46620 set_fs(old_fs);
46621
46622 return res;
46623@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46624 goto err;
46625
46626 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46627- vec[i].iov_base = (void __user *) page_address(page);
46628+ vec[i].iov_base = (void __force_user *) page_address(page);
46629 vec[i].iov_len = this_len;
46630 spd.pages[i] = page;
46631 spd.nr_pages++;
46632@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46633 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46634 {
46635 while (!pipe->nrbufs) {
46636- if (!pipe->writers)
46637+ if (!atomic_read(&pipe->writers))
46638 return 0;
46639
46640- if (!pipe->waiting_writers && sd->num_spliced)
46641+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46642 return 0;
46643
46644 if (sd->flags & SPLICE_F_NONBLOCK)
46645@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46646 * out of the pipe right after the splice_to_pipe(). So set
46647 * PIPE_READERS appropriately.
46648 */
46649- pipe->readers = 1;
46650+ atomic_set(&pipe->readers, 1);
46651
46652 current->splice_pipe = pipe;
46653 }
46654@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46655 ret = -ERESTARTSYS;
46656 break;
46657 }
46658- if (!pipe->writers)
46659+ if (!atomic_read(&pipe->writers))
46660 break;
46661- if (!pipe->waiting_writers) {
46662+ if (!atomic_read(&pipe->waiting_writers)) {
46663 if (flags & SPLICE_F_NONBLOCK) {
46664 ret = -EAGAIN;
46665 break;
46666@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46667 pipe_lock(pipe);
46668
46669 while (pipe->nrbufs >= pipe->buffers) {
46670- if (!pipe->readers) {
46671+ if (!atomic_read(&pipe->readers)) {
46672 send_sig(SIGPIPE, current, 0);
46673 ret = -EPIPE;
46674 break;
46675@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46676 ret = -ERESTARTSYS;
46677 break;
46678 }
46679- pipe->waiting_writers++;
46680+ atomic_inc(&pipe->waiting_writers);
46681 pipe_wait(pipe);
46682- pipe->waiting_writers--;
46683+ atomic_dec(&pipe->waiting_writers);
46684 }
46685
46686 pipe_unlock(pipe);
46687@@ -1819,14 +1819,14 @@ retry:
46688 pipe_double_lock(ipipe, opipe);
46689
46690 do {
46691- if (!opipe->readers) {
46692+ if (!atomic_read(&opipe->readers)) {
46693 send_sig(SIGPIPE, current, 0);
46694 if (!ret)
46695 ret = -EPIPE;
46696 break;
46697 }
46698
46699- if (!ipipe->nrbufs && !ipipe->writers)
46700+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46701 break;
46702
46703 /*
46704@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46705 pipe_double_lock(ipipe, opipe);
46706
46707 do {
46708- if (!opipe->readers) {
46709+ if (!atomic_read(&opipe->readers)) {
46710 send_sig(SIGPIPE, current, 0);
46711 if (!ret)
46712 ret = -EPIPE;
46713@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46714 * return EAGAIN if we have the potential of some data in the
46715 * future, otherwise just return 0
46716 */
46717- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46718+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46719 ret = -EAGAIN;
46720
46721 pipe_unlock(ipipe);
46722diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
46723index 7fdf6a7..e6cd8ad 100644
46724--- a/fs/sysfs/dir.c
46725+++ b/fs/sysfs/dir.c
46726@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
46727 struct sysfs_dirent *sd;
46728 int rc;
46729
46730+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46731+ const char *parent_name = parent_sd->s_name;
46732+
46733+ mode = S_IFDIR | S_IRWXU;
46734+
46735+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
46736+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
46737+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
46738+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
46739+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
46740+#endif
46741+
46742 /* allocate */
46743 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
46744 if (!sd)
46745diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46746index 779789a..f58193c 100644
46747--- a/fs/sysfs/file.c
46748+++ b/fs/sysfs/file.c
46749@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46750
46751 struct sysfs_open_dirent {
46752 atomic_t refcnt;
46753- atomic_t event;
46754+ atomic_unchecked_t event;
46755 wait_queue_head_t poll;
46756 struct list_head buffers; /* goes through sysfs_buffer.list */
46757 };
46758@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46759 if (!sysfs_get_active(attr_sd))
46760 return -ENODEV;
46761
46762- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46763+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46764 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46765
46766 sysfs_put_active(attr_sd);
46767@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46768 return -ENOMEM;
46769
46770 atomic_set(&new_od->refcnt, 0);
46771- atomic_set(&new_od->event, 1);
46772+ atomic_set_unchecked(&new_od->event, 1);
46773 init_waitqueue_head(&new_od->poll);
46774 INIT_LIST_HEAD(&new_od->buffers);
46775 goto retry;
46776@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46777
46778 sysfs_put_active(attr_sd);
46779
46780- if (buffer->event != atomic_read(&od->event))
46781+ if (buffer->event != atomic_read_unchecked(&od->event))
46782 goto trigger;
46783
46784 return DEFAULT_POLLMASK;
46785@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46786
46787 od = sd->s_attr.open;
46788 if (od) {
46789- atomic_inc(&od->event);
46790+ atomic_inc_unchecked(&od->event);
46791 wake_up_interruptible(&od->poll);
46792 }
46793
46794diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46795index a7ac78f..02158e1 100644
46796--- a/fs/sysfs/symlink.c
46797+++ b/fs/sysfs/symlink.c
46798@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46799
46800 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46801 {
46802- char *page = nd_get_link(nd);
46803+ const char *page = nd_get_link(nd);
46804 if (!IS_ERR(page))
46805 free_page((unsigned long)page);
46806 }
46807diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46808index c175b4d..8f36a16 100644
46809--- a/fs/udf/misc.c
46810+++ b/fs/udf/misc.c
46811@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46812
46813 u8 udf_tag_checksum(const struct tag *t)
46814 {
46815- u8 *data = (u8 *)t;
46816+ const u8 *data = (const u8 *)t;
46817 u8 checksum = 0;
46818 int i;
46819 for (i = 0; i < sizeof(struct tag); ++i)
46820diff --git a/fs/utimes.c b/fs/utimes.c
46821index ba653f3..06ea4b1 100644
46822--- a/fs/utimes.c
46823+++ b/fs/utimes.c
46824@@ -1,6 +1,7 @@
46825 #include <linux/compiler.h>
46826 #include <linux/file.h>
46827 #include <linux/fs.h>
46828+#include <linux/security.h>
46829 #include <linux/linkage.h>
46830 #include <linux/mount.h>
46831 #include <linux/namei.h>
46832@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
46833 goto mnt_drop_write_and_out;
46834 }
46835 }
46836+
46837+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46838+ error = -EACCES;
46839+ goto mnt_drop_write_and_out;
46840+ }
46841+
46842 mutex_lock(&inode->i_mutex);
46843 error = notify_change(path->dentry, &newattrs);
46844 mutex_unlock(&inode->i_mutex);
46845diff --git a/fs/xattr.c b/fs/xattr.c
46846index 67583de..c5aad14 100644
46847--- a/fs/xattr.c
46848+++ b/fs/xattr.c
46849@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46850 * Extended attribute SET operations
46851 */
46852 static long
46853-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46854+setxattr(struct path *path, const char __user *name, const void __user *value,
46855 size_t size, int flags)
46856 {
46857 int error;
46858@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
46859 return PTR_ERR(kvalue);
46860 }
46861
46862- error = vfs_setxattr(d, kname, kvalue, size, flags);
46863+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46864+ error = -EACCES;
46865+ goto out;
46866+ }
46867+
46868+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46869+out:
46870 kfree(kvalue);
46871 return error;
46872 }
46873@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
46874 return error;
46875 error = mnt_want_write(path.mnt);
46876 if (!error) {
46877- error = setxattr(path.dentry, name, value, size, flags);
46878+ error = setxattr(&path, name, value, size, flags);
46879 mnt_drop_write(path.mnt);
46880 }
46881 path_put(&path);
46882@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
46883 return error;
46884 error = mnt_want_write(path.mnt);
46885 if (!error) {
46886- error = setxattr(path.dentry, name, value, size, flags);
46887+ error = setxattr(&path, name, value, size, flags);
46888 mnt_drop_write(path.mnt);
46889 }
46890 path_put(&path);
46891@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
46892 const void __user *,value, size_t, size, int, flags)
46893 {
46894 struct file *f;
46895- struct dentry *dentry;
46896 int error = -EBADF;
46897
46898 f = fget(fd);
46899 if (!f)
46900 return error;
46901- dentry = f->f_path.dentry;
46902- audit_inode(NULL, dentry);
46903+ audit_inode(NULL, f->f_path.dentry);
46904 error = mnt_want_write_file(f);
46905 if (!error) {
46906- error = setxattr(dentry, name, value, size, flags);
46907+ error = setxattr(&f->f_path, name, value, size, flags);
46908 mnt_drop_write(f->f_path.mnt);
46909 }
46910 fput(f);
46911diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
46912index 8d5a506..7f62712 100644
46913--- a/fs/xattr_acl.c
46914+++ b/fs/xattr_acl.c
46915@@ -17,8 +17,8 @@
46916 struct posix_acl *
46917 posix_acl_from_xattr(const void *value, size_t size)
46918 {
46919- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46920- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46921+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46922+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46923 int count;
46924 struct posix_acl *acl;
46925 struct posix_acl_entry *acl_e;
46926diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
46927index d0ab788..827999b 100644
46928--- a/fs/xfs/xfs_bmap.c
46929+++ b/fs/xfs/xfs_bmap.c
46930@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
46931 int nmap,
46932 int ret_nmap);
46933 #else
46934-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46935+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46936 #endif /* DEBUG */
46937
46938 STATIC int
46939diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
46940index 79d05e8..e3e5861 100644
46941--- a/fs/xfs/xfs_dir2_sf.c
46942+++ b/fs/xfs/xfs_dir2_sf.c
46943@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
46944 }
46945
46946 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
46947- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46948+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46949+ char name[sfep->namelen];
46950+ memcpy(name, sfep->name, sfep->namelen);
46951+ if (filldir(dirent, name, sfep->namelen,
46952+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46953+ *offset = off & 0x7fffffff;
46954+ return 0;
46955+ }
46956+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46957 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46958 *offset = off & 0x7fffffff;
46959 return 0;
46960diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
46961index d99a905..9f88202 100644
46962--- a/fs/xfs/xfs_ioctl.c
46963+++ b/fs/xfs/xfs_ioctl.c
46964@@ -128,7 +128,7 @@ xfs_find_handle(
46965 }
46966
46967 error = -EFAULT;
46968- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46969+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46970 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46971 goto out_put;
46972
46973diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
46974index 23ce927..e274cc1 100644
46975--- a/fs/xfs/xfs_iops.c
46976+++ b/fs/xfs/xfs_iops.c
46977@@ -447,7 +447,7 @@ xfs_vn_put_link(
46978 struct nameidata *nd,
46979 void *p)
46980 {
46981- char *s = nd_get_link(nd);
46982+ const char *s = nd_get_link(nd);
46983
46984 if (!IS_ERR(s))
46985 kfree(s);
46986diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
46987new file mode 100644
46988index 0000000..dfd3d34
46989--- /dev/null
46990+++ b/grsecurity/Kconfig
46991@@ -0,0 +1,1069 @@
46992+# grsecurity configuration
46993+# grecurity configuration
46994+#
46995+
46996+menu "Grsecurity"
46997+
46998+config GRKERNSEC
46999+ bool "Grsecurity"
47000+ select CRYPTO
47001+ select CRYPTO_SHA256
47002+ help
47003+ If you say Y here, you will be able to configure many features
47004+ that will enhance the security of your system. It is highly
47005+ recommended that you say Y here and read through the help
47006+ for each option so that you fully understand the features and
47007+ can evaluate their usefulness for your machine.
47008+
47009+choice
47010+ prompt "Security Level"
47011+ depends on GRKERNSEC
47012+ default GRKERNSEC_CUSTOM
47013+
47014+config GRKERNSEC_LOW
47015+ bool "Low"
47016+ select GRKERNSEC_LINK
47017+ select GRKERNSEC_FIFO
47018+ select GRKERNSEC_RANDNET
47019+ select GRKERNSEC_DMESG
47020+ select GRKERNSEC_CHROOT
47021+ select GRKERNSEC_CHROOT_CHDIR
47022+
47023+ help
47024+ If you choose this option, several of the grsecurity options will
47025+ be enabled that will give you greater protection against a number
47026+ of attacks, while assuring that none of your software will have any
47027+ conflicts with the additional security measures. If you run a lot
47028+ of unusual software, or you are having problems with the higher
47029+ security levels, you should say Y here. With this option, the
47030+ following features are enabled:
47031+
47032+ - Linking restrictions
47033+ - FIFO restrictions
47034+ - Restricted dmesg
47035+ - Enforced chdir("/") on chroot
47036+ - Runtime module disabling
47037+
47038+config GRKERNSEC_MEDIUM
47039+ bool "Medium"
47040+ select PAX
47041+ select PAX_EI_PAX
47042+ select PAX_PT_PAX_FLAGS
47043+ select PAX_HAVE_ACL_FLAGS
47044+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47045+ select GRKERNSEC_CHROOT
47046+ select GRKERNSEC_CHROOT_SYSCTL
47047+ select GRKERNSEC_LINK
47048+ select GRKERNSEC_FIFO
47049+ select GRKERNSEC_DMESG
47050+ select GRKERNSEC_RANDNET
47051+ select GRKERNSEC_FORKFAIL
47052+ select GRKERNSEC_TIME
47053+ select GRKERNSEC_SIGNAL
47054+ select GRKERNSEC_CHROOT
47055+ select GRKERNSEC_CHROOT_UNIX
47056+ select GRKERNSEC_CHROOT_MOUNT
47057+ select GRKERNSEC_CHROOT_PIVOT
47058+ select GRKERNSEC_CHROOT_DOUBLE
47059+ select GRKERNSEC_CHROOT_CHDIR
47060+ select GRKERNSEC_CHROOT_MKNOD
47061+ select GRKERNSEC_PROC
47062+ select GRKERNSEC_PROC_USERGROUP
47063+ select PAX_RANDUSTACK
47064+ select PAX_ASLR
47065+ select PAX_RANDMMAP
47066+ select PAX_REFCOUNT if (X86 || SPARC64)
47067+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47068+
47069+ help
47070+ If you say Y here, several features in addition to those included
47071+ in the low additional security level will be enabled. These
47072+ features provide even more security to your system, though in rare
47073+ cases they may be incompatible with very old or poorly written
47074+ software. If you enable this option, make sure that your auth
47075+ service (identd) is running as gid 1001. With this option,
47076+ the following features (in addition to those provided in the
47077+ low additional security level) will be enabled:
47078+
47079+ - Failed fork logging
47080+ - Time change logging
47081+ - Signal logging
47082+ - Deny mounts in chroot
47083+ - Deny double chrooting
47084+ - Deny sysctl writes in chroot
47085+ - Deny mknod in chroot
47086+ - Deny access to abstract AF_UNIX sockets out of chroot
47087+ - Deny pivot_root in chroot
47088+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47089+ - /proc restrictions with special GID set to 10 (usually wheel)
47090+ - Address Space Layout Randomization (ASLR)
47091+ - Prevent exploitation of most refcount overflows
47092+ - Bounds checking of copying between the kernel and userland
47093+
47094+config GRKERNSEC_HIGH
47095+ bool "High"
47096+ select GRKERNSEC_LINK
47097+ select GRKERNSEC_FIFO
47098+ select GRKERNSEC_DMESG
47099+ select GRKERNSEC_FORKFAIL
47100+ select GRKERNSEC_TIME
47101+ select GRKERNSEC_SIGNAL
47102+ select GRKERNSEC_CHROOT
47103+ select GRKERNSEC_CHROOT_SHMAT
47104+ select GRKERNSEC_CHROOT_UNIX
47105+ select GRKERNSEC_CHROOT_MOUNT
47106+ select GRKERNSEC_CHROOT_FCHDIR
47107+ select GRKERNSEC_CHROOT_PIVOT
47108+ select GRKERNSEC_CHROOT_DOUBLE
47109+ select GRKERNSEC_CHROOT_CHDIR
47110+ select GRKERNSEC_CHROOT_MKNOD
47111+ select GRKERNSEC_CHROOT_CAPS
47112+ select GRKERNSEC_CHROOT_SYSCTL
47113+ select GRKERNSEC_CHROOT_FINDTASK
47114+ select GRKERNSEC_SYSFS_RESTRICT
47115+ select GRKERNSEC_PROC
47116+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47117+ select GRKERNSEC_HIDESYM
47118+ select GRKERNSEC_BRUTE
47119+ select GRKERNSEC_PROC_USERGROUP
47120+ select GRKERNSEC_KMEM
47121+ select GRKERNSEC_RESLOG
47122+ select GRKERNSEC_RANDNET
47123+ select GRKERNSEC_PROC_ADD
47124+ select GRKERNSEC_CHROOT_CHMOD
47125+ select GRKERNSEC_CHROOT_NICE
47126+ select GRKERNSEC_SETXID
47127+ select GRKERNSEC_AUDIT_MOUNT
47128+ select GRKERNSEC_MODHARDEN if (MODULES)
47129+ select GRKERNSEC_HARDEN_PTRACE
47130+ select GRKERNSEC_PTRACE_READEXEC
47131+ select GRKERNSEC_VM86 if (X86_32)
47132+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47133+ select PAX
47134+ select PAX_RANDUSTACK
47135+ select PAX_ASLR
47136+ select PAX_RANDMMAP
47137+ select PAX_NOEXEC
47138+ select PAX_MPROTECT
47139+ select PAX_EI_PAX
47140+ select PAX_PT_PAX_FLAGS
47141+ select PAX_HAVE_ACL_FLAGS
47142+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47143+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
47144+ select PAX_RANDKSTACK if (X86_TSC && X86)
47145+ select PAX_SEGMEXEC if (X86_32)
47146+ select PAX_PAGEEXEC
47147+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47148+ select PAX_EMUTRAMP if (PARISC)
47149+ select PAX_EMUSIGRT if (PARISC)
47150+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47151+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47152+ select PAX_REFCOUNT if (X86 || SPARC64)
47153+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47154+ help
47155+ If you say Y here, many of the features of grsecurity will be
47156+ enabled, which will protect you against many kinds of attacks
47157+ against your system. The heightened security comes at a cost
47158+ of an increased chance of incompatibilities with rare software
47159+ on your machine. Since this security level enables PaX, you should
47160+ view <http://pax.grsecurity.net> and read about the PaX
47161+ project. While you are there, download chpax and run it on
47162+ binaries that cause problems with PaX. Also remember that
47163+ since the /proc restrictions are enabled, you must run your
47164+ identd as gid 1001. This security level enables the following
47165+ features in addition to those listed in the low and medium
47166+ security levels:
47167+
47168+ - Additional /proc restrictions
47169+ - Chmod restrictions in chroot
47170+ - No signals, ptrace, or viewing of processes outside of chroot
47171+ - Capability restrictions in chroot
47172+ - Deny fchdir out of chroot
47173+ - Priority restrictions in chroot
47174+ - Segmentation-based implementation of PaX
47175+ - Mprotect restrictions
47176+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47177+ - Kernel stack randomization
47178+ - Mount/unmount/remount logging
47179+ - Kernel symbol hiding
47180+ - Hardening of module auto-loading
47181+ - Ptrace restrictions
47182+ - Restricted vm86 mode
47183+ - Restricted sysfs/debugfs
47184+ - Active kernel exploit response
47185+
47186+config GRKERNSEC_CUSTOM
47187+ bool "Custom"
47188+ help
47189+ If you say Y here, you will be able to configure every grsecurity
47190+ option, which allows you to enable many more features that aren't
47191+ covered in the basic security levels. These additional features
47192+ include TPE, socket restrictions, and the sysctl system for
47193+ grsecurity. It is advised that you read through the help for
47194+ each option to determine its usefulness in your situation.
47195+
47196+endchoice
47197+
47198+menu "Address Space Protection"
47199+depends on GRKERNSEC
47200+
47201+config GRKERNSEC_KMEM
47202+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47203+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47204+ help
47205+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47206+ be written to or read from to modify or leak the contents of the running
47207+ kernel. /dev/port will also not be allowed to be opened. If you have module
47208+ support disabled, enabling this will close up four ways that are
47209+ currently used to insert malicious code into the running kernel.
47210+ Even with all these features enabled, we still highly recommend that
47211+ you use the RBAC system, as it is still possible for an attacker to
47212+ modify the running kernel through privileged I/O granted by ioperm/iopl.
47213+ If you are not using XFree86, you may be able to stop this additional
47214+ case by enabling the 'Disable privileged I/O' option. Though nothing
47215+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47216+ but only to video memory, which is the only writing we allow in this
47217+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47218+ not be allowed to mprotect it with PROT_WRITE later.
47219+ It is highly recommended that you say Y here if you meet all the
47220+ conditions above.
47221+
47222+config GRKERNSEC_VM86
47223+ bool "Restrict VM86 mode"
47224+ depends on X86_32
47225+
47226+ help
47227+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47228+ make use of a special execution mode on 32bit x86 processors called
47229+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47230+ video cards and will still work with this option enabled. The purpose
47231+ of the option is to prevent exploitation of emulation errors in
47232+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47233+ Nearly all users should be able to enable this option.
47234+
47235+config GRKERNSEC_IO
47236+ bool "Disable privileged I/O"
47237+ depends on X86
47238+ select RTC_CLASS
47239+ select RTC_INTF_DEV
47240+ select RTC_DRV_CMOS
47241+
47242+ help
47243+ If you say Y here, all ioperm and iopl calls will return an error.
47244+ Ioperm and iopl can be used to modify the running kernel.
47245+ Unfortunately, some programs need this access to operate properly,
47246+ the most notable of which are XFree86 and hwclock. hwclock can be
47247+ remedied by having RTC support in the kernel, so real-time
47248+ clock support is enabled if this option is enabled, to ensure
47249+ that hwclock operates correctly. XFree86 still will not
47250+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47251+ IF YOU USE XFree86. If you use XFree86 and you still want to
47252+ protect your kernel against modification, use the RBAC system.
47253+
47254+config GRKERNSEC_PROC_MEMMAP
47255+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47256+ default y if (PAX_NOEXEC || PAX_ASLR)
47257+ depends on PAX_NOEXEC || PAX_ASLR
47258+ help
47259+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47260+ give no information about the addresses of its mappings if
47261+ PaX features that rely on random addresses are enabled on the task.
47262+ If you use PaX it is greatly recommended that you say Y here as it
47263+ closes up a hole that makes the full ASLR useless for suid
47264+ binaries.
47265+
47266+config GRKERNSEC_BRUTE
47267+ bool "Deter exploit bruteforcing"
47268+ help
47269+ If you say Y here, attempts to bruteforce exploits against forking
47270+ daemons such as apache or sshd, as well as against suid/sgid binaries
47271+ will be deterred. When a child of a forking daemon is killed by PaX
47272+ or crashes due to an illegal instruction or other suspicious signal,
47273+ the parent process will be delayed 30 seconds upon every subsequent
47274+ fork until the administrator is able to assess the situation and
47275+ restart the daemon.
47276+ In the suid/sgid case, the attempt is logged, the user has all their
47277+ processes terminated, and they are prevented from executing any further
47278+ processes for 15 minutes.
47279+ It is recommended that you also enable signal logging in the auditing
47280+ section so that logs are generated when a process triggers a suspicious
47281+ signal.
47282+ If the sysctl option is enabled, a sysctl option with name
47283+ "deter_bruteforce" is created.
47284+
47285+
47286+config GRKERNSEC_MODHARDEN
47287+ bool "Harden module auto-loading"
47288+ depends on MODULES
47289+ help
47290+ If you say Y here, module auto-loading in response to use of some
47291+ feature implemented by an unloaded module will be restricted to
47292+ root users. Enabling this option helps defend against attacks
47293+ by unprivileged users who abuse the auto-loading behavior to
47294+ cause a vulnerable module to load that is then exploited.
47295+
47296+ If this option prevents a legitimate use of auto-loading for a
47297+ non-root user, the administrator can execute modprobe manually
47298+ with the exact name of the module mentioned in the alert log.
47299+ Alternatively, the administrator can add the module to the list
47300+ of modules loaded at boot by modifying init scripts.
47301+
47302+ Modification of init scripts will most likely be needed on
47303+ Ubuntu servers with encrypted home directory support enabled,
47304+ as the first non-root user logging in will cause the ecb(aes),
47305+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47306+
47307+config GRKERNSEC_HIDESYM
47308+ bool "Hide kernel symbols"
47309+ help
47310+ If you say Y here, getting information on loaded modules, and
47311+ displaying all kernel symbols through a syscall will be restricted
47312+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47313+ /proc/kallsyms will be restricted to the root user. The RBAC
47314+ system can hide that entry even from root.
47315+
47316+ This option also prevents leaking of kernel addresses through
47317+ several /proc entries.
47318+
47319+ Note that this option is only effective provided the following
47320+ conditions are met:
47321+ 1) The kernel using grsecurity is not precompiled by some distribution
47322+ 2) You have also enabled GRKERNSEC_DMESG
47323+ 3) You are using the RBAC system and hiding other files such as your
47324+ kernel image and System.map. Alternatively, enabling this option
47325+ causes the permissions on /boot, /lib/modules, and the kernel
47326+ source directory to change at compile time to prevent
47327+ reading by non-root users.
47328+ If the above conditions are met, this option will aid in providing a
47329+ useful protection against local kernel exploitation of overflows
47330+ and arbitrary read/write vulnerabilities.
47331+
47332+config GRKERNSEC_KERN_LOCKOUT
47333+ bool "Active kernel exploit response"
47334+ depends on X86 || ARM || PPC || SPARC
47335+ help
47336+ If you say Y here, when a PaX alert is triggered due to suspicious
47337+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47338+ or an OOPs occurs due to bad memory accesses, instead of just
47339+ terminating the offending process (and potentially allowing
47340+ a subsequent exploit from the same user), we will take one of two
47341+ actions:
47342+ If the user was root, we will panic the system
47343+ If the user was non-root, we will log the attempt, terminate
47344+ all processes owned by the user, then prevent them from creating
47345+ any new processes until the system is restarted
47346+ This deters repeated kernel exploitation/bruteforcing attempts
47347+ and is useful for later forensics.
47348+
47349+endmenu
47350+menu "Role Based Access Control Options"
47351+depends on GRKERNSEC
47352+
47353+config GRKERNSEC_RBAC_DEBUG
47354+ bool
47355+
47356+config GRKERNSEC_NO_RBAC
47357+ bool "Disable RBAC system"
47358+ help
47359+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47360+ preventing the RBAC system from being enabled. You should only say Y
47361+ here if you have no intention of using the RBAC system, so as to prevent
47362+ an attacker with root access from misusing the RBAC system to hide files
47363+ and processes when loadable module support and /dev/[k]mem have been
47364+ locked down.
47365+
47366+config GRKERNSEC_ACL_HIDEKERN
47367+ bool "Hide kernel processes"
47368+ help
47369+ If you say Y here, all kernel threads will be hidden to all
47370+ processes but those whose subject has the "view hidden processes"
47371+ flag.
47372+
47373+config GRKERNSEC_ACL_MAXTRIES
47374+ int "Maximum tries before password lockout"
47375+ default 3
47376+ help
47377+ This option enforces the maximum number of times a user can attempt
47378+ to authorize themselves with the grsecurity RBAC system before being
47379+ denied the ability to attempt authorization again for a specified time.
47380+ The lower the number, the harder it will be to brute-force a password.
47381+
47382+config GRKERNSEC_ACL_TIMEOUT
47383+ int "Time to wait after max password tries, in seconds"
47384+ default 30
47385+ help
47386+ This option specifies the time the user must wait after attempting to
47387+ authorize to the RBAC system with the maximum number of invalid
47388+ passwords. The higher the number, the harder it will be to brute-force
47389+ a password.
47390+
47391+endmenu
47392+menu "Filesystem Protections"
47393+depends on GRKERNSEC
47394+
47395+config GRKERNSEC_PROC
47396+ bool "Proc restrictions"
47397+ help
47398+ If you say Y here, the permissions of the /proc filesystem
47399+ will be altered to enhance system security and privacy. You MUST
47400+ choose either a user only restriction or a user and group restriction.
47401+ Depending upon the option you choose, you can either restrict users to
47402+ see only the processes they themselves run, or choose a group that can
47403+ view all processes and files normally restricted to root if you choose
47404+ the "restrict to user only" option. NOTE: If you're running identd as
47405+ a non-root user, you will have to run it as the group you specify here.
47406+
47407+config GRKERNSEC_PROC_USER
47408+ bool "Restrict /proc to user only"
47409+ depends on GRKERNSEC_PROC
47410+ help
47411+ If you say Y here, non-root users will only be able to view their own
47412+ processes, and restricts them from viewing network-related information,
47413+ and viewing kernel symbol and module information.
47414+
47415+config GRKERNSEC_PROC_USERGROUP
47416+ bool "Allow special group"
47417+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47418+ help
47419+ If you say Y here, you will be able to select a group that will be
47420+ able to view all processes and network-related information. If you've
47421+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47422+ remain hidden. This option is useful if you want to run identd as
47423+ a non-root user.
47424+
47425+config GRKERNSEC_PROC_GID
47426+ int "GID for special group"
47427+ depends on GRKERNSEC_PROC_USERGROUP
47428+ default 1001
47429+
47430+config GRKERNSEC_PROC_ADD
47431+ bool "Additional restrictions"
47432+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47433+ help
47434+ If you say Y here, additional restrictions will be placed on
47435+ /proc that keep normal users from viewing device information and
47436+ slabinfo information that could be useful for exploits.
47437+
47438+config GRKERNSEC_LINK
47439+ bool "Linking restrictions"
47440+ help
47441+ If you say Y here, /tmp race exploits will be prevented, since users
47442+ will no longer be able to follow symlinks owned by other users in
47443+ world-writable +t directories (e.g. /tmp), unless the owner of the
47444+	  symlink is the owner of the directory. Users will also not be
47445+ able to hardlink to files they do not own. If the sysctl option is
47446+ enabled, a sysctl option with name "linking_restrictions" is created.
47447+
47448+config GRKERNSEC_FIFO
47449+ bool "FIFO restrictions"
47450+ help
47451+ If you say Y here, users will not be able to write to FIFOs they don't
47452+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47453+ the FIFO is the same owner of the directory it's held in. If the sysctl
47454+ option is enabled, a sysctl option with name "fifo_restrictions" is
47455+ created.
47456+
47457+config GRKERNSEC_SYSFS_RESTRICT
47458+ bool "Sysfs/debugfs restriction"
47459+ depends on SYSFS
47460+ help
47461+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47462+ any filesystem normally mounted under it (e.g. debugfs) will be
47463+ mostly accessible only by root. These filesystems generally provide access
47464+ to hardware and debug information that isn't appropriate for unprivileged
47465+ users of the system. Sysfs and debugfs have also become a large source
47466+ of new vulnerabilities, ranging from infoleaks to local compromise.
47467+ There has been very little oversight with an eye toward security involved
47468+ in adding new exporters of information to these filesystems, so their
47469+ use is discouraged.
47470+ For reasons of compatibility, a few directories have been whitelisted
47471+ for access by non-root users:
47472+ /sys/fs/selinux
47473+ /sys/fs/fuse
47474+ /sys/devices/system/cpu
47475+
47476+config GRKERNSEC_ROFS
47477+ bool "Runtime read-only mount protection"
47478+ help
47479+ If you say Y here, a sysctl option with name "romount_protect" will
47480+ be created. By setting this option to 1 at runtime, filesystems
47481+ will be protected in the following ways:
47482+ * No new writable mounts will be allowed
47483+ * Existing read-only mounts won't be able to be remounted read/write
47484+ * Write operations will be denied on all block devices
47485+ This option acts independently of grsec_lock: once it is set to 1,
47486+ it cannot be turned off. Therefore, please be mindful of the resulting
47487+ behavior if this option is enabled in an init script on a read-only
47488+ filesystem. This feature is mainly intended for secure embedded systems.
47489+
47490+config GRKERNSEC_CHROOT
47491+ bool "Chroot jail restrictions"
47492+ help
47493+ If you say Y here, you will be able to choose several options that will
47494+ make breaking out of a chrooted jail much more difficult. If you
47495+ encounter no software incompatibilities with the following options, it
47496+ is recommended that you enable each one.
47497+
47498+config GRKERNSEC_CHROOT_MOUNT
47499+ bool "Deny mounts"
47500+ depends on GRKERNSEC_CHROOT
47501+ help
47502+ If you say Y here, processes inside a chroot will not be able to
47503+ mount or remount filesystems. If the sysctl option is enabled, a
47504+ sysctl option with name "chroot_deny_mount" is created.
47505+
47506+config GRKERNSEC_CHROOT_DOUBLE
47507+ bool "Deny double-chroots"
47508+ depends on GRKERNSEC_CHROOT
47509+ help
47510+ If you say Y here, processes inside a chroot will not be able to chroot
47511+ again outside the chroot. This is a widely used method of breaking
47512+ out of a chroot jail and should not be allowed. If the sysctl
47513+ option is enabled, a sysctl option with name
47514+ "chroot_deny_chroot" is created.
47515+
47516+config GRKERNSEC_CHROOT_PIVOT
47517+ bool "Deny pivot_root in chroot"
47518+ depends on GRKERNSEC_CHROOT
47519+ help
47520+ If you say Y here, processes inside a chroot will not be able to use
47521+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47522+	  works similarly to chroot in that it changes the root filesystem. This
47523+ function could be misused in a chrooted process to attempt to break out
47524+ of the chroot, and therefore should not be allowed. If the sysctl
47525+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47526+ created.
47527+
47528+config GRKERNSEC_CHROOT_CHDIR
47529+ bool "Enforce chdir(\"/\") on all chroots"
47530+ depends on GRKERNSEC_CHROOT
47531+ help
47532+ If you say Y here, the current working directory of all newly-chrooted
47533+	  applications will be set to the root directory of the chroot.
47534+ The man page on chroot(2) states:
47535+ Note that this call does not change the current working
47536+ directory, so that `.' can be outside the tree rooted at
47537+ `/'. In particular, the super-user can escape from a
47538+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47539+
47540+ It is recommended that you say Y here, since it's not known to break
47541+ any software. If the sysctl option is enabled, a sysctl option with
47542+ name "chroot_enforce_chdir" is created.
47543+
47544+config GRKERNSEC_CHROOT_CHMOD
47545+ bool "Deny (f)chmod +s"
47546+ depends on GRKERNSEC_CHROOT
47547+ help
47548+ If you say Y here, processes inside a chroot will not be able to chmod
47549+ or fchmod files to make them have suid or sgid bits. This protects
47550+ against another published method of breaking a chroot. If the sysctl
47551+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47552+ created.
47553+
47554+config GRKERNSEC_CHROOT_FCHDIR
47555+ bool "Deny fchdir out of chroot"
47556+ depends on GRKERNSEC_CHROOT
47557+ help
47558+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47559+ to a file descriptor of the chrooting process that points to a directory
47560+ outside the filesystem will be stopped. If the sysctl option
47561+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47562+
47563+config GRKERNSEC_CHROOT_MKNOD
47564+ bool "Deny mknod"
47565+ depends on GRKERNSEC_CHROOT
47566+ help
47567+ If you say Y here, processes inside a chroot will not be allowed to
47568+ mknod. The problem with using mknod inside a chroot is that it
47569+ would allow an attacker to create a device entry that is the same
47570+ as one on the physical root of your system, which could range from
47571+ anything from the console device to a device for your harddrive (which
47572+ they could then use to wipe the drive or steal data). It is recommended
47573+ that you say Y here, unless you run into software incompatibilities.
47574+ If the sysctl option is enabled, a sysctl option with name
47575+ "chroot_deny_mknod" is created.
47576+
47577+config GRKERNSEC_CHROOT_SHMAT
47578+ bool "Deny shmat() out of chroot"
47579+ depends on GRKERNSEC_CHROOT
47580+ help
47581+ If you say Y here, processes inside a chroot will not be able to attach
47582+ to shared memory segments that were created outside of the chroot jail.
47583+ It is recommended that you say Y here. If the sysctl option is enabled,
47584+ a sysctl option with name "chroot_deny_shmat" is created.
47585+
47586+config GRKERNSEC_CHROOT_UNIX
47587+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47588+ depends on GRKERNSEC_CHROOT
47589+ help
47590+ If you say Y here, processes inside a chroot will not be able to
47591+ connect to abstract (meaning not belonging to a filesystem) Unix
47592+ domain sockets that were bound outside of a chroot. It is recommended
47593+ that you say Y here. If the sysctl option is enabled, a sysctl option
47594+ with name "chroot_deny_unix" is created.
47595+
47596+config GRKERNSEC_CHROOT_FINDTASK
47597+ bool "Protect outside processes"
47598+ depends on GRKERNSEC_CHROOT
47599+ help
47600+ If you say Y here, processes inside a chroot will not be able to
47601+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47602+ getsid, or view any process outside of the chroot. If the sysctl
47603+ option is enabled, a sysctl option with name "chroot_findtask" is
47604+ created.
47605+
47606+config GRKERNSEC_CHROOT_NICE
47607+ bool "Restrict priority changes"
47608+ depends on GRKERNSEC_CHROOT
47609+ help
47610+ If you say Y here, processes inside a chroot will not be able to raise
47611+ the priority of processes in the chroot, or alter the priority of
47612+ processes outside the chroot. This provides more security than simply
47613+ removing CAP_SYS_NICE from the process' capability set. If the
47614+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47615+ is created.
47616+
47617+config GRKERNSEC_CHROOT_SYSCTL
47618+ bool "Deny sysctl writes"
47619+ depends on GRKERNSEC_CHROOT
47620+ help
47621+ If you say Y here, an attacker in a chroot will not be able to
47622+ write to sysctl entries, either by sysctl(2) or through a /proc
47623+ interface. It is strongly recommended that you say Y here. If the
47624+ sysctl option is enabled, a sysctl option with name
47625+ "chroot_deny_sysctl" is created.
47626+
47627+config GRKERNSEC_CHROOT_CAPS
47628+ bool "Capability restrictions"
47629+ depends on GRKERNSEC_CHROOT
47630+ help
47631+ If you say Y here, the capabilities on all processes within a
47632+ chroot jail will be lowered to stop module insertion, raw i/o,
47633+ system and net admin tasks, rebooting the system, modifying immutable
47634+ files, modifying IPC owned by another, and changing the system time.
47635+ This is left an option because it can break some apps. Disable this
47636+ if your chrooted apps are having problems performing those kinds of
47637+ tasks. If the sysctl option is enabled, a sysctl option with
47638+ name "chroot_caps" is created.
47639+
47640+endmenu
47641+menu "Kernel Auditing"
47642+depends on GRKERNSEC
47643+
47644+config GRKERNSEC_AUDIT_GROUP
47645+ bool "Single group for auditing"
47646+ help
47647+ If you say Y here, the exec, chdir, and (un)mount logging features
47648+ will only operate on a group you specify. This option is recommended
47649+ if you only want to watch certain users instead of having a large
47650+ amount of logs from the entire system. If the sysctl option is enabled,
47651+ a sysctl option with name "audit_group" is created.
47652+
47653+config GRKERNSEC_AUDIT_GID
47654+ int "GID for auditing"
47655+ depends on GRKERNSEC_AUDIT_GROUP
47656+ default 1007
47657+
47658+config GRKERNSEC_EXECLOG
47659+ bool "Exec logging"
47660+ help
47661+ If you say Y here, all execve() calls will be logged (since the
47662+ other exec*() calls are frontends to execve(), all execution
47663+ will be logged). Useful for shell-servers that like to keep track
47664+ of their users. If the sysctl option is enabled, a sysctl option with
47665+ name "exec_logging" is created.
47666+ WARNING: This option when enabled will produce a LOT of logs, especially
47667+ on an active system.
47668+
47669+config GRKERNSEC_RESLOG
47670+ bool "Resource logging"
47671+ help
47672+ If you say Y here, all attempts to overstep resource limits will
47673+ be logged with the resource name, the requested size, and the current
47674+ limit. It is highly recommended that you say Y here. If the sysctl
47675+ option is enabled, a sysctl option with name "resource_logging" is
47676+ created. If the RBAC system is enabled, the sysctl value is ignored.
47677+
47678+config GRKERNSEC_CHROOT_EXECLOG
47679+ bool "Log execs within chroot"
47680+ help
47681+ If you say Y here, all executions inside a chroot jail will be logged
47682+ to syslog. This can cause a large amount of logs if certain
47683+ applications (eg. djb's daemontools) are installed on the system, and
47684+ is therefore left as an option. If the sysctl option is enabled, a
47685+ sysctl option with name "chroot_execlog" is created.
47686+
47687+config GRKERNSEC_AUDIT_PTRACE
47688+ bool "Ptrace logging"
47689+ help
47690+ If you say Y here, all attempts to attach to a process via ptrace
47691+ will be logged. If the sysctl option is enabled, a sysctl option
47692+ with name "audit_ptrace" is created.
47693+
47694+config GRKERNSEC_AUDIT_CHDIR
47695+ bool "Chdir logging"
47696+ help
47697+ If you say Y here, all chdir() calls will be logged. If the sysctl
47698+ option is enabled, a sysctl option with name "audit_chdir" is created.
47699+
47700+config GRKERNSEC_AUDIT_MOUNT
47701+ bool "(Un)Mount logging"
47702+ help
47703+ If you say Y here, all mounts and unmounts will be logged. If the
47704+ sysctl option is enabled, a sysctl option with name "audit_mount" is
47705+ created.
47706+
47707+config GRKERNSEC_SIGNAL
47708+ bool "Signal logging"
47709+ help
47710+ If you say Y here, certain important signals will be logged, such as
47711+	  SIGSEGV, which will as a result inform you when an error in a program
47712+ occurred, which in some cases could mean a possible exploit attempt.
47713+ If the sysctl option is enabled, a sysctl option with name
47714+ "signal_logging" is created.
47715+
47716+config GRKERNSEC_FORKFAIL
47717+ bool "Fork failure logging"
47718+ help
47719+ If you say Y here, all failed fork() attempts will be logged.
47720+ This could suggest a fork bomb, or someone attempting to overstep
47721+ their process limit. If the sysctl option is enabled, a sysctl option
47722+ with name "forkfail_logging" is created.
47723+
47724+config GRKERNSEC_TIME
47725+ bool "Time change logging"
47726+ help
47727+ If you say Y here, any changes of the system clock will be logged.
47728+ If the sysctl option is enabled, a sysctl option with name
47729+ "timechange_logging" is created.
47730+
47731+config GRKERNSEC_PROC_IPADDR
47732+ bool "/proc/<pid>/ipaddr support"
47733+ help
47734+ If you say Y here, a new entry will be added to each /proc/<pid>
47735+ directory that contains the IP address of the person using the task.
47736+ The IP is carried across local TCP and AF_UNIX stream sockets.
47737+ This information can be useful for IDS/IPSes to perform remote response
47738+ to a local attack. The entry is readable by only the owner of the
47739+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47740+ the RBAC system), and thus does not create privacy concerns.
47741+
47742+config GRKERNSEC_RWXMAP_LOG
47743+ bool 'Denied RWX mmap/mprotect logging'
47744+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47745+ help
47746+ If you say Y here, calls to mmap() and mprotect() with explicit
47747+ usage of PROT_WRITE and PROT_EXEC together will be logged when
47748+ denied by the PAX_MPROTECT feature. If the sysctl option is
47749+ enabled, a sysctl option with name "rwxmap_logging" is created.
47750+
47751+config GRKERNSEC_AUDIT_TEXTREL
47752+ bool 'ELF text relocations logging (READ HELP)'
47753+ depends on PAX_MPROTECT
47754+ help
47755+ If you say Y here, text relocations will be logged with the filename
47756+ of the offending library or binary. The purpose of the feature is
47757+ to help Linux distribution developers get rid of libraries and
47758+ binaries that need text relocations which hinder the future progress
47759+ of PaX. Only Linux distribution developers should say Y here, and
47760+ never on a production machine, as this option creates an information
47761+ leak that could aid an attacker in defeating the randomization of
47762+ a single memory region. If the sysctl option is enabled, a sysctl
47763+ option with name "audit_textrel" is created.
47764+
47765+endmenu
47766+
47767+menu "Executable Protections"
47768+depends on GRKERNSEC
47769+
47770+config GRKERNSEC_DMESG
47771+ bool "Dmesg(8) restriction"
47772+ help
47773+ If you say Y here, non-root users will not be able to use dmesg(8)
47774+ to view up to the last 4kb of messages in the kernel's log buffer.
47775+ The kernel's log buffer often contains kernel addresses and other
47776+ identifying information useful to an attacker in fingerprinting a
47777+ system for a targeted exploit.
47778+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
47779+ created.
47780+
47781+config GRKERNSEC_HARDEN_PTRACE
47782+ bool "Deter ptrace-based process snooping"
47783+ help
47784+ If you say Y here, TTY sniffers and other malicious monitoring
47785+ programs implemented through ptrace will be defeated. If you
47786+ have been using the RBAC system, this option has already been
47787+ enabled for several years for all users, with the ability to make
47788+ fine-grained exceptions.
47789+
47790+ This option only affects the ability of non-root users to ptrace
47791+ processes that are not a descendent of the ptracing process.
47792+ This means that strace ./binary and gdb ./binary will still work,
47793+ but attaching to arbitrary processes will not. If the sysctl
47794+ option is enabled, a sysctl option with name "harden_ptrace" is
47795+ created.
47796+
47797+config GRKERNSEC_PTRACE_READEXEC
47798+ bool "Require read access to ptrace sensitive binaries"
47799+ help
47800+ If you say Y here, unprivileged users will not be able to ptrace unreadable
47801+ binaries. This option is useful in environments that
47802+ remove the read bits (e.g. file mode 4711) from suid binaries to
47803+ prevent infoleaking of their contents. This option adds
47804+ consistency to the use of that file mode, as the binary could normally
47805+ be read out when run without privileges while ptracing.
47806+
47807+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47808+ is created.
47809+
47810+config GRKERNSEC_SETXID
47811+ bool "Enforce consistent multithreaded privileges"
47812+ help
47813+ If you say Y here, a change from a root uid to a non-root uid
47814+ in a multithreaded application will cause the resulting uids,
47815+ gids, supplementary groups, and capabilities in that thread
47816+ to be propagated to the other threads of the process. In most
47817+ cases this is unnecessary, as glibc will emulate this behavior
47818+ on behalf of the application. Other libcs do not act in the
47819+ same way, allowing the other threads of the process to continue
47820+ running with root privileges. If the sysctl option is enabled,
47821+ a sysctl option with name "consistent_setxid" is created.
47822+
47823+config GRKERNSEC_TPE
47824+ bool "Trusted Path Execution (TPE)"
47825+ help
47826+ If you say Y here, you will be able to choose a gid to add to the
47827+ supplementary groups of users you want to mark as "untrusted."
47828+ These users will not be able to execute any files that are not in
47829+ root-owned directories writable only by root. If the sysctl option
47830+ is enabled, a sysctl option with name "tpe" is created.
47831+
47832+config GRKERNSEC_TPE_ALL
47833+ bool "Partially restrict all non-root users"
47834+ depends on GRKERNSEC_TPE
47835+ help
47836+ If you say Y here, all non-root users will be covered under
47837+ a weaker TPE restriction. This is separate from, and in addition to,
47838+ the main TPE options that you have selected elsewhere. Thus, if a
47839+ "trusted" GID is chosen, this restriction applies to even that GID.
47840+ Under this restriction, all non-root users will only be allowed to
47841+ execute files in directories they own that are not group or
47842+ world-writable, or in directories owned by root and writable only by
47843+ root. If the sysctl option is enabled, a sysctl option with name
47844+ "tpe_restrict_all" is created.
47845+
47846+config GRKERNSEC_TPE_INVERT
47847+ bool "Invert GID option"
47848+ depends on GRKERNSEC_TPE
47849+ help
47850+ If you say Y here, the group you specify in the TPE configuration will
47851+ decide what group TPE restrictions will be *disabled* for. This
47852+ option is useful if you want TPE restrictions to be applied to most
47853+ users on the system. If the sysctl option is enabled, a sysctl option
47854+ with name "tpe_invert" is created. Unlike other sysctl options, this
47855+ entry will default to on for backward-compatibility.
47856+
47857+config GRKERNSEC_TPE_GID
47858+ int "GID for untrusted users"
47859+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
47860+ default 1005
47861+ help
47862+ Setting this GID determines what group TPE restrictions will be
47863+ *enabled* for. If the sysctl option is enabled, a sysctl option
47864+ with name "tpe_gid" is created.
47865+
47866+config GRKERNSEC_TPE_GID
47867+ int "GID for trusted users"
47868+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
47869+ default 1005
47870+ help
47871+ Setting this GID determines what group TPE restrictions will be
47872+ *disabled* for. If the sysctl option is enabled, a sysctl option
47873+ with name "tpe_gid" is created.
47874+
47875+endmenu
47876+menu "Network Protections"
47877+depends on GRKERNSEC
47878+
47879+config GRKERNSEC_RANDNET
47880+ bool "Larger entropy pools"
47881+ help
47882+ If you say Y here, the entropy pools used for many features of Linux
47883+ and grsecurity will be doubled in size. Since several grsecurity
47884+ features use additional randomness, it is recommended that you say Y
47885+ here. Saying Y here has a similar effect as modifying
47886+ /proc/sys/kernel/random/poolsize.
47887+
47888+config GRKERNSEC_BLACKHOLE
47889+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
47890+ depends on NET
47891+ help
47892+ If you say Y here, neither TCP resets nor ICMP
47893+ destination-unreachable packets will be sent in response to packets
47894+ sent to ports for which no associated listening process exists.
47895+ This feature supports both IPV4 and IPV6 and exempts the
47896+ loopback interface from blackholing. Enabling this feature
47897+ makes a host more resilient to DoS attacks and reduces network
47898+ visibility against scanners.
47899+
47900+ The blackhole feature as-implemented is equivalent to the FreeBSD
47901+ blackhole feature, as it prevents RST responses to all packets, not
47902+ just SYNs. Under most application behavior this causes no
47903+ problems, but applications (like haproxy) may not close certain
47904+ connections in a way that cleanly terminates them on the remote
47905+ end, leaving the remote host in LAST_ACK state. Because of this
47906+ side-effect and to prevent intentional LAST_ACK DoSes, this
47907+ feature also adds automatic mitigation against such attacks.
47908+ The mitigation drastically reduces the amount of time a socket
47909+ can spend in LAST_ACK state. If you're using haproxy and not
47910+ all servers it connects to have this option enabled, consider
47911+ disabling this feature on the haproxy host.
47912+
47913+ If the sysctl option is enabled, two sysctl options with names
47914+ "ip_blackhole" and "lastack_retries" will be created.
47915+ While "ip_blackhole" takes the standard zero/non-zero on/off
47916+ toggle, "lastack_retries" uses the same kinds of values as
47917+ "tcp_retries1" and "tcp_retries2". The default value of 4
47918+ prevents a socket from lasting more than 45 seconds in LAST_ACK
47919+ state.
47920+
47921+config GRKERNSEC_SOCKET
47922+ bool "Socket restrictions"
47923+ depends on NET
47924+ help
47925+ If you say Y here, you will be able to choose from several options.
47926+ If you assign a GID on your system and add it to the supplementary
47927+ groups of users you want to restrict socket access to, this patch
47928+ will perform up to three things, based on the option(s) you choose.
47929+
47930+config GRKERNSEC_SOCKET_ALL
47931+ bool "Deny any sockets to group"
47932+ depends on GRKERNSEC_SOCKET
47933+ help
47934+	  If you say Y here, you will be able to choose a GID whose users will
47935+ be unable to connect to other hosts from your machine or run server
47936+ applications from your machine. If the sysctl option is enabled, a
47937+ sysctl option with name "socket_all" is created.
47938+
47939+config GRKERNSEC_SOCKET_ALL_GID
47940+ int "GID to deny all sockets for"
47941+ depends on GRKERNSEC_SOCKET_ALL
47942+ default 1004
47943+ help
47944+ Here you can choose the GID to disable socket access for. Remember to
47945+ add the users you want socket access disabled for to the GID
47946+ specified here. If the sysctl option is enabled, a sysctl option
47947+ with name "socket_all_gid" is created.
47948+
47949+config GRKERNSEC_SOCKET_CLIENT
47950+ bool "Deny client sockets to group"
47951+ depends on GRKERNSEC_SOCKET
47952+ help
47953+	  If you say Y here, you will be able to choose a GID whose users will
47954+ be unable to connect to other hosts from your machine, but will be
47955+ able to run servers. If this option is enabled, all users in the group
47956+ you specify will have to use passive mode when initiating ftp transfers
47957+ from the shell on your machine. If the sysctl option is enabled, a
47958+ sysctl option with name "socket_client" is created.
47959+
47960+config GRKERNSEC_SOCKET_CLIENT_GID
47961+ int "GID to deny client sockets for"
47962+ depends on GRKERNSEC_SOCKET_CLIENT
47963+ default 1003
47964+ help
47965+ Here you can choose the GID to disable client socket access for.
47966+ Remember to add the users you want client socket access disabled for to
47967+ the GID specified here. If the sysctl option is enabled, a sysctl
47968+ option with name "socket_client_gid" is created.
47969+
47970+config GRKERNSEC_SOCKET_SERVER
47971+ bool "Deny server sockets to group"
47972+ depends on GRKERNSEC_SOCKET
47973+ help
47974+	  If you say Y here, you will be able to choose a GID whose users will
47975+ be unable to run server applications from your machine. If the sysctl
47976+ option is enabled, a sysctl option with name "socket_server" is created.
47977+
47978+config GRKERNSEC_SOCKET_SERVER_GID
47979+ int "GID to deny server sockets for"
47980+ depends on GRKERNSEC_SOCKET_SERVER
47981+ default 1002
47982+ help
47983+ Here you can choose the GID to disable server socket access for.
47984+ Remember to add the users you want server socket access disabled for to
47985+ the GID specified here. If the sysctl option is enabled, a sysctl
47986+ option with name "socket_server_gid" is created.
47987+
47988+endmenu
47989+menu "Sysctl support"
47990+depends on GRKERNSEC && SYSCTL
47991+
47992+config GRKERNSEC_SYSCTL
47993+ bool "Sysctl support"
47994+ help
47995+ If you say Y here, you will be able to change the options that
47996+ grsecurity runs with at bootup, without having to recompile your
47997+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
47998+ to enable (1) or disable (0) various features. All the sysctl entries
47999+ are mutable until the "grsec_lock" entry is set to a non-zero value.
48000+ All features enabled in the kernel configuration are disabled at boot
48001+ if you do not say Y to the "Turn on features by default" option.
48002+ All options should be set at startup, and the grsec_lock entry should
48003+ be set to a non-zero value after all the options are set.
48004+ *THIS IS EXTREMELY IMPORTANT*
48005+
48006+config GRKERNSEC_SYSCTL_DISTRO
48007+ bool "Extra sysctl support for distro makers (READ HELP)"
48008+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48009+ help
48010+ If you say Y here, additional sysctl options will be created
48011+ for features that affect processes running as root. Therefore,
48012+ it is critical when using this option that the grsec_lock entry be
48013+ enabled after boot. Only distros with prebuilt kernel packages
48014+ with this option enabled that can ensure grsec_lock is enabled
48015+ after boot should use this option.
48016+ *Failure to set grsec_lock after boot makes all grsec features
48017+ this option covers useless*
48018+
48019+ Currently this option creates the following sysctl entries:
48020+ "Disable Privileged I/O": "disable_priv_io"
48021+
48022+config GRKERNSEC_SYSCTL_ON
48023+ bool "Turn on features by default"
48024+ depends on GRKERNSEC_SYSCTL
48025+ help
48026+ If you say Y here, instead of having all features enabled in the
48027+ kernel configuration disabled at boot time, the features will be
48028+ enabled at boot time. It is recommended you say Y here unless
48029+ there is some reason you would want all sysctl-tunable features to
48030+ be disabled by default. As mentioned elsewhere, it is important
48031+ to enable the grsec_lock entry once you have finished modifying
48032+ the sysctl entries.
48033+
48034+endmenu
48035+menu "Logging Options"
48036+depends on GRKERNSEC
48037+
48038+config GRKERNSEC_FLOODTIME
48039+ int "Seconds in between log messages (minimum)"
48040+ default 10
48041+ help
48042+ This option allows you to enforce the number of seconds between
48043+ grsecurity log messages. The default should be suitable for most
48044+ people, however, if you choose to change it, choose a value small enough
48045+ to allow informative logs to be produced, but large enough to
48046+ prevent flooding.
48047+
48048+config GRKERNSEC_FLOODBURST
48049+ int "Number of messages in a burst (maximum)"
48050+ default 6
48051+ help
48052+ This option allows you to choose the maximum number of messages allowed
48053+ within the flood time interval you chose in a separate option. The
48054+ default should be suitable for most people, however if you find that
48055+ many of your logs are being interpreted as flooding, you may want to
48056+ raise this value.
48057+
48058+endmenu
48059+
48060+endmenu
48061diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48062new file mode 100644
48063index 0000000..be9ae3a
48064--- /dev/null
48065+++ b/grsecurity/Makefile
48066@@ -0,0 +1,36 @@
48067+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48068+# during 2001-2009 it has been completely redesigned by Brad Spengler
48069+# into an RBAC system
48070+#
48071+# All code in this directory and various hooks inserted throughout the kernel
48072+# are copyright Brad Spengler - Open Source Security, Inc., and released
48073+# under the GPL v2 or higher
48074+
48075+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48076+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
48077+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48078+
48079+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48080+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48081+ gracl_learn.o grsec_log.o
48082+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48083+
48084+ifdef CONFIG_NET
48085+obj-y += grsec_sock.o
48086+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48087+endif
48088+
48089+ifndef CONFIG_GRKERNSEC
48090+obj-y += grsec_disabled.o
48091+endif
48092+
48093+ifdef CONFIG_GRKERNSEC_HIDESYM
48094+extra-y := grsec_hidesym.o
48095+$(obj)/grsec_hidesym.o:
48096+ @-chmod -f 500 /boot
48097+ @-chmod -f 500 /lib/modules
48098+ @-chmod -f 500 /lib64/modules
48099+ @-chmod -f 500 /lib32/modules
48100+ @-chmod -f 700 .
48101+ @echo ' grsec: protected kernel image paths'
48102+endif
48103diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48104new file mode 100644
48105index 0000000..d3b423d
48106--- /dev/null
48107+++ b/grsecurity/gracl.c
48108@@ -0,0 +1,4155 @@
48109+#include <linux/kernel.h>
48110+#include <linux/module.h>
48111+#include <linux/sched.h>
48112+#include <linux/mm.h>
48113+#include <linux/file.h>
48114+#include <linux/fs.h>
48115+#include <linux/namei.h>
48116+#include <linux/mount.h>
48117+#include <linux/tty.h>
48118+#include <linux/proc_fs.h>
48119+#include <linux/lglock.h>
48120+#include <linux/slab.h>
48121+#include <linux/vmalloc.h>
48122+#include <linux/types.h>
48123+#include <linux/sysctl.h>
48124+#include <linux/netdevice.h>
48125+#include <linux/ptrace.h>
48126+#include <linux/gracl.h>
48127+#include <linux/gralloc.h>
48128+#include <linux/security.h>
48129+#include <linux/grinternal.h>
48130+#include <linux/pid_namespace.h>
48131+#include <linux/fdtable.h>
48132+#include <linux/percpu.h>
48133+
48134+#include <asm/uaccess.h>
48135+#include <asm/errno.h>
48136+#include <asm/mman.h>
48137+
48138+static struct acl_role_db acl_role_set;
48139+static struct name_db name_set;
48140+static struct inodev_db inodev_set;
48141+
48142+/* for keeping track of userspace pointers used for subjects, so we
48143+ can share references in the kernel as well
48144+*/
48145+
48146+static struct path real_root;
48147+
48148+static struct acl_subj_map_db subj_map_set;
48149+
48150+static struct acl_role_label *default_role;
48151+
48152+static struct acl_role_label *role_list;
48153+
48154+static u16 acl_sp_role_value;
48155+
48156+extern char *gr_shared_page[4];
48157+static DEFINE_MUTEX(gr_dev_mutex);
48158+DEFINE_RWLOCK(gr_inode_lock);
48159+
48160+struct gr_arg *gr_usermode;
48161+
48162+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48163+
48164+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48165+extern void gr_clear_learn_entries(void);
48166+
48167+#ifdef CONFIG_GRKERNSEC_RESLOG
48168+extern void gr_log_resource(const struct task_struct *task,
48169+ const int res, const unsigned long wanted, const int gt);
48170+#endif
48171+
48172+unsigned char *gr_system_salt;
48173+unsigned char *gr_system_sum;
48174+
48175+static struct sprole_pw **acl_special_roles = NULL;
48176+static __u16 num_sprole_pws = 0;
48177+
48178+static struct acl_role_label *kernel_role = NULL;
48179+
48180+static unsigned int gr_auth_attempts = 0;
48181+static unsigned long gr_auth_expires = 0UL;
48182+
48183+#ifdef CONFIG_NET
48184+extern struct vfsmount *sock_mnt;
48185+#endif
48186+
48187+extern struct vfsmount *pipe_mnt;
48188+extern struct vfsmount *shm_mnt;
48189+#ifdef CONFIG_HUGETLBFS
48190+extern struct vfsmount *hugetlbfs_vfsmount;
48191+#endif
48192+
48193+static struct acl_object_label *fakefs_obj_rw;
48194+static struct acl_object_label *fakefs_obj_rwx;
48195+
48196+extern int gr_init_uidset(void);
48197+extern void gr_free_uidset(void);
48198+extern void gr_remove_uid(uid_t uid);
48199+extern int gr_find_uid(uid_t uid);
48200+
48201+DECLARE_BRLOCK(vfsmount_lock);
48202+
48203+__inline__ int
48204+gr_acl_is_enabled(void)
48205+{
48206+ return (gr_status & GR_READY);
48207+}
48208+
48209+#ifdef CONFIG_BTRFS_FS
48210+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48211+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48212+#endif
48213+
48214+static inline dev_t __get_dev(const struct dentry *dentry)
48215+{
48216+#ifdef CONFIG_BTRFS_FS
48217+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48218+ return get_btrfs_dev_from_inode(dentry->d_inode);
48219+ else
48220+#endif
48221+ return dentry->d_inode->i_sb->s_dev;
48222+}
48223+
48224+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48225+{
48226+ return __get_dev(dentry);
48227+}
48228+
48229+static char gr_task_roletype_to_char(struct task_struct *task)
48230+{
48231+ switch (task->role->roletype &
48232+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48233+ GR_ROLE_SPECIAL)) {
48234+ case GR_ROLE_DEFAULT:
48235+ return 'D';
48236+ case GR_ROLE_USER:
48237+ return 'U';
48238+ case GR_ROLE_GROUP:
48239+ return 'G';
48240+ case GR_ROLE_SPECIAL:
48241+ return 'S';
48242+ }
48243+
48244+ return 'X';
48245+}
48246+
48247+char gr_roletype_to_char(void)
48248+{
48249+ return gr_task_roletype_to_char(current);
48250+}
48251+
48252+__inline__ int
48253+gr_acl_tpe_check(void)
48254+{
48255+ if (unlikely(!(gr_status & GR_READY)))
48256+ return 0;
48257+ if (current->role->roletype & GR_ROLE_TPE)
48258+ return 1;
48259+ else
48260+ return 0;
48261+}
48262+
48263+int
48264+gr_handle_rawio(const struct inode *inode)
48265+{
48266+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48267+ if (inode && S_ISBLK(inode->i_mode) &&
48268+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48269+ !capable(CAP_SYS_RAWIO))
48270+ return 1;
48271+#endif
48272+ return 0;
48273+}
48274+
48275+static int
48276+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48277+{
48278+ if (likely(lena != lenb))
48279+ return 0;
48280+
48281+ return !memcmp(a, b, lena);
48282+}
48283+
48284+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48285+{
48286+ *buflen -= namelen;
48287+ if (*buflen < 0)
48288+ return -ENAMETOOLONG;
48289+ *buffer -= namelen;
48290+ memcpy(*buffer, str, namelen);
48291+ return 0;
48292+}
48293+
48294+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48295+{
48296+ return prepend(buffer, buflen, name->name, name->len);
48297+}
48298+
48299+static int prepend_path(const struct path *path, struct path *root,
48300+ char **buffer, int *buflen)
48301+{
48302+ struct dentry *dentry = path->dentry;
48303+ struct vfsmount *vfsmnt = path->mnt;
48304+ bool slash = false;
48305+ int error = 0;
48306+
48307+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48308+ struct dentry * parent;
48309+
48310+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48311+ /* Global root? */
48312+ if (vfsmnt->mnt_parent == vfsmnt) {
48313+ goto out;
48314+ }
48315+ dentry = vfsmnt->mnt_mountpoint;
48316+ vfsmnt = vfsmnt->mnt_parent;
48317+ continue;
48318+ }
48319+ parent = dentry->d_parent;
48320+ prefetch(parent);
48321+ spin_lock(&dentry->d_lock);
48322+ error = prepend_name(buffer, buflen, &dentry->d_name);
48323+ spin_unlock(&dentry->d_lock);
48324+ if (!error)
48325+ error = prepend(buffer, buflen, "/", 1);
48326+ if (error)
48327+ break;
48328+
48329+ slash = true;
48330+ dentry = parent;
48331+ }
48332+
48333+out:
48334+ if (!error && !slash)
48335+ error = prepend(buffer, buflen, "/", 1);
48336+
48337+ return error;
48338+}
48339+
48340+/* this must be called with vfsmount_lock and rename_lock held */
48341+
48342+static char *__our_d_path(const struct path *path, struct path *root,
48343+ char *buf, int buflen)
48344+{
48345+ char *res = buf + buflen;
48346+ int error;
48347+
48348+ prepend(&res, &buflen, "\0", 1);
48349+ error = prepend_path(path, root, &res, &buflen);
48350+ if (error)
48351+ return ERR_PTR(error);
48352+
48353+ return res;
48354+}
48355+
48356+static char *
48357+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48358+{
48359+ char *retval;
48360+
48361+ retval = __our_d_path(path, root, buf, buflen);
48362+ if (unlikely(IS_ERR(retval)))
48363+ retval = strcpy(buf, "<path too long>");
48364+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48365+ retval[1] = '\0';
48366+
48367+ return retval;
48368+}
48369+
48370+static char *
48371+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48372+ char *buf, int buflen)
48373+{
48374+ struct path path;
48375+ char *res;
48376+
48377+ path.dentry = (struct dentry *)dentry;
48378+ path.mnt = (struct vfsmount *)vfsmnt;
48379+
48380+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48381+ by the RBAC system */
48382+ res = gen_full_path(&path, &real_root, buf, buflen);
48383+
48384+ return res;
48385+}
48386+
48387+static char *
48388+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48389+ char *buf, int buflen)
48390+{
48391+ char *res;
48392+ struct path path;
48393+ struct path root;
48394+ struct task_struct *reaper = &init_task;
48395+
48396+ path.dentry = (struct dentry *)dentry;
48397+ path.mnt = (struct vfsmount *)vfsmnt;
48398+
48399+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48400+ get_fs_root(reaper->fs, &root);
48401+
48402+ write_seqlock(&rename_lock);
48403+ br_read_lock(vfsmount_lock);
48404+ res = gen_full_path(&path, &root, buf, buflen);
48405+ br_read_unlock(vfsmount_lock);
48406+ write_sequnlock(&rename_lock);
48407+
48408+ path_put(&root);
48409+ return res;
48410+}
48411+
48412+static char *
48413+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48414+{
48415+ char *ret;
48416+ write_seqlock(&rename_lock);
48417+ br_read_lock(vfsmount_lock);
48418+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48419+ PAGE_SIZE);
48420+ br_read_unlock(vfsmount_lock);
48421+ write_sequnlock(&rename_lock);
48422+ return ret;
48423+}
48424+
48425+static char *
48426+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48427+{
48428+ char *ret;
48429+ char *buf;
48430+ int buflen;
48431+
48432+ write_seqlock(&rename_lock);
48433+ br_read_lock(vfsmount_lock);
48434+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48435+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48436+ buflen = (int)(ret - buf);
48437+ if (buflen >= 5)
48438+ prepend(&ret, &buflen, "/proc", 5);
48439+ else
48440+ ret = strcpy(buf, "<path too long>");
48441+ br_read_unlock(vfsmount_lock);
48442+ write_sequnlock(&rename_lock);
48443+ return ret;
48444+}
48445+
48446+char *
48447+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48448+{
48449+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48450+ PAGE_SIZE);
48451+}
48452+
48453+char *
48454+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48455+{
48456+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48457+ PAGE_SIZE);
48458+}
48459+
48460+char *
48461+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48462+{
48463+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48464+ PAGE_SIZE);
48465+}
48466+
48467+char *
48468+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48469+{
48470+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48471+ PAGE_SIZE);
48472+}
48473+
48474+char *
48475+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48476+{
48477+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48478+ PAGE_SIZE);
48479+}
48480+
48481+__inline__ __u32
48482+to_gr_audit(const __u32 reqmode)
48483+{
48484+ /* masks off auditable permission flags, then shifts them to create
48485+ auditing flags, and adds the special case of append auditing if
48486+ we're requesting write */
48487+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48488+}
48489+
48490+struct acl_subject_label *
48491+lookup_subject_map(const struct acl_subject_label *userp)
48492+{
48493+ unsigned int index = shash(userp, subj_map_set.s_size);
48494+ struct subject_map *match;
48495+
48496+ match = subj_map_set.s_hash[index];
48497+
48498+ while (match && match->user != userp)
48499+ match = match->next;
48500+
48501+ if (match != NULL)
48502+ return match->kernel;
48503+ else
48504+ return NULL;
48505+}
48506+
48507+static void
48508+insert_subj_map_entry(struct subject_map *subjmap)
48509+{
48510+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48511+ struct subject_map **curr;
48512+
48513+ subjmap->prev = NULL;
48514+
48515+ curr = &subj_map_set.s_hash[index];
48516+ if (*curr != NULL)
48517+ (*curr)->prev = subjmap;
48518+
48519+ subjmap->next = *curr;
48520+ *curr = subjmap;
48521+
48522+ return;
48523+}
48524+
48525+static struct acl_role_label *
48526+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48527+ const gid_t gid)
48528+{
48529+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48530+ struct acl_role_label *match;
48531+ struct role_allowed_ip *ipp;
48532+ unsigned int x;
48533+ u32 curr_ip = task->signal->curr_ip;
48534+
48535+ task->signal->saved_ip = curr_ip;
48536+
48537+ match = acl_role_set.r_hash[index];
48538+
48539+ while (match) {
48540+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48541+ for (x = 0; x < match->domain_child_num; x++) {
48542+ if (match->domain_children[x] == uid)
48543+ goto found;
48544+ }
48545+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48546+ break;
48547+ match = match->next;
48548+ }
48549+found:
48550+ if (match == NULL) {
48551+ try_group:
48552+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48553+ match = acl_role_set.r_hash[index];
48554+
48555+ while (match) {
48556+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48557+ for (x = 0; x < match->domain_child_num; x++) {
48558+ if (match->domain_children[x] == gid)
48559+ goto found2;
48560+ }
48561+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48562+ break;
48563+ match = match->next;
48564+ }
48565+found2:
48566+ if (match == NULL)
48567+ match = default_role;
48568+ if (match->allowed_ips == NULL)
48569+ return match;
48570+ else {
48571+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48572+ if (likely
48573+ ((ntohl(curr_ip) & ipp->netmask) ==
48574+ (ntohl(ipp->addr) & ipp->netmask)))
48575+ return match;
48576+ }
48577+ match = default_role;
48578+ }
48579+ } else if (match->allowed_ips == NULL) {
48580+ return match;
48581+ } else {
48582+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48583+ if (likely
48584+ ((ntohl(curr_ip) & ipp->netmask) ==
48585+ (ntohl(ipp->addr) & ipp->netmask)))
48586+ return match;
48587+ }
48588+ goto try_group;
48589+ }
48590+
48591+ return match;
48592+}
48593+
48594+struct acl_subject_label *
48595+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48596+ const struct acl_role_label *role)
48597+{
48598+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48599+ struct acl_subject_label *match;
48600+
48601+ match = role->subj_hash[index];
48602+
48603+ while (match && (match->inode != ino || match->device != dev ||
48604+ (match->mode & GR_DELETED))) {
48605+ match = match->next;
48606+ }
48607+
48608+ if (match && !(match->mode & GR_DELETED))
48609+ return match;
48610+ else
48611+ return NULL;
48612+}
48613+
48614+struct acl_subject_label *
48615+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48616+ const struct acl_role_label *role)
48617+{
48618+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48619+ struct acl_subject_label *match;
48620+
48621+ match = role->subj_hash[index];
48622+
48623+ while (match && (match->inode != ino || match->device != dev ||
48624+ !(match->mode & GR_DELETED))) {
48625+ match = match->next;
48626+ }
48627+
48628+ if (match && (match->mode & GR_DELETED))
48629+ return match;
48630+ else
48631+ return NULL;
48632+}
48633+
48634+static struct acl_object_label *
48635+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48636+ const struct acl_subject_label *subj)
48637+{
48638+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48639+ struct acl_object_label *match;
48640+
48641+ match = subj->obj_hash[index];
48642+
48643+ while (match && (match->inode != ino || match->device != dev ||
48644+ (match->mode & GR_DELETED))) {
48645+ match = match->next;
48646+ }
48647+
48648+ if (match && !(match->mode & GR_DELETED))
48649+ return match;
48650+ else
48651+ return NULL;
48652+}
48653+
48654+static struct acl_object_label *
48655+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48656+ const struct acl_subject_label *subj)
48657+{
48658+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48659+ struct acl_object_label *match;
48660+
48661+ match = subj->obj_hash[index];
48662+
48663+ while (match && (match->inode != ino || match->device != dev ||
48664+ !(match->mode & GR_DELETED))) {
48665+ match = match->next;
48666+ }
48667+
48668+ if (match && (match->mode & GR_DELETED))
48669+ return match;
48670+
48671+ match = subj->obj_hash[index];
48672+
48673+ while (match && (match->inode != ino || match->device != dev ||
48674+ (match->mode & GR_DELETED))) {
48675+ match = match->next;
48676+ }
48677+
48678+ if (match && !(match->mode & GR_DELETED))
48679+ return match;
48680+ else
48681+ return NULL;
48682+}
48683+
48684+static struct name_entry *
48685+lookup_name_entry(const char *name)
48686+{
48687+ unsigned int len = strlen(name);
48688+ unsigned int key = full_name_hash(name, len);
48689+ unsigned int index = key % name_set.n_size;
48690+ struct name_entry *match;
48691+
48692+ match = name_set.n_hash[index];
48693+
48694+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48695+ match = match->next;
48696+
48697+ return match;
48698+}
48699+
48700+static struct name_entry *
48701+lookup_name_entry_create(const char *name)
48702+{
48703+ unsigned int len = strlen(name);
48704+ unsigned int key = full_name_hash(name, len);
48705+ unsigned int index = key % name_set.n_size;
48706+ struct name_entry *match;
48707+
48708+ match = name_set.n_hash[index];
48709+
48710+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48711+ !match->deleted))
48712+ match = match->next;
48713+
48714+ if (match && match->deleted)
48715+ return match;
48716+
48717+ match = name_set.n_hash[index];
48718+
48719+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48720+ match->deleted))
48721+ match = match->next;
48722+
48723+ if (match && !match->deleted)
48724+ return match;
48725+ else
48726+ return NULL;
48727+}
48728+
48729+static struct inodev_entry *
48730+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48731+{
48732+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48733+ struct inodev_entry *match;
48734+
48735+ match = inodev_set.i_hash[index];
48736+
48737+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48738+ match = match->next;
48739+
48740+ return match;
48741+}
48742+
48743+static void
48744+insert_inodev_entry(struct inodev_entry *entry)
48745+{
48746+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48747+ inodev_set.i_size);
48748+ struct inodev_entry **curr;
48749+
48750+ entry->prev = NULL;
48751+
48752+ curr = &inodev_set.i_hash[index];
48753+ if (*curr != NULL)
48754+ (*curr)->prev = entry;
48755+
48756+ entry->next = *curr;
48757+ *curr = entry;
48758+
48759+ return;
48760+}
48761+
48762+static void
48763+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48764+{
48765+ unsigned int index =
48766+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48767+ struct acl_role_label **curr;
48768+ struct acl_role_label *tmp;
48769+
48770+ curr = &acl_role_set.r_hash[index];
48771+
48772+ /* if role was already inserted due to domains and already has
48773+ a role in the same bucket as it attached, then we need to
48774+ combine these two buckets
48775+ */
48776+ if (role->next) {
48777+ tmp = role->next;
48778+ while (tmp->next)
48779+ tmp = tmp->next;
48780+ tmp->next = *curr;
48781+ } else
48782+ role->next = *curr;
48783+ *curr = role;
48784+
48785+ return;
48786+}
48787+
48788+static void
48789+insert_acl_role_label(struct acl_role_label *role)
48790+{
48791+ int i;
48792+
48793+ if (role_list == NULL) {
48794+ role_list = role;
48795+ role->prev = NULL;
48796+ } else {
48797+ role->prev = role_list;
48798+ role_list = role;
48799+ }
48800+
48801+ /* used for hash chains */
48802+ role->next = NULL;
48803+
48804+ if (role->roletype & GR_ROLE_DOMAIN) {
48805+ for (i = 0; i < role->domain_child_num; i++)
48806+ __insert_acl_role_label(role, role->domain_children[i]);
48807+ } else
48808+ __insert_acl_role_label(role, role->uidgid);
48809+}
48810+
48811+static int
48812+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48813+{
48814+ struct name_entry **curr, *nentry;
48815+ struct inodev_entry *ientry;
48816+ unsigned int len = strlen(name);
48817+ unsigned int key = full_name_hash(name, len);
48818+ unsigned int index = key % name_set.n_size;
48819+
48820+ curr = &name_set.n_hash[index];
48821+
48822+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48823+ curr = &((*curr)->next);
48824+
48825+ if (*curr != NULL)
48826+ return 1;
48827+
48828+ nentry = acl_alloc(sizeof (struct name_entry));
48829+ if (nentry == NULL)
48830+ return 0;
48831+ ientry = acl_alloc(sizeof (struct inodev_entry));
48832+ if (ientry == NULL)
48833+ return 0;
48834+ ientry->nentry = nentry;
48835+
48836+ nentry->key = key;
48837+ nentry->name = name;
48838+ nentry->inode = inode;
48839+ nentry->device = device;
48840+ nentry->len = len;
48841+ nentry->deleted = deleted;
48842+
48843+ nentry->prev = NULL;
48844+ curr = &name_set.n_hash[index];
48845+ if (*curr != NULL)
48846+ (*curr)->prev = nentry;
48847+ nentry->next = *curr;
48848+ *curr = nentry;
48849+
48850+ /* insert us into the table searchable by inode/dev */
48851+ insert_inodev_entry(ientry);
48852+
48853+ return 1;
48854+}
48855+
48856+static void
48857+insert_acl_obj_label(struct acl_object_label *obj,
48858+ struct acl_subject_label *subj)
48859+{
48860+ unsigned int index =
48861+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48862+ struct acl_object_label **curr;
48863+
48864+
48865+ obj->prev = NULL;
48866+
48867+ curr = &subj->obj_hash[index];
48868+ if (*curr != NULL)
48869+ (*curr)->prev = obj;
48870+
48871+ obj->next = *curr;
48872+ *curr = obj;
48873+
48874+ return;
48875+}
48876+
48877+static void
48878+insert_acl_subj_label(struct acl_subject_label *obj,
48879+ struct acl_role_label *role)
48880+{
48881+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48882+ struct acl_subject_label **curr;
48883+
48884+ obj->prev = NULL;
48885+
48886+ curr = &role->subj_hash[index];
48887+ if (*curr != NULL)
48888+ (*curr)->prev = obj;
48889+
48890+ obj->next = *curr;
48891+ *curr = obj;
48892+
48893+ return;
48894+}
48895+
48896+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48897+
48898+static void *
48899+create_table(__u32 * len, int elementsize)
48900+{
48901+ unsigned int table_sizes[] = {
48902+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48903+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48904+ 4194301, 8388593, 16777213, 33554393, 67108859
48905+ };
48906+ void *newtable = NULL;
48907+ unsigned int pwr = 0;
48908+
48909+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48910+ table_sizes[pwr] <= *len)
48911+ pwr++;
48912+
48913+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48914+ return newtable;
48915+
48916+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48917+ newtable =
48918+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48919+ else
48920+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48921+
48922+ *len = table_sizes[pwr];
48923+
48924+ return newtable;
48925+}
48926+
48927+static int
48928+init_variables(const struct gr_arg *arg)
48929+{
48930+ struct task_struct *reaper = &init_task;
48931+ unsigned int stacksize;
48932+
48933+ subj_map_set.s_size = arg->role_db.num_subjects;
48934+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48935+ name_set.n_size = arg->role_db.num_objects;
48936+ inodev_set.i_size = arg->role_db.num_objects;
48937+
48938+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48939+ !name_set.n_size || !inodev_set.i_size)
48940+ return 1;
48941+
48942+ if (!gr_init_uidset())
48943+ return 1;
48944+
48945+ /* set up the stack that holds allocation info */
48946+
48947+ stacksize = arg->role_db.num_pointers + 5;
48948+
48949+ if (!acl_alloc_stack_init(stacksize))
48950+ return 1;
48951+
48952+ /* grab reference for the real root dentry and vfsmount */
48953+ get_fs_root(reaper->fs, &real_root);
48954+
48955+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48956+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48957+#endif
48958+
48959+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48960+ if (fakefs_obj_rw == NULL)
48961+ return 1;
48962+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48963+
48964+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48965+ if (fakefs_obj_rwx == NULL)
48966+ return 1;
48967+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48968+
48969+ subj_map_set.s_hash =
48970+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48971+ acl_role_set.r_hash =
48972+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48973+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48974+ inodev_set.i_hash =
48975+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48976+
48977+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48978+ !name_set.n_hash || !inodev_set.i_hash)
48979+ return 1;
48980+
48981+ memset(subj_map_set.s_hash, 0,
48982+ sizeof(struct subject_map *) * subj_map_set.s_size);
48983+ memset(acl_role_set.r_hash, 0,
48984+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48985+ memset(name_set.n_hash, 0,
48986+ sizeof (struct name_entry *) * name_set.n_size);
48987+ memset(inodev_set.i_hash, 0,
48988+ sizeof (struct inodev_entry *) * inodev_set.i_size);
48989+
48990+ return 0;
48991+}
48992+
48993+/* free information not needed after startup
48994+ currently contains user->kernel pointer mappings for subjects
48995+*/
48996+
48997+static void
48998+free_init_variables(void)
48999+{
49000+ __u32 i;
49001+
49002+ if (subj_map_set.s_hash) {
49003+ for (i = 0; i < subj_map_set.s_size; i++) {
49004+ if (subj_map_set.s_hash[i]) {
49005+ kfree(subj_map_set.s_hash[i]);
49006+ subj_map_set.s_hash[i] = NULL;
49007+ }
49008+ }
49009+
49010+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49011+ PAGE_SIZE)
49012+ kfree(subj_map_set.s_hash);
49013+ else
49014+ vfree(subj_map_set.s_hash);
49015+ }
49016+
49017+ return;
49018+}
49019+
49020+static void
49021+free_variables(void)
49022+{
49023+ struct acl_subject_label *s;
49024+ struct acl_role_label *r;
49025+ struct task_struct *task, *task2;
49026+ unsigned int x;
49027+
49028+ gr_clear_learn_entries();
49029+
49030+ read_lock(&tasklist_lock);
49031+ do_each_thread(task2, task) {
49032+ task->acl_sp_role = 0;
49033+ task->acl_role_id = 0;
49034+ task->acl = NULL;
49035+ task->role = NULL;
49036+ } while_each_thread(task2, task);
49037+ read_unlock(&tasklist_lock);
49038+
49039+ /* release the reference to the real root dentry and vfsmount */
49040+ path_put(&real_root);
49041+
49042+ /* free all object hash tables */
49043+
49044+ FOR_EACH_ROLE_START(r)
49045+ if (r->subj_hash == NULL)
49046+ goto next_role;
49047+ FOR_EACH_SUBJECT_START(r, s, x)
49048+ if (s->obj_hash == NULL)
49049+ break;
49050+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49051+ kfree(s->obj_hash);
49052+ else
49053+ vfree(s->obj_hash);
49054+ FOR_EACH_SUBJECT_END(s, x)
49055+ FOR_EACH_NESTED_SUBJECT_START(r, s)
49056+ if (s->obj_hash == NULL)
49057+ break;
49058+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49059+ kfree(s->obj_hash);
49060+ else
49061+ vfree(s->obj_hash);
49062+ FOR_EACH_NESTED_SUBJECT_END(s)
49063+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49064+ kfree(r->subj_hash);
49065+ else
49066+ vfree(r->subj_hash);
49067+ r->subj_hash = NULL;
49068+next_role:
49069+ FOR_EACH_ROLE_END(r)
49070+
49071+ acl_free_all();
49072+
49073+ if (acl_role_set.r_hash) {
49074+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49075+ PAGE_SIZE)
49076+ kfree(acl_role_set.r_hash);
49077+ else
49078+ vfree(acl_role_set.r_hash);
49079+ }
49080+ if (name_set.n_hash) {
49081+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
49082+ PAGE_SIZE)
49083+ kfree(name_set.n_hash);
49084+ else
49085+ vfree(name_set.n_hash);
49086+ }
49087+
49088+ if (inodev_set.i_hash) {
49089+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49090+ PAGE_SIZE)
49091+ kfree(inodev_set.i_hash);
49092+ else
49093+ vfree(inodev_set.i_hash);
49094+ }
49095+
49096+ gr_free_uidset();
49097+
49098+ memset(&name_set, 0, sizeof (struct name_db));
49099+ memset(&inodev_set, 0, sizeof (struct inodev_db));
49100+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49101+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49102+
49103+ default_role = NULL;
49104+ role_list = NULL;
49105+
49106+ return;
49107+}
49108+
49109+static __u32
49110+count_user_objs(struct acl_object_label *userp)
49111+{
49112+ struct acl_object_label o_tmp;
49113+ __u32 num = 0;
49114+
49115+ while (userp) {
49116+ if (copy_from_user(&o_tmp, userp,
49117+ sizeof (struct acl_object_label)))
49118+ break;
49119+
49120+ userp = o_tmp.prev;
49121+ num++;
49122+ }
49123+
49124+ return num;
49125+}
49126+
49127+static struct acl_subject_label *
49128+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49129+
49130+static int
49131+copy_user_glob(struct acl_object_label *obj)
49132+{
49133+ struct acl_object_label *g_tmp, **guser;
49134+ unsigned int len;
49135+ char *tmp;
49136+
49137+ if (obj->globbed == NULL)
49138+ return 0;
49139+
49140+ guser = &obj->globbed;
49141+ while (*guser) {
49142+ g_tmp = (struct acl_object_label *)
49143+ acl_alloc(sizeof (struct acl_object_label));
49144+ if (g_tmp == NULL)
49145+ return -ENOMEM;
49146+
49147+ if (copy_from_user(g_tmp, *guser,
49148+ sizeof (struct acl_object_label)))
49149+ return -EFAULT;
49150+
49151+ len = strnlen_user(g_tmp->filename, PATH_MAX);
49152+
49153+ if (!len || len >= PATH_MAX)
49154+ return -EINVAL;
49155+
49156+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49157+ return -ENOMEM;
49158+
49159+ if (copy_from_user(tmp, g_tmp->filename, len))
49160+ return -EFAULT;
49161+ tmp[len-1] = '\0';
49162+ g_tmp->filename = tmp;
49163+
49164+ *guser = g_tmp;
49165+ guser = &(g_tmp->next);
49166+ }
49167+
49168+ return 0;
49169+}
49170+
49171+static int
49172+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49173+ struct acl_role_label *role)
49174+{
49175+ struct acl_object_label *o_tmp;
49176+ unsigned int len;
49177+ int ret;
49178+ char *tmp;
49179+
49180+ while (userp) {
49181+ if ((o_tmp = (struct acl_object_label *)
49182+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
49183+ return -ENOMEM;
49184+
49185+ if (copy_from_user(o_tmp, userp,
49186+ sizeof (struct acl_object_label)))
49187+ return -EFAULT;
49188+
49189+ userp = o_tmp->prev;
49190+
49191+ len = strnlen_user(o_tmp->filename, PATH_MAX);
49192+
49193+ if (!len || len >= PATH_MAX)
49194+ return -EINVAL;
49195+
49196+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49197+ return -ENOMEM;
49198+
49199+ if (copy_from_user(tmp, o_tmp->filename, len))
49200+ return -EFAULT;
49201+ tmp[len-1] = '\0';
49202+ o_tmp->filename = tmp;
49203+
49204+ insert_acl_obj_label(o_tmp, subj);
49205+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49206+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49207+ return -ENOMEM;
49208+
49209+ ret = copy_user_glob(o_tmp);
49210+ if (ret)
49211+ return ret;
49212+
49213+ if (o_tmp->nested) {
49214+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49215+ if (IS_ERR(o_tmp->nested))
49216+ return PTR_ERR(o_tmp->nested);
49217+
49218+ /* insert into nested subject list */
49219+ o_tmp->nested->next = role->hash->first;
49220+ role->hash->first = o_tmp->nested;
49221+ }
49222+ }
49223+
49224+ return 0;
49225+}
49226+
49227+static __u32
49228+count_user_subjs(struct acl_subject_label *userp)
49229+{
49230+ struct acl_subject_label s_tmp;
49231+ __u32 num = 0;
49232+
49233+ while (userp) {
49234+ if (copy_from_user(&s_tmp, userp,
49235+ sizeof (struct acl_subject_label)))
49236+ break;
49237+
49238+ userp = s_tmp.prev;
49239+ /* do not count nested subjects against this count, since
49240+ they are not included in the hash table, but are
49241+ attached to objects. We have already counted
49242+ the subjects in userspace for the allocation
49243+ stack
49244+ */
49245+ if (!(s_tmp.mode & GR_NESTED))
49246+ num++;
49247+ }
49248+
49249+ return num;
49250+}
49251+
49252+static int
49253+copy_user_allowedips(struct acl_role_label *rolep)
49254+{
49255+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49256+
49257+ ruserip = rolep->allowed_ips;
49258+
49259+ while (ruserip) {
49260+ rlast = rtmp;
49261+
49262+ if ((rtmp = (struct role_allowed_ip *)
49263+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49264+ return -ENOMEM;
49265+
49266+ if (copy_from_user(rtmp, ruserip,
49267+ sizeof (struct role_allowed_ip)))
49268+ return -EFAULT;
49269+
49270+ ruserip = rtmp->prev;
49271+
49272+ if (!rlast) {
49273+ rtmp->prev = NULL;
49274+ rolep->allowed_ips = rtmp;
49275+ } else {
49276+ rlast->next = rtmp;
49277+ rtmp->prev = rlast;
49278+ }
49279+
49280+ if (!ruserip)
49281+ rtmp->next = NULL;
49282+ }
49283+
49284+ return 0;
49285+}
49286+
49287+static int
49288+copy_user_transitions(struct acl_role_label *rolep)
49289+{
49290+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49291+
49292+ unsigned int len;
49293+ char *tmp;
49294+
49295+ rusertp = rolep->transitions;
49296+
49297+ while (rusertp) {
49298+ rlast = rtmp;
49299+
49300+ if ((rtmp = (struct role_transition *)
49301+ acl_alloc(sizeof (struct role_transition))) == NULL)
49302+ return -ENOMEM;
49303+
49304+ if (copy_from_user(rtmp, rusertp,
49305+ sizeof (struct role_transition)))
49306+ return -EFAULT;
49307+
49308+ rusertp = rtmp->prev;
49309+
49310+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49311+
49312+ if (!len || len >= GR_SPROLE_LEN)
49313+ return -EINVAL;
49314+
49315+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49316+ return -ENOMEM;
49317+
49318+ if (copy_from_user(tmp, rtmp->rolename, len))
49319+ return -EFAULT;
49320+ tmp[len-1] = '\0';
49321+ rtmp->rolename = tmp;
49322+
49323+ if (!rlast) {
49324+ rtmp->prev = NULL;
49325+ rolep->transitions = rtmp;
49326+ } else {
49327+ rlast->next = rtmp;
49328+ rtmp->prev = rlast;
49329+ }
49330+
49331+ if (!rusertp)
49332+ rtmp->next = NULL;
49333+ }
49334+
49335+ return 0;
49336+}
49337+
49338+static struct acl_subject_label *
49339+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49340+{
49341+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49342+ unsigned int len;
49343+ char *tmp;
49344+ __u32 num_objs;
49345+ struct acl_ip_label **i_tmp, *i_utmp2;
49346+ struct gr_hash_struct ghash;
49347+ struct subject_map *subjmap;
49348+ unsigned int i_num;
49349+ int err;
49350+
49351+ s_tmp = lookup_subject_map(userp);
49352+
49353+ /* we've already copied this subject into the kernel, just return
49354+ the reference to it, and don't copy it over again
49355+ */
49356+ if (s_tmp)
49357+ return(s_tmp);
49358+
49359+ if ((s_tmp = (struct acl_subject_label *)
49360+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49361+ return ERR_PTR(-ENOMEM);
49362+
49363+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49364+ if (subjmap == NULL)
49365+ return ERR_PTR(-ENOMEM);
49366+
49367+ subjmap->user = userp;
49368+ subjmap->kernel = s_tmp;
49369+ insert_subj_map_entry(subjmap);
49370+
49371+ if (copy_from_user(s_tmp, userp,
49372+ sizeof (struct acl_subject_label)))
49373+ return ERR_PTR(-EFAULT);
49374+
49375+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49376+
49377+ if (!len || len >= PATH_MAX)
49378+ return ERR_PTR(-EINVAL);
49379+
49380+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49381+ return ERR_PTR(-ENOMEM);
49382+
49383+ if (copy_from_user(tmp, s_tmp->filename, len))
49384+ return ERR_PTR(-EFAULT);
49385+ tmp[len-1] = '\0';
49386+ s_tmp->filename = tmp;
49387+
49388+ if (!strcmp(s_tmp->filename, "/"))
49389+ role->root_label = s_tmp;
49390+
49391+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49392+ return ERR_PTR(-EFAULT);
49393+
49394+ /* copy user and group transition tables */
49395+
49396+ if (s_tmp->user_trans_num) {
49397+ uid_t *uidlist;
49398+
49399+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49400+ if (uidlist == NULL)
49401+ return ERR_PTR(-ENOMEM);
49402+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49403+ return ERR_PTR(-EFAULT);
49404+
49405+ s_tmp->user_transitions = uidlist;
49406+ }
49407+
49408+ if (s_tmp->group_trans_num) {
49409+ gid_t *gidlist;
49410+
49411+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49412+ if (gidlist == NULL)
49413+ return ERR_PTR(-ENOMEM);
49414+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49415+ return ERR_PTR(-EFAULT);
49416+
49417+ s_tmp->group_transitions = gidlist;
49418+ }
49419+
49420+ /* set up object hash table */
49421+ num_objs = count_user_objs(ghash.first);
49422+
49423+ s_tmp->obj_hash_size = num_objs;
49424+ s_tmp->obj_hash =
49425+ (struct acl_object_label **)
49426+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49427+
49428+ if (!s_tmp->obj_hash)
49429+ return ERR_PTR(-ENOMEM);
49430+
49431+ memset(s_tmp->obj_hash, 0,
49432+ s_tmp->obj_hash_size *
49433+ sizeof (struct acl_object_label *));
49434+
49435+ /* add in objects */
49436+ err = copy_user_objs(ghash.first, s_tmp, role);
49437+
49438+ if (err)
49439+ return ERR_PTR(err);
49440+
49441+ /* set pointer for parent subject */
49442+ if (s_tmp->parent_subject) {
49443+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49444+
49445+ if (IS_ERR(s_tmp2))
49446+ return s_tmp2;
49447+
49448+ s_tmp->parent_subject = s_tmp2;
49449+ }
49450+
49451+ /* add in ip acls */
49452+
49453+ if (!s_tmp->ip_num) {
49454+ s_tmp->ips = NULL;
49455+ goto insert;
49456+ }
49457+
49458+ i_tmp =
49459+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49460+ sizeof (struct acl_ip_label *));
49461+
49462+ if (!i_tmp)
49463+ return ERR_PTR(-ENOMEM);
49464+
49465+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49466+ *(i_tmp + i_num) =
49467+ (struct acl_ip_label *)
49468+ acl_alloc(sizeof (struct acl_ip_label));
49469+ if (!*(i_tmp + i_num))
49470+ return ERR_PTR(-ENOMEM);
49471+
49472+ if (copy_from_user
49473+ (&i_utmp2, s_tmp->ips + i_num,
49474+ sizeof (struct acl_ip_label *)))
49475+ return ERR_PTR(-EFAULT);
49476+
49477+ if (copy_from_user
49478+ (*(i_tmp + i_num), i_utmp2,
49479+ sizeof (struct acl_ip_label)))
49480+ return ERR_PTR(-EFAULT);
49481+
49482+ if ((*(i_tmp + i_num))->iface == NULL)
49483+ continue;
49484+
49485+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49486+ if (!len || len >= IFNAMSIZ)
49487+ return ERR_PTR(-EINVAL);
49488+ tmp = acl_alloc(len);
49489+ if (tmp == NULL)
49490+ return ERR_PTR(-ENOMEM);
49491+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49492+ return ERR_PTR(-EFAULT);
49493+ (*(i_tmp + i_num))->iface = tmp;
49494+ }
49495+
49496+ s_tmp->ips = i_tmp;
49497+
49498+insert:
49499+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49500+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49501+ return ERR_PTR(-ENOMEM);
49502+
49503+ return s_tmp;
49504+}
49505+
49506+static int
49507+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49508+{
49509+ struct acl_subject_label s_pre;
49510+ struct acl_subject_label * ret;
49511+ int err;
49512+
49513+ while (userp) {
49514+ if (copy_from_user(&s_pre, userp,
49515+ sizeof (struct acl_subject_label)))
49516+ return -EFAULT;
49517+
49518+ /* do not add nested subjects here, add
49519+ while parsing objects
49520+ */
49521+
49522+ if (s_pre.mode & GR_NESTED) {
49523+ userp = s_pre.prev;
49524+ continue;
49525+ }
49526+
49527+ ret = do_copy_user_subj(userp, role);
49528+
49529+ err = PTR_ERR(ret);
49530+ if (IS_ERR(ret))
49531+ return err;
49532+
49533+ insert_acl_subj_label(ret, role);
49534+
49535+ userp = s_pre.prev;
49536+ }
49537+
49538+ return 0;
49539+}
49540+
49541+static int
49542+copy_user_acl(struct gr_arg *arg)
49543+{
49544+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49545+ struct sprole_pw *sptmp;
49546+ struct gr_hash_struct *ghash;
49547+ uid_t *domainlist;
49548+ unsigned int r_num;
49549+ unsigned int len;
49550+ char *tmp;
49551+ int err = 0;
49552+ __u16 i;
49553+ __u32 num_subjs;
49554+
49555+ /* we need a default and kernel role */
49556+ if (arg->role_db.num_roles < 2)
49557+ return -EINVAL;
49558+
49559+ /* copy special role authentication info from userspace */
49560+
49561+ num_sprole_pws = arg->num_sprole_pws;
49562+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49563+
49564+ if (!acl_special_roles) {
49565+ err = -ENOMEM;
49566+ goto cleanup;
49567+ }
49568+
49569+ for (i = 0; i < num_sprole_pws; i++) {
49570+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49571+ if (!sptmp) {
49572+ err = -ENOMEM;
49573+ goto cleanup;
49574+ }
49575+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49576+ sizeof (struct sprole_pw))) {
49577+ err = -EFAULT;
49578+ goto cleanup;
49579+ }
49580+
49581+ len =
49582+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49583+
49584+ if (!len || len >= GR_SPROLE_LEN) {
49585+ err = -EINVAL;
49586+ goto cleanup;
49587+ }
49588+
49589+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49590+ err = -ENOMEM;
49591+ goto cleanup;
49592+ }
49593+
49594+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49595+ err = -EFAULT;
49596+ goto cleanup;
49597+ }
49598+ tmp[len-1] = '\0';
49599+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49600+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49601+#endif
49602+ sptmp->rolename = tmp;
49603+ acl_special_roles[i] = sptmp;
49604+ }
49605+
49606+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49607+
49608+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49609+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49610+
49611+ if (!r_tmp) {
49612+ err = -ENOMEM;
49613+ goto cleanup;
49614+ }
49615+
49616+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49617+ sizeof (struct acl_role_label *))) {
49618+ err = -EFAULT;
49619+ goto cleanup;
49620+ }
49621+
49622+ if (copy_from_user(r_tmp, r_utmp2,
49623+ sizeof (struct acl_role_label))) {
49624+ err = -EFAULT;
49625+ goto cleanup;
49626+ }
49627+
49628+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49629+
49630+		if (!len || len >= GR_SPROLE_LEN) {
49631+ err = -EINVAL;
49632+ goto cleanup;
49633+ }
49634+
49635+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49636+ err = -ENOMEM;
49637+ goto cleanup;
49638+ }
49639+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49640+ err = -EFAULT;
49641+ goto cleanup;
49642+ }
49643+ tmp[len-1] = '\0';
49644+ r_tmp->rolename = tmp;
49645+
49646+ if (!strcmp(r_tmp->rolename, "default")
49647+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49648+ default_role = r_tmp;
49649+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49650+ kernel_role = r_tmp;
49651+ }
49652+
49653+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49654+ err = -ENOMEM;
49655+ goto cleanup;
49656+ }
49657+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49658+ err = -EFAULT;
49659+ goto cleanup;
49660+ }
49661+
49662+ r_tmp->hash = ghash;
49663+
49664+ num_subjs = count_user_subjs(r_tmp->hash->first);
49665+
49666+ r_tmp->subj_hash_size = num_subjs;
49667+ r_tmp->subj_hash =
49668+ (struct acl_subject_label **)
49669+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49670+
49671+ if (!r_tmp->subj_hash) {
49672+ err = -ENOMEM;
49673+ goto cleanup;
49674+ }
49675+
49676+ err = copy_user_allowedips(r_tmp);
49677+ if (err)
49678+ goto cleanup;
49679+
49680+ /* copy domain info */
49681+ if (r_tmp->domain_children != NULL) {
49682+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49683+ if (domainlist == NULL) {
49684+ err = -ENOMEM;
49685+ goto cleanup;
49686+ }
49687+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49688+ err = -EFAULT;
49689+ goto cleanup;
49690+ }
49691+ r_tmp->domain_children = domainlist;
49692+ }
49693+
49694+ err = copy_user_transitions(r_tmp);
49695+ if (err)
49696+ goto cleanup;
49697+
49698+ memset(r_tmp->subj_hash, 0,
49699+ r_tmp->subj_hash_size *
49700+ sizeof (struct acl_subject_label *));
49701+
49702+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49703+
49704+ if (err)
49705+ goto cleanup;
49706+
49707+ /* set nested subject list to null */
49708+ r_tmp->hash->first = NULL;
49709+
49710+ insert_acl_role_label(r_tmp);
49711+ }
49712+
49713+ goto return_err;
49714+ cleanup:
49715+ free_variables();
49716+ return_err:
49717+ return err;
49718+
49719+}
49720+
49721+static int
49722+gracl_init(struct gr_arg *args)
49723+{
49724+ int error = 0;
49725+
49726+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49727+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49728+
49729+ if (init_variables(args)) {
49730+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49731+ error = -ENOMEM;
49732+ free_variables();
49733+ goto out;
49734+ }
49735+
49736+ error = copy_user_acl(args);
49737+ free_init_variables();
49738+ if (error) {
49739+ free_variables();
49740+ goto out;
49741+ }
49742+
49743+ if ((error = gr_set_acls(0))) {
49744+ free_variables();
49745+ goto out;
49746+ }
49747+
49748+ pax_open_kernel();
49749+ gr_status |= GR_READY;
49750+ pax_close_kernel();
49751+
49752+ out:
49753+ return error;
49754+}
49755+
49756+/* derived from glibc fnmatch() 0: match, 1: no match*/
49757+
49758+static int
49759+glob_match(const char *p, const char *n)
49760+{
49761+ char c;
49762+
49763+ while ((c = *p++) != '\0') {
49764+ switch (c) {
49765+ case '?':
49766+ if (*n == '\0')
49767+ return 1;
49768+ else if (*n == '/')
49769+ return 1;
49770+ break;
49771+ case '\\':
49772+ if (*n != c)
49773+ return 1;
49774+ break;
49775+ case '*':
49776+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49777+ if (*n == '/')
49778+ return 1;
49779+ else if (c == '?') {
49780+ if (*n == '\0')
49781+ return 1;
49782+ else
49783+ ++n;
49784+ }
49785+ }
49786+ if (c == '\0') {
49787+ return 0;
49788+ } else {
49789+ const char *endp;
49790+
49791+ if ((endp = strchr(n, '/')) == NULL)
49792+ endp = n + strlen(n);
49793+
49794+ if (c == '[') {
49795+ for (--p; n < endp; ++n)
49796+ if (!glob_match(p, n))
49797+ return 0;
49798+ } else if (c == '/') {
49799+ while (*n != '\0' && *n != '/')
49800+ ++n;
49801+ if (*n == '/' && !glob_match(p, n + 1))
49802+ return 0;
49803+ } else {
49804+ for (--p; n < endp; ++n)
49805+ if (*n == c && !glob_match(p, n))
49806+ return 0;
49807+ }
49808+
49809+ return 1;
49810+ }
49811+ case '[':
49812+ {
49813+ int not;
49814+ char cold;
49815+
49816+ if (*n == '\0' || *n == '/')
49817+ return 1;
49818+
49819+ not = (*p == '!' || *p == '^');
49820+ if (not)
49821+ ++p;
49822+
49823+ c = *p++;
49824+ for (;;) {
49825+ unsigned char fn = (unsigned char)*n;
49826+
49827+ if (c == '\0')
49828+ return 1;
49829+ else {
49830+ if (c == fn)
49831+ goto matched;
49832+ cold = c;
49833+ c = *p++;
49834+
49835+ if (c == '-' && *p != ']') {
49836+ unsigned char cend = *p++;
49837+
49838+ if (cend == '\0')
49839+ return 1;
49840+
49841+ if (cold <= fn && fn <= cend)
49842+ goto matched;
49843+
49844+ c = *p++;
49845+ }
49846+ }
49847+
49848+ if (c == ']')
49849+ break;
49850+ }
49851+ if (!not)
49852+ return 1;
49853+ break;
49854+ matched:
49855+ while (c != ']') {
49856+ if (c == '\0')
49857+ return 1;
49858+
49859+ c = *p++;
49860+ }
49861+ if (not)
49862+ return 1;
49863+ }
49864+ break;
49865+ default:
49866+ if (c != *n)
49867+ return 1;
49868+ }
49869+
49870+ ++n;
49871+ }
49872+
49873+ if (*n == '\0')
49874+ return 0;
49875+
49876+ if (*n == '/')
49877+ return 0;
49878+
49879+ return 1;
49880+}
49881+
49882+static struct acl_object_label *
49883+chk_glob_label(struct acl_object_label *globbed,
49884+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49885+{
49886+ struct acl_object_label *tmp;
49887+
49888+ if (*path == NULL)
49889+ *path = gr_to_filename_nolock(dentry, mnt);
49890+
49891+ tmp = globbed;
49892+
49893+ while (tmp) {
49894+ if (!glob_match(tmp->filename, *path))
49895+ return tmp;
49896+ tmp = tmp->next;
49897+ }
49898+
49899+ return NULL;
49900+}
49901+
49902+static struct acl_object_label *
49903+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49904+ const ino_t curr_ino, const dev_t curr_dev,
49905+ const struct acl_subject_label *subj, char **path, const int checkglob)
49906+{
49907+ struct acl_subject_label *tmpsubj;
49908+ struct acl_object_label *retval;
49909+ struct acl_object_label *retval2;
49910+
49911+ tmpsubj = (struct acl_subject_label *) subj;
49912+ read_lock(&gr_inode_lock);
49913+ do {
49914+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49915+ if (retval) {
49916+ if (checkglob && retval->globbed) {
49917+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49918+ (struct vfsmount *)orig_mnt, path);
49919+ if (retval2)
49920+ retval = retval2;
49921+ }
49922+ break;
49923+ }
49924+ } while ((tmpsubj = tmpsubj->parent_subject));
49925+ read_unlock(&gr_inode_lock);
49926+
49927+ return retval;
49928+}
49929+
49930+static __inline__ struct acl_object_label *
49931+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49932+ struct dentry *curr_dentry,
49933+ const struct acl_subject_label *subj, char **path, const int checkglob)
49934+{
49935+ int newglob = checkglob;
49936+ ino_t inode;
49937+ dev_t device;
49938+
49939+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49940+ as we don't want a / * rule to match instead of the / object
49941+ don't do this for create lookups that call this function though, since they're looking up
49942+ on the parent and thus need globbing checks on all paths
49943+ */
49944+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49945+ newglob = GR_NO_GLOB;
49946+
49947+ spin_lock(&curr_dentry->d_lock);
49948+ inode = curr_dentry->d_inode->i_ino;
49949+ device = __get_dev(curr_dentry);
49950+ spin_unlock(&curr_dentry->d_lock);
49951+
49952+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49953+}
49954+
49955+static struct acl_object_label *
49956+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49957+ const struct acl_subject_label *subj, char *path, const int checkglob)
49958+{
49959+ struct dentry *dentry = (struct dentry *) l_dentry;
49960+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49961+ struct acl_object_label *retval;
49962+ struct dentry *parent;
49963+
49964+ write_seqlock(&rename_lock);
49965+ br_read_lock(vfsmount_lock);
49966+
49967+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49968+#ifdef CONFIG_NET
49969+ mnt == sock_mnt ||
49970+#endif
49971+#ifdef CONFIG_HUGETLBFS
49972+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49973+#endif
49974+ /* ignore Eric Biederman */
49975+ IS_PRIVATE(l_dentry->d_inode))) {
49976+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49977+ goto out;
49978+ }
49979+
49980+ for (;;) {
49981+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49982+ break;
49983+
49984+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49985+ if (mnt->mnt_parent == mnt)
49986+ break;
49987+
49988+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49989+ if (retval != NULL)
49990+ goto out;
49991+
49992+ dentry = mnt->mnt_mountpoint;
49993+ mnt = mnt->mnt_parent;
49994+ continue;
49995+ }
49996+
49997+ parent = dentry->d_parent;
49998+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49999+ if (retval != NULL)
50000+ goto out;
50001+
50002+ dentry = parent;
50003+ }
50004+
50005+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50006+
50007+ /* real_root is pinned so we don't have to hold a reference */
50008+ if (retval == NULL)
50009+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50010+out:
50011+ br_read_unlock(vfsmount_lock);
50012+ write_sequnlock(&rename_lock);
50013+
50014+ BUG_ON(retval == NULL);
50015+
50016+ return retval;
50017+}
50018+
50019+static __inline__ struct acl_object_label *
50020+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50021+ const struct acl_subject_label *subj)
50022+{
50023+ char *path = NULL;
50024+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50025+}
50026+
50027+static __inline__ struct acl_object_label *
50028+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50029+ const struct acl_subject_label *subj)
50030+{
50031+ char *path = NULL;
50032+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50033+}
50034+
50035+static __inline__ struct acl_object_label *
50036+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50037+ const struct acl_subject_label *subj, char *path)
50038+{
50039+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50040+}
50041+
50042+static struct acl_subject_label *
50043+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50044+ const struct acl_role_label *role)
50045+{
50046+ struct dentry *dentry = (struct dentry *) l_dentry;
50047+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50048+ struct acl_subject_label *retval;
50049+ struct dentry *parent;
50050+
50051+ write_seqlock(&rename_lock);
50052+ br_read_lock(vfsmount_lock);
50053+
50054+ for (;;) {
50055+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50056+ break;
50057+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50058+ if (mnt->mnt_parent == mnt)
50059+ break;
50060+
50061+ spin_lock(&dentry->d_lock);
50062+ read_lock(&gr_inode_lock);
50063+ retval =
50064+ lookup_acl_subj_label(dentry->d_inode->i_ino,
50065+ __get_dev(dentry), role);
50066+ read_unlock(&gr_inode_lock);
50067+ spin_unlock(&dentry->d_lock);
50068+ if (retval != NULL)
50069+ goto out;
50070+
50071+ dentry = mnt->mnt_mountpoint;
50072+ mnt = mnt->mnt_parent;
50073+ continue;
50074+ }
50075+
50076+ spin_lock(&dentry->d_lock);
50077+ read_lock(&gr_inode_lock);
50078+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50079+ __get_dev(dentry), role);
50080+ read_unlock(&gr_inode_lock);
50081+ parent = dentry->d_parent;
50082+ spin_unlock(&dentry->d_lock);
50083+
50084+ if (retval != NULL)
50085+ goto out;
50086+
50087+ dentry = parent;
50088+ }
50089+
50090+ spin_lock(&dentry->d_lock);
50091+ read_lock(&gr_inode_lock);
50092+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50093+ __get_dev(dentry), role);
50094+ read_unlock(&gr_inode_lock);
50095+ spin_unlock(&dentry->d_lock);
50096+
50097+ if (unlikely(retval == NULL)) {
50098+ /* real_root is pinned, we don't need to hold a reference */
50099+ read_lock(&gr_inode_lock);
50100+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50101+ __get_dev(real_root.dentry), role);
50102+ read_unlock(&gr_inode_lock);
50103+ }
50104+out:
50105+ br_read_unlock(vfsmount_lock);
50106+ write_sequnlock(&rename_lock);
50107+
50108+ BUG_ON(retval == NULL);
50109+
50110+ return retval;
50111+}
50112+
50113+static void
50114+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50115+{
50116+ struct task_struct *task = current;
50117+ const struct cred *cred = current_cred();
50118+
50119+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50120+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50121+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50122+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50123+
50124+ return;
50125+}
50126+
50127+static void
50128+gr_log_learn_sysctl(const char *path, const __u32 mode)
50129+{
50130+ struct task_struct *task = current;
50131+ const struct cred *cred = current_cred();
50132+
50133+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50134+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50135+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50136+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50137+
50138+ return;
50139+}
50140+
50141+static void
50142+gr_log_learn_id_change(const char type, const unsigned int real,
50143+ const unsigned int effective, const unsigned int fs)
50144+{
50145+ struct task_struct *task = current;
50146+ const struct cred *cred = current_cred();
50147+
50148+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50149+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50150+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50151+ type, real, effective, fs, &task->signal->saved_ip);
50152+
50153+ return;
50154+}
50155+
50156+__u32
50157+gr_search_file(const struct dentry * dentry, const __u32 mode,
50158+ const struct vfsmount * mnt)
50159+{
50160+ __u32 retval = mode;
50161+ struct acl_subject_label *curracl;
50162+ struct acl_object_label *currobj;
50163+
50164+ if (unlikely(!(gr_status & GR_READY)))
50165+ return (mode & ~GR_AUDITS);
50166+
50167+ curracl = current->acl;
50168+
50169+ currobj = chk_obj_label(dentry, mnt, curracl);
50170+ retval = currobj->mode & mode;
50171+
50172+ /* if we're opening a specified transfer file for writing
50173+ (e.g. /dev/initctl), then transfer our role to init
50174+ */
50175+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50176+ current->role->roletype & GR_ROLE_PERSIST)) {
50177+ struct task_struct *task = init_pid_ns.child_reaper;
50178+
50179+ if (task->role != current->role) {
50180+ task->acl_sp_role = 0;
50181+ task->acl_role_id = current->acl_role_id;
50182+ task->role = current->role;
50183+ rcu_read_lock();
50184+ read_lock(&grsec_exec_file_lock);
50185+ gr_apply_subject_to_task(task);
50186+ read_unlock(&grsec_exec_file_lock);
50187+ rcu_read_unlock();
50188+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50189+ }
50190+ }
50191+
50192+ if (unlikely
50193+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50194+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50195+ __u32 new_mode = mode;
50196+
50197+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50198+
50199+ retval = new_mode;
50200+
50201+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50202+ new_mode |= GR_INHERIT;
50203+
50204+ if (!(mode & GR_NOLEARN))
50205+ gr_log_learn(dentry, mnt, new_mode);
50206+ }
50207+
50208+ return retval;
50209+}
50210+
50211+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50212+ const struct dentry *parent,
50213+ const struct vfsmount *mnt)
50214+{
50215+ struct name_entry *match;
50216+ struct acl_object_label *matchpo;
50217+ struct acl_subject_label *curracl;
50218+ char *path;
50219+
50220+ if (unlikely(!(gr_status & GR_READY)))
50221+ return NULL;
50222+
50223+ preempt_disable();
50224+ path = gr_to_filename_rbac(new_dentry, mnt);
50225+ match = lookup_name_entry_create(path);
50226+
50227+ curracl = current->acl;
50228+
50229+ if (match) {
50230+ read_lock(&gr_inode_lock);
50231+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50232+ read_unlock(&gr_inode_lock);
50233+
50234+ if (matchpo) {
50235+ preempt_enable();
50236+ return matchpo;
50237+ }
50238+ }
50239+
50240+ // lookup parent
50241+
50242+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50243+
50244+ preempt_enable();
50245+ return matchpo;
50246+}
50247+
50248+__u32
50249+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50250+ const struct vfsmount * mnt, const __u32 mode)
50251+{
50252+ struct acl_object_label *matchpo;
50253+ __u32 retval;
50254+
50255+ if (unlikely(!(gr_status & GR_READY)))
50256+ return (mode & ~GR_AUDITS);
50257+
50258+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50259+
50260+ retval = matchpo->mode & mode;
50261+
50262+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50263+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50264+ __u32 new_mode = mode;
50265+
50266+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50267+
50268+ gr_log_learn(new_dentry, mnt, new_mode);
50269+ return new_mode;
50270+ }
50271+
50272+ return retval;
50273+}
50274+
50275+__u32
50276+gr_check_link(const struct dentry * new_dentry,
50277+ const struct dentry * parent_dentry,
50278+ const struct vfsmount * parent_mnt,
50279+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50280+{
50281+ struct acl_object_label *obj;
50282+ __u32 oldmode, newmode;
50283+ __u32 needmode;
50284+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50285+ GR_DELETE | GR_INHERIT;
50286+
50287+ if (unlikely(!(gr_status & GR_READY)))
50288+ return (GR_CREATE | GR_LINK);
50289+
50290+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50291+ oldmode = obj->mode;
50292+
50293+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50294+ newmode = obj->mode;
50295+
50296+ needmode = newmode & checkmodes;
50297+
50298+ // old name for hardlink must have at least the permissions of the new name
50299+ if ((oldmode & needmode) != needmode)
50300+ goto bad;
50301+
50302+ // if old name had restrictions/auditing, make sure the new name does as well
50303+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50304+
50305+ // don't allow hardlinking of suid/sgid files without permission
50306+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50307+ needmode |= GR_SETID;
50308+
50309+ if ((newmode & needmode) != needmode)
50310+ goto bad;
50311+
50312+ // enforce minimum permissions
50313+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50314+ return newmode;
50315+bad:
50316+ needmode = oldmode;
50317+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50318+ needmode |= GR_SETID;
50319+
50320+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50321+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50322+ return (GR_CREATE | GR_LINK);
50323+ } else if (newmode & GR_SUPPRESS)
50324+ return GR_SUPPRESS;
50325+ else
50326+ return 0;
50327+}
50328+
50329+int
50330+gr_check_hidden_task(const struct task_struct *task)
50331+{
50332+ if (unlikely(!(gr_status & GR_READY)))
50333+ return 0;
50334+
50335+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50336+ return 1;
50337+
50338+ return 0;
50339+}
50340+
50341+int
50342+gr_check_protected_task(const struct task_struct *task)
50343+{
50344+ if (unlikely(!(gr_status & GR_READY) || !task))
50345+ return 0;
50346+
50347+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50348+ task->acl != current->acl)
50349+ return 1;
50350+
50351+ return 0;
50352+}
50353+
50354+int
50355+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50356+{
50357+ struct task_struct *p;
50358+ int ret = 0;
50359+
50360+ if (unlikely(!(gr_status & GR_READY) || !pid))
50361+ return ret;
50362+
50363+ read_lock(&tasklist_lock);
50364+ do_each_pid_task(pid, type, p) {
50365+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50366+ p->acl != current->acl) {
50367+ ret = 1;
50368+ goto out;
50369+ }
50370+ } while_each_pid_task(pid, type, p);
50371+out:
50372+ read_unlock(&tasklist_lock);
50373+
50374+ return ret;
50375+}
50376+
50377+void
50378+gr_copy_label(struct task_struct *tsk)
50379+{
50380+ tsk->signal->used_accept = 0;
50381+ tsk->acl_sp_role = 0;
50382+ tsk->acl_role_id = current->acl_role_id;
50383+ tsk->acl = current->acl;
50384+ tsk->role = current->role;
50385+ tsk->signal->curr_ip = current->signal->curr_ip;
50386+ tsk->signal->saved_ip = current->signal->saved_ip;
50387+ if (current->exec_file)
50388+ get_file(current->exec_file);
50389+ tsk->exec_file = current->exec_file;
50390+ tsk->is_writable = current->is_writable;
50391+ if (unlikely(current->signal->used_accept)) {
50392+ current->signal->curr_ip = 0;
50393+ current->signal->saved_ip = 0;
50394+ }
50395+
50396+ return;
50397+}
50398+
50399+static void
50400+gr_set_proc_res(struct task_struct *task)
50401+{
50402+ struct acl_subject_label *proc;
50403+ unsigned short i;
50404+
50405+ proc = task->acl;
50406+
50407+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50408+ return;
50409+
50410+ for (i = 0; i < RLIM_NLIMITS; i++) {
50411+ if (!(proc->resmask & (1 << i)))
50412+ continue;
50413+
50414+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50415+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50416+ }
50417+
50418+ return;
50419+}
50420+
50421+extern int __gr_process_user_ban(struct user_struct *user);
50422+
50423+int
50424+gr_check_user_change(int real, int effective, int fs)
50425+{
50426+ unsigned int i;
50427+ __u16 num;
50428+ uid_t *uidlist;
50429+ int curuid;
50430+ int realok = 0;
50431+ int effectiveok = 0;
50432+ int fsok = 0;
50433+
50434+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50435+ struct user_struct *user;
50436+
50437+ if (real == -1)
50438+ goto skipit;
50439+
50440+ user = find_user(real);
50441+ if (user == NULL)
50442+ goto skipit;
50443+
50444+ if (__gr_process_user_ban(user)) {
50445+ /* for find_user */
50446+ free_uid(user);
50447+ return 1;
50448+ }
50449+
50450+ /* for find_user */
50451+ free_uid(user);
50452+
50453+skipit:
50454+#endif
50455+
50456+ if (unlikely(!(gr_status & GR_READY)))
50457+ return 0;
50458+
50459+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50460+ gr_log_learn_id_change('u', real, effective, fs);
50461+
50462+ num = current->acl->user_trans_num;
50463+ uidlist = current->acl->user_transitions;
50464+
50465+ if (uidlist == NULL)
50466+ return 0;
50467+
50468+ if (real == -1)
50469+ realok = 1;
50470+ if (effective == -1)
50471+ effectiveok = 1;
50472+ if (fs == -1)
50473+ fsok = 1;
50474+
50475+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50476+ for (i = 0; i < num; i++) {
50477+ curuid = (int)uidlist[i];
50478+ if (real == curuid)
50479+ realok = 1;
50480+ if (effective == curuid)
50481+ effectiveok = 1;
50482+ if (fs == curuid)
50483+ fsok = 1;
50484+ }
50485+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50486+ for (i = 0; i < num; i++) {
50487+ curuid = (int)uidlist[i];
50488+ if (real == curuid)
50489+ break;
50490+ if (effective == curuid)
50491+ break;
50492+ if (fs == curuid)
50493+ break;
50494+ }
50495+ /* not in deny list */
50496+ if (i == num) {
50497+ realok = 1;
50498+ effectiveok = 1;
50499+ fsok = 1;
50500+ }
50501+ }
50502+
50503+ if (realok && effectiveok && fsok)
50504+ return 0;
50505+ else {
50506+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50507+ return 1;
50508+ }
50509+}
50510+
50511+int
50512+gr_check_group_change(int real, int effective, int fs)
50513+{
50514+ unsigned int i;
50515+ __u16 num;
50516+ gid_t *gidlist;
50517+ int curgid;
50518+ int realok = 0;
50519+ int effectiveok = 0;
50520+ int fsok = 0;
50521+
50522+ if (unlikely(!(gr_status & GR_READY)))
50523+ return 0;
50524+
50525+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50526+ gr_log_learn_id_change('g', real, effective, fs);
50527+
50528+ num = current->acl->group_trans_num;
50529+ gidlist = current->acl->group_transitions;
50530+
50531+ if (gidlist == NULL)
50532+ return 0;
50533+
50534+ if (real == -1)
50535+ realok = 1;
50536+ if (effective == -1)
50537+ effectiveok = 1;
50538+ if (fs == -1)
50539+ fsok = 1;
50540+
50541+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50542+ for (i = 0; i < num; i++) {
50543+ curgid = (int)gidlist[i];
50544+ if (real == curgid)
50545+ realok = 1;
50546+ if (effective == curgid)
50547+ effectiveok = 1;
50548+ if (fs == curgid)
50549+ fsok = 1;
50550+ }
50551+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50552+ for (i = 0; i < num; i++) {
50553+ curgid = (int)gidlist[i];
50554+ if (real == curgid)
50555+ break;
50556+ if (effective == curgid)
50557+ break;
50558+ if (fs == curgid)
50559+ break;
50560+ }
50561+ /* not in deny list */
50562+ if (i == num) {
50563+ realok = 1;
50564+ effectiveok = 1;
50565+ fsok = 1;
50566+ }
50567+ }
50568+
50569+ if (realok && effectiveok && fsok)
50570+ return 0;
50571+ else {
50572+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50573+ return 1;
50574+ }
50575+}
50576+
50577+void
50578+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50579+{
50580+ struct acl_role_label *role = task->role;
50581+ struct acl_subject_label *subj = NULL;
50582+ struct acl_object_label *obj;
50583+ struct file *filp;
50584+
50585+ if (unlikely(!(gr_status & GR_READY)))
50586+ return;
50587+
50588+ filp = task->exec_file;
50589+
50590+ /* kernel process, we'll give them the kernel role */
50591+ if (unlikely(!filp)) {
50592+ task->role = kernel_role;
50593+ task->acl = kernel_role->root_label;
50594+ return;
50595+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50596+ role = lookup_acl_role_label(task, uid, gid);
50597+
50598+ /* perform subject lookup in possibly new role
50599+ we can use this result below in the case where role == task->role
50600+ */
50601+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50602+
50603+ /* if we changed uid/gid, but result in the same role
50604+ and are using inheritance, don't lose the inherited subject
50605+ if current subject is other than what normal lookup
50606+ would result in, we arrived via inheritance, don't
50607+ lose subject
50608+ */
50609+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50610+ (subj == task->acl)))
50611+ task->acl = subj;
50612+
50613+ task->role = role;
50614+
50615+ task->is_writable = 0;
50616+
50617+ /* ignore additional mmap checks for processes that are writable
50618+ by the default ACL */
50619+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50620+ if (unlikely(obj->mode & GR_WRITE))
50621+ task->is_writable = 1;
50622+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50623+ if (unlikely(obj->mode & GR_WRITE))
50624+ task->is_writable = 1;
50625+
50626+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50627+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50628+#endif
50629+
50630+ gr_set_proc_res(task);
50631+
50632+ return;
50633+}
50634+
50635+int
50636+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50637+ const int unsafe_flags)
50638+{
50639+ struct task_struct *task = current;
50640+ struct acl_subject_label *newacl;
50641+ struct acl_object_label *obj;
50642+ __u32 retmode;
50643+
50644+ if (unlikely(!(gr_status & GR_READY)))
50645+ return 0;
50646+
50647+ newacl = chk_subj_label(dentry, mnt, task->role);
50648+
50649+ task_lock(task);
50650+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50651+ !(task->role->roletype & GR_ROLE_GOD) &&
50652+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50653+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50654+ task_unlock(task);
50655+ if (unsafe_flags & LSM_UNSAFE_SHARE)
50656+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50657+ else
50658+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50659+ return -EACCES;
50660+ }
50661+ task_unlock(task);
50662+
50663+ obj = chk_obj_label(dentry, mnt, task->acl);
50664+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50665+
50666+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50667+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50668+ if (obj->nested)
50669+ task->acl = obj->nested;
50670+ else
50671+ task->acl = newacl;
50672+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50673+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50674+
50675+ task->is_writable = 0;
50676+
50677+ /* ignore additional mmap checks for processes that are writable
50678+ by the default ACL */
50679+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50680+ if (unlikely(obj->mode & GR_WRITE))
50681+ task->is_writable = 1;
50682+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50683+ if (unlikely(obj->mode & GR_WRITE))
50684+ task->is_writable = 1;
50685+
50686+ gr_set_proc_res(task);
50687+
50688+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50689+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50690+#endif
50691+ return 0;
50692+}
50693+
50694+/* always called with valid inodev ptr */
50695+static void
50696+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50697+{
50698+ struct acl_object_label *matchpo;
50699+ struct acl_subject_label *matchps;
50700+ struct acl_subject_label *subj;
50701+ struct acl_role_label *role;
50702+ unsigned int x;
50703+
50704+ FOR_EACH_ROLE_START(role)
50705+ FOR_EACH_SUBJECT_START(role, subj, x)
50706+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50707+ matchpo->mode |= GR_DELETED;
50708+ FOR_EACH_SUBJECT_END(subj,x)
50709+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50710+ if (subj->inode == ino && subj->device == dev)
50711+ subj->mode |= GR_DELETED;
50712+ FOR_EACH_NESTED_SUBJECT_END(subj)
50713+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50714+ matchps->mode |= GR_DELETED;
50715+ FOR_EACH_ROLE_END(role)
50716+
50717+ inodev->nentry->deleted = 1;
50718+
50719+ return;
50720+}
50721+
50722+void
50723+gr_handle_delete(const ino_t ino, const dev_t dev)
50724+{
50725+ struct inodev_entry *inodev;
50726+
50727+ if (unlikely(!(gr_status & GR_READY)))
50728+ return;
50729+
50730+ write_lock(&gr_inode_lock);
50731+ inodev = lookup_inodev_entry(ino, dev);
50732+ if (inodev != NULL)
50733+ do_handle_delete(inodev, ino, dev);
50734+ write_unlock(&gr_inode_lock);
50735+
50736+ return;
50737+}
50738+
50739+static void
50740+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50741+ const ino_t newinode, const dev_t newdevice,
50742+ struct acl_subject_label *subj)
50743+{
50744+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50745+ struct acl_object_label *match;
50746+
50747+ match = subj->obj_hash[index];
50748+
50749+ while (match && (match->inode != oldinode ||
50750+ match->device != olddevice ||
50751+ !(match->mode & GR_DELETED)))
50752+ match = match->next;
50753+
50754+ if (match && (match->inode == oldinode)
50755+ && (match->device == olddevice)
50756+ && (match->mode & GR_DELETED)) {
50757+ if (match->prev == NULL) {
50758+ subj->obj_hash[index] = match->next;
50759+ if (match->next != NULL)
50760+ match->next->prev = NULL;
50761+ } else {
50762+ match->prev->next = match->next;
50763+ if (match->next != NULL)
50764+ match->next->prev = match->prev;
50765+ }
50766+ match->prev = NULL;
50767+ match->next = NULL;
50768+ match->inode = newinode;
50769+ match->device = newdevice;
50770+ match->mode &= ~GR_DELETED;
50771+
50772+ insert_acl_obj_label(match, subj);
50773+ }
50774+
50775+ return;
50776+}
50777+
50778+static void
50779+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50780+ const ino_t newinode, const dev_t newdevice,
50781+ struct acl_role_label *role)
50782+{
50783+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50784+ struct acl_subject_label *match;
50785+
50786+ match = role->subj_hash[index];
50787+
50788+ while (match && (match->inode != oldinode ||
50789+ match->device != olddevice ||
50790+ !(match->mode & GR_DELETED)))
50791+ match = match->next;
50792+
50793+ if (match && (match->inode == oldinode)
50794+ && (match->device == olddevice)
50795+ && (match->mode & GR_DELETED)) {
50796+ if (match->prev == NULL) {
50797+ role->subj_hash[index] = match->next;
50798+ if (match->next != NULL)
50799+ match->next->prev = NULL;
50800+ } else {
50801+ match->prev->next = match->next;
50802+ if (match->next != NULL)
50803+ match->next->prev = match->prev;
50804+ }
50805+ match->prev = NULL;
50806+ match->next = NULL;
50807+ match->inode = newinode;
50808+ match->device = newdevice;
50809+ match->mode &= ~GR_DELETED;
50810+
50811+ insert_acl_subj_label(match, role);
50812+ }
50813+
50814+ return;
50815+}
50816+
50817+static void
50818+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50819+ const ino_t newinode, const dev_t newdevice)
50820+{
50821+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50822+ struct inodev_entry *match;
50823+
50824+ match = inodev_set.i_hash[index];
50825+
50826+ while (match && (match->nentry->inode != oldinode ||
50827+ match->nentry->device != olddevice || !match->nentry->deleted))
50828+ match = match->next;
50829+
50830+ if (match && (match->nentry->inode == oldinode)
50831+ && (match->nentry->device == olddevice) &&
50832+ match->nentry->deleted) {
50833+ if (match->prev == NULL) {
50834+ inodev_set.i_hash[index] = match->next;
50835+ if (match->next != NULL)
50836+ match->next->prev = NULL;
50837+ } else {
50838+ match->prev->next = match->next;
50839+ if (match->next != NULL)
50840+ match->next->prev = match->prev;
50841+ }
50842+ match->prev = NULL;
50843+ match->next = NULL;
50844+ match->nentry->inode = newinode;
50845+ match->nentry->device = newdevice;
50846+ match->nentry->deleted = 0;
50847+
50848+ insert_inodev_entry(match);
50849+ }
50850+
50851+ return;
50852+}
50853+
50854+static void
50855+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50856+{
50857+ struct acl_subject_label *subj;
50858+ struct acl_role_label *role;
50859+ unsigned int x;
50860+
50861+ FOR_EACH_ROLE_START(role)
50862+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50863+
50864+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50865+ if ((subj->inode == ino) && (subj->device == dev)) {
50866+ subj->inode = ino;
50867+ subj->device = dev;
50868+ }
50869+ FOR_EACH_NESTED_SUBJECT_END(subj)
50870+ FOR_EACH_SUBJECT_START(role, subj, x)
50871+ update_acl_obj_label(matchn->inode, matchn->device,
50872+ ino, dev, subj);
50873+ FOR_EACH_SUBJECT_END(subj,x)
50874+ FOR_EACH_ROLE_END(role)
50875+
50876+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50877+
50878+ return;
50879+}
50880+
50881+static void
50882+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50883+ const struct vfsmount *mnt)
50884+{
50885+ ino_t ino = dentry->d_inode->i_ino;
50886+ dev_t dev = __get_dev(dentry);
50887+
50888+ __do_handle_create(matchn, ino, dev);
50889+
50890+ return;
50891+}
50892+
50893+void
50894+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50895+{
50896+ struct name_entry *matchn;
50897+
50898+ if (unlikely(!(gr_status & GR_READY)))
50899+ return;
50900+
50901+ preempt_disable();
50902+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50903+
50904+ if (unlikely((unsigned long)matchn)) {
50905+ write_lock(&gr_inode_lock);
50906+ do_handle_create(matchn, dentry, mnt);
50907+ write_unlock(&gr_inode_lock);
50908+ }
50909+ preempt_enable();
50910+
50911+ return;
50912+}
50913+
50914+void
50915+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50916+{
50917+ struct name_entry *matchn;
50918+
50919+ if (unlikely(!(gr_status & GR_READY)))
50920+ return;
50921+
50922+ preempt_disable();
50923+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50924+
50925+ if (unlikely((unsigned long)matchn)) {
50926+ write_lock(&gr_inode_lock);
50927+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50928+ write_unlock(&gr_inode_lock);
50929+ }
50930+ preempt_enable();
50931+
50932+ return;
50933+}
50934+
50935+void
50936+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50937+ struct dentry *old_dentry,
50938+ struct dentry *new_dentry,
50939+ struct vfsmount *mnt, const __u8 replace)
50940+{
50941+ struct name_entry *matchn;
50942+ struct inodev_entry *inodev;
50943+ struct inode *inode = new_dentry->d_inode;
50944+ ino_t old_ino = old_dentry->d_inode->i_ino;
50945+ dev_t old_dev = __get_dev(old_dentry);
50946+
50947+ /* vfs_rename swaps the name and parent link for old_dentry and
50948+ new_dentry
50949+ at this point, old_dentry has the new name, parent link, and inode
50950+ for the renamed file
50951+ if a file is being replaced by a rename, new_dentry has the inode
50952+ and name for the replaced file
50953+ */
50954+
50955+ if (unlikely(!(gr_status & GR_READY)))
50956+ return;
50957+
50958+ preempt_disable();
50959+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50960+
50961+ /* we wouldn't have to check d_inode if it weren't for
50962+ NFS silly-renaming
50963+ */
50964+
50965+ write_lock(&gr_inode_lock);
50966+ if (unlikely(replace && inode)) {
50967+ ino_t new_ino = inode->i_ino;
50968+ dev_t new_dev = __get_dev(new_dentry);
50969+
50970+ inodev = lookup_inodev_entry(new_ino, new_dev);
50971+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50972+ do_handle_delete(inodev, new_ino, new_dev);
50973+ }
50974+
50975+ inodev = lookup_inodev_entry(old_ino, old_dev);
50976+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50977+ do_handle_delete(inodev, old_ino, old_dev);
50978+
50979+ if (unlikely((unsigned long)matchn))
50980+ do_handle_create(matchn, old_dentry, mnt);
50981+
50982+ write_unlock(&gr_inode_lock);
50983+ preempt_enable();
50984+
50985+ return;
50986+}
50987+
50988+static int
50989+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50990+ unsigned char **sum)
50991+{
50992+ struct acl_role_label *r;
50993+ struct role_allowed_ip *ipp;
50994+ struct role_transition *trans;
50995+ unsigned int i;
50996+ int found = 0;
50997+ u32 curr_ip = current->signal->curr_ip;
50998+
50999+ current->signal->saved_ip = curr_ip;
51000+
51001+ /* check transition table */
51002+
51003+ for (trans = current->role->transitions; trans; trans = trans->next) {
51004+ if (!strcmp(rolename, trans->rolename)) {
51005+ found = 1;
51006+ break;
51007+ }
51008+ }
51009+
51010+ if (!found)
51011+ return 0;
51012+
51013+ /* handle special roles that do not require authentication
51014+ and check ip */
51015+
51016+ FOR_EACH_ROLE_START(r)
51017+ if (!strcmp(rolename, r->rolename) &&
51018+ (r->roletype & GR_ROLE_SPECIAL)) {
51019+ found = 0;
51020+ if (r->allowed_ips != NULL) {
51021+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51022+ if ((ntohl(curr_ip) & ipp->netmask) ==
51023+ (ntohl(ipp->addr) & ipp->netmask))
51024+ found = 1;
51025+ }
51026+ } else
51027+ found = 2;
51028+ if (!found)
51029+ return 0;
51030+
51031+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51032+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51033+ *salt = NULL;
51034+ *sum = NULL;
51035+ return 1;
51036+ }
51037+ }
51038+ FOR_EACH_ROLE_END(r)
51039+
51040+ for (i = 0; i < num_sprole_pws; i++) {
51041+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51042+ *salt = acl_special_roles[i]->salt;
51043+ *sum = acl_special_roles[i]->sum;
51044+ return 1;
51045+ }
51046+ }
51047+
51048+ return 0;
51049+}
51050+
51051+static void
51052+assign_special_role(char *rolename)
51053+{
51054+ struct acl_object_label *obj;
51055+ struct acl_role_label *r;
51056+ struct acl_role_label *assigned = NULL;
51057+ struct task_struct *tsk;
51058+ struct file *filp;
51059+
51060+ FOR_EACH_ROLE_START(r)
51061+ if (!strcmp(rolename, r->rolename) &&
51062+ (r->roletype & GR_ROLE_SPECIAL)) {
51063+ assigned = r;
51064+ break;
51065+ }
51066+ FOR_EACH_ROLE_END(r)
51067+
51068+ if (!assigned)
51069+ return;
51070+
51071+ read_lock(&tasklist_lock);
51072+ read_lock(&grsec_exec_file_lock);
51073+
51074+ tsk = current->real_parent;
51075+ if (tsk == NULL)
51076+ goto out_unlock;
51077+
51078+ filp = tsk->exec_file;
51079+ if (filp == NULL)
51080+ goto out_unlock;
51081+
51082+ tsk->is_writable = 0;
51083+
51084+ tsk->acl_sp_role = 1;
51085+ tsk->acl_role_id = ++acl_sp_role_value;
51086+ tsk->role = assigned;
51087+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51088+
51089+ /* ignore additional mmap checks for processes that are writable
51090+ by the default ACL */
51091+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51092+ if (unlikely(obj->mode & GR_WRITE))
51093+ tsk->is_writable = 1;
51094+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51095+ if (unlikely(obj->mode & GR_WRITE))
51096+ tsk->is_writable = 1;
51097+
51098+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51099+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51100+#endif
51101+
51102+out_unlock:
51103+ read_unlock(&grsec_exec_file_lock);
51104+ read_unlock(&tasklist_lock);
51105+ return;
51106+}
51107+
51108+int gr_check_secure_terminal(struct task_struct *task)
51109+{
51110+ struct task_struct *p, *p2, *p3;
51111+ struct files_struct *files;
51112+ struct fdtable *fdt;
51113+ struct file *our_file = NULL, *file;
51114+ int i;
51115+
51116+ if (task->signal->tty == NULL)
51117+ return 1;
51118+
51119+ files = get_files_struct(task);
51120+ if (files != NULL) {
51121+ rcu_read_lock();
51122+ fdt = files_fdtable(files);
51123+ for (i=0; i < fdt->max_fds; i++) {
51124+ file = fcheck_files(files, i);
51125+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51126+ get_file(file);
51127+ our_file = file;
51128+ }
51129+ }
51130+ rcu_read_unlock();
51131+ put_files_struct(files);
51132+ }
51133+
51134+ if (our_file == NULL)
51135+ return 1;
51136+
51137+ read_lock(&tasklist_lock);
51138+ do_each_thread(p2, p) {
51139+ files = get_files_struct(p);
51140+ if (files == NULL ||
51141+ (p->signal && p->signal->tty == task->signal->tty)) {
51142+ if (files != NULL)
51143+ put_files_struct(files);
51144+ continue;
51145+ }
51146+ rcu_read_lock();
51147+ fdt = files_fdtable(files);
51148+ for (i=0; i < fdt->max_fds; i++) {
51149+ file = fcheck_files(files, i);
51150+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51151+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51152+ p3 = task;
51153+ while (p3->pid > 0) {
51154+ if (p3 == p)
51155+ break;
51156+ p3 = p3->real_parent;
51157+ }
51158+ if (p3 == p)
51159+ break;
51160+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51161+ gr_handle_alertkill(p);
51162+ rcu_read_unlock();
51163+ put_files_struct(files);
51164+ read_unlock(&tasklist_lock);
51165+ fput(our_file);
51166+ return 0;
51167+ }
51168+ }
51169+ rcu_read_unlock();
51170+ put_files_struct(files);
51171+ } while_each_thread(p2, p);
51172+ read_unlock(&tasklist_lock);
51173+
51174+ fput(our_file);
51175+ return 1;
51176+}
51177+
51178+ssize_t
51179+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51180+{
51181+ struct gr_arg_wrapper uwrap;
51182+ unsigned char *sprole_salt = NULL;
51183+ unsigned char *sprole_sum = NULL;
51184+ int error = sizeof (struct gr_arg_wrapper);
51185+ int error2 = 0;
51186+
51187+ mutex_lock(&gr_dev_mutex);
51188+
51189+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51190+ error = -EPERM;
51191+ goto out;
51192+ }
51193+
51194+ if (count != sizeof (struct gr_arg_wrapper)) {
51195+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51196+ error = -EINVAL;
51197+ goto out;
51198+ }
51199+
51200+
51201+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51202+ gr_auth_expires = 0;
51203+ gr_auth_attempts = 0;
51204+ }
51205+
51206+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51207+ error = -EFAULT;
51208+ goto out;
51209+ }
51210+
51211+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51212+ error = -EINVAL;
51213+ goto out;
51214+ }
51215+
51216+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51217+ error = -EFAULT;
51218+ goto out;
51219+ }
51220+
51221+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51222+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51223+ time_after(gr_auth_expires, get_seconds())) {
51224+ error = -EBUSY;
51225+ goto out;
51226+ }
51227+
51228+ /* if non-root trying to do anything other than use a special role,
51229+ do not attempt authentication, do not count towards authentication
51230+ locking
51231+ */
51232+
51233+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51234+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51235+ current_uid()) {
51236+ error = -EPERM;
51237+ goto out;
51238+ }
51239+
51240+ /* ensure pw and special role name are null terminated */
51241+
51242+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51243+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51244+
51245+ /* Okay.
51246+ * We have our enough of the argument structure..(we have yet
51247+ * to copy_from_user the tables themselves) . Copy the tables
51248+ * only if we need them, i.e. for loading operations. */
51249+
51250+ switch (gr_usermode->mode) {
51251+ case GR_STATUS:
51252+ if (gr_status & GR_READY) {
51253+ error = 1;
51254+ if (!gr_check_secure_terminal(current))
51255+ error = 3;
51256+ } else
51257+ error = 2;
51258+ goto out;
51259+ case GR_SHUTDOWN:
51260+ if ((gr_status & GR_READY)
51261+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51262+ pax_open_kernel();
51263+ gr_status &= ~GR_READY;
51264+ pax_close_kernel();
51265+
51266+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51267+ free_variables();
51268+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51269+ memset(gr_system_salt, 0, GR_SALT_LEN);
51270+ memset(gr_system_sum, 0, GR_SHA_LEN);
51271+ } else if (gr_status & GR_READY) {
51272+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51273+ error = -EPERM;
51274+ } else {
51275+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51276+ error = -EAGAIN;
51277+ }
51278+ break;
51279+ case GR_ENABLE:
51280+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51281+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51282+ else {
51283+ if (gr_status & GR_READY)
51284+ error = -EAGAIN;
51285+ else
51286+ error = error2;
51287+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51288+ }
51289+ break;
51290+ case GR_RELOAD:
51291+ if (!(gr_status & GR_READY)) {
51292+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51293+ error = -EAGAIN;
51294+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51295+ preempt_disable();
51296+
51297+ pax_open_kernel();
51298+ gr_status &= ~GR_READY;
51299+ pax_close_kernel();
51300+
51301+ free_variables();
51302+ if (!(error2 = gracl_init(gr_usermode))) {
51303+ preempt_enable();
51304+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51305+ } else {
51306+ preempt_enable();
51307+ error = error2;
51308+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51309+ }
51310+ } else {
51311+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51312+ error = -EPERM;
51313+ }
51314+ break;
51315+ case GR_SEGVMOD:
51316+ if (unlikely(!(gr_status & GR_READY))) {
51317+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51318+ error = -EAGAIN;
51319+ break;
51320+ }
51321+
51322+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51323+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51324+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51325+ struct acl_subject_label *segvacl;
51326+ segvacl =
51327+ lookup_acl_subj_label(gr_usermode->segv_inode,
51328+ gr_usermode->segv_device,
51329+ current->role);
51330+ if (segvacl) {
51331+ segvacl->crashes = 0;
51332+ segvacl->expires = 0;
51333+ }
51334+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51335+ gr_remove_uid(gr_usermode->segv_uid);
51336+ }
51337+ } else {
51338+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51339+ error = -EPERM;
51340+ }
51341+ break;
51342+ case GR_SPROLE:
51343+ case GR_SPROLEPAM:
51344+ if (unlikely(!(gr_status & GR_READY))) {
51345+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51346+ error = -EAGAIN;
51347+ break;
51348+ }
51349+
51350+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51351+ current->role->expires = 0;
51352+ current->role->auth_attempts = 0;
51353+ }
51354+
51355+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51356+ time_after(current->role->expires, get_seconds())) {
51357+ error = -EBUSY;
51358+ goto out;
51359+ }
51360+
51361+ if (lookup_special_role_auth
51362+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51363+ && ((!sprole_salt && !sprole_sum)
51364+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51365+ char *p = "";
51366+ assign_special_role(gr_usermode->sp_role);
51367+ read_lock(&tasklist_lock);
51368+ if (current->real_parent)
51369+ p = current->real_parent->role->rolename;
51370+ read_unlock(&tasklist_lock);
51371+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51372+ p, acl_sp_role_value);
51373+ } else {
51374+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51375+ error = -EPERM;
51376+ if(!(current->role->auth_attempts++))
51377+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51378+
51379+ goto out;
51380+ }
51381+ break;
51382+ case GR_UNSPROLE:
51383+ if (unlikely(!(gr_status & GR_READY))) {
51384+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51385+ error = -EAGAIN;
51386+ break;
51387+ }
51388+
51389+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51390+ char *p = "";
51391+ int i = 0;
51392+
51393+ read_lock(&tasklist_lock);
51394+ if (current->real_parent) {
51395+ p = current->real_parent->role->rolename;
51396+ i = current->real_parent->acl_role_id;
51397+ }
51398+ read_unlock(&tasklist_lock);
51399+
51400+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51401+ gr_set_acls(1);
51402+ } else {
51403+ error = -EPERM;
51404+ goto out;
51405+ }
51406+ break;
51407+ default:
51408+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51409+ error = -EINVAL;
51410+ break;
51411+ }
51412+
51413+ if (error != -EPERM)
51414+ goto out;
51415+
51416+ if(!(gr_auth_attempts++))
51417+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51418+
51419+ out:
51420+ mutex_unlock(&gr_dev_mutex);
51421+ return error;
51422+}
51423+
51424+/* must be called with
51425+ rcu_read_lock();
51426+ read_lock(&tasklist_lock);
51427+ read_lock(&grsec_exec_file_lock);
51428+*/
51429+int gr_apply_subject_to_task(struct task_struct *task)
51430+{
51431+ struct acl_object_label *obj;
51432+ char *tmpname;
51433+ struct acl_subject_label *tmpsubj;
51434+ struct file *filp;
51435+ struct name_entry *nmatch;
51436+
51437+ filp = task->exec_file;
51438+ if (filp == NULL)
51439+ return 0;
51440+
51441+ /* the following is to apply the correct subject
51442+ on binaries running when the RBAC system
51443+ is enabled, when the binaries have been
51444+ replaced or deleted since their execution
51445+ -----
51446+ when the RBAC system starts, the inode/dev
51447+ from exec_file will be one the RBAC system
51448+ is unaware of. It only knows the inode/dev
51449+ of the present file on disk, or the absence
51450+ of it.
51451+ */
51452+ preempt_disable();
51453+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51454+
51455+ nmatch = lookup_name_entry(tmpname);
51456+ preempt_enable();
51457+ tmpsubj = NULL;
51458+ if (nmatch) {
51459+ if (nmatch->deleted)
51460+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51461+ else
51462+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51463+ if (tmpsubj != NULL)
51464+ task->acl = tmpsubj;
51465+ }
51466+ if (tmpsubj == NULL)
51467+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51468+ task->role);
51469+ if (task->acl) {
51470+ task->is_writable = 0;
51471+ /* ignore additional mmap checks for processes that are writable
51472+ by the default ACL */
51473+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51474+ if (unlikely(obj->mode & GR_WRITE))
51475+ task->is_writable = 1;
51476+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51477+ if (unlikely(obj->mode & GR_WRITE))
51478+ task->is_writable = 1;
51479+
51480+ gr_set_proc_res(task);
51481+
51482+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51483+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51484+#endif
51485+ } else {
51486+ return 1;
51487+ }
51488+
51489+ return 0;
51490+}
51491+
51492+int
51493+gr_set_acls(const int type)
51494+{
51495+ struct task_struct *task, *task2;
51496+ struct acl_role_label *role = current->role;
51497+ __u16 acl_role_id = current->acl_role_id;
51498+ const struct cred *cred;
51499+ int ret;
51500+
51501+ rcu_read_lock();
51502+ read_lock(&tasklist_lock);
51503+ read_lock(&grsec_exec_file_lock);
51504+ do_each_thread(task2, task) {
51505+ /* check to see if we're called from the exit handler,
51506+ if so, only replace ACLs that have inherited the admin
51507+ ACL */
51508+
51509+ if (type && (task->role != role ||
51510+ task->acl_role_id != acl_role_id))
51511+ continue;
51512+
51513+ task->acl_role_id = 0;
51514+ task->acl_sp_role = 0;
51515+
51516+ if (task->exec_file) {
51517+ cred = __task_cred(task);
51518+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51519+ ret = gr_apply_subject_to_task(task);
51520+ if (ret) {
51521+ read_unlock(&grsec_exec_file_lock);
51522+ read_unlock(&tasklist_lock);
51523+ rcu_read_unlock();
51524+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51525+ return ret;
51526+ }
51527+ } else {
51528+ // it's a kernel process
51529+ task->role = kernel_role;
51530+ task->acl = kernel_role->root_label;
51531+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51532+ task->acl->mode &= ~GR_PROCFIND;
51533+#endif
51534+ }
51535+ } while_each_thread(task2, task);
51536+ read_unlock(&grsec_exec_file_lock);
51537+ read_unlock(&tasklist_lock);
51538+ rcu_read_unlock();
51539+
51540+ return 0;
51541+}
51542+
51543+void
51544+gr_learn_resource(const struct task_struct *task,
51545+ const int res, const unsigned long wanted, const int gt)
51546+{
51547+ struct acl_subject_label *acl;
51548+ const struct cred *cred;
51549+
51550+ if (unlikely((gr_status & GR_READY) &&
51551+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51552+ goto skip_reslog;
51553+
51554+#ifdef CONFIG_GRKERNSEC_RESLOG
51555+ gr_log_resource(task, res, wanted, gt);
51556+#endif
51557+ skip_reslog:
51558+
51559+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51560+ return;
51561+
51562+ acl = task->acl;
51563+
51564+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51565+ !(acl->resmask & (1 << (unsigned short) res))))
51566+ return;
51567+
51568+ if (wanted >= acl->res[res].rlim_cur) {
51569+ unsigned long res_add;
51570+
51571+ res_add = wanted;
51572+ switch (res) {
51573+ case RLIMIT_CPU:
51574+ res_add += GR_RLIM_CPU_BUMP;
51575+ break;
51576+ case RLIMIT_FSIZE:
51577+ res_add += GR_RLIM_FSIZE_BUMP;
51578+ break;
51579+ case RLIMIT_DATA:
51580+ res_add += GR_RLIM_DATA_BUMP;
51581+ break;
51582+ case RLIMIT_STACK:
51583+ res_add += GR_RLIM_STACK_BUMP;
51584+ break;
51585+ case RLIMIT_CORE:
51586+ res_add += GR_RLIM_CORE_BUMP;
51587+ break;
51588+ case RLIMIT_RSS:
51589+ res_add += GR_RLIM_RSS_BUMP;
51590+ break;
51591+ case RLIMIT_NPROC:
51592+ res_add += GR_RLIM_NPROC_BUMP;
51593+ break;
51594+ case RLIMIT_NOFILE:
51595+ res_add += GR_RLIM_NOFILE_BUMP;
51596+ break;
51597+ case RLIMIT_MEMLOCK:
51598+ res_add += GR_RLIM_MEMLOCK_BUMP;
51599+ break;
51600+ case RLIMIT_AS:
51601+ res_add += GR_RLIM_AS_BUMP;
51602+ break;
51603+ case RLIMIT_LOCKS:
51604+ res_add += GR_RLIM_LOCKS_BUMP;
51605+ break;
51606+ case RLIMIT_SIGPENDING:
51607+ res_add += GR_RLIM_SIGPENDING_BUMP;
51608+ break;
51609+ case RLIMIT_MSGQUEUE:
51610+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51611+ break;
51612+ case RLIMIT_NICE:
51613+ res_add += GR_RLIM_NICE_BUMP;
51614+ break;
51615+ case RLIMIT_RTPRIO:
51616+ res_add += GR_RLIM_RTPRIO_BUMP;
51617+ break;
51618+ case RLIMIT_RTTIME:
51619+ res_add += GR_RLIM_RTTIME_BUMP;
51620+ break;
51621+ }
51622+
51623+ acl->res[res].rlim_cur = res_add;
51624+
51625+ if (wanted > acl->res[res].rlim_max)
51626+ acl->res[res].rlim_max = res_add;
51627+
51628+ /* only log the subject filename, since resource logging is supported for
51629+ single-subject learning only */
51630+ rcu_read_lock();
51631+ cred = __task_cred(task);
51632+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51633+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51634+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51635+ "", (unsigned long) res, &task->signal->saved_ip);
51636+ rcu_read_unlock();
51637+ }
51638+
51639+ return;
51640+}
51641+
51642+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51643+void
51644+pax_set_initial_flags(struct linux_binprm *bprm)
51645+{
51646+ struct task_struct *task = current;
51647+ struct acl_subject_label *proc;
51648+ unsigned long flags;
51649+
51650+ if (unlikely(!(gr_status & GR_READY)))
51651+ return;
51652+
51653+ flags = pax_get_flags(task);
51654+
51655+ proc = task->acl;
51656+
51657+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51658+ flags &= ~MF_PAX_PAGEEXEC;
51659+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51660+ flags &= ~MF_PAX_SEGMEXEC;
51661+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51662+ flags &= ~MF_PAX_RANDMMAP;
51663+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51664+ flags &= ~MF_PAX_EMUTRAMP;
51665+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51666+ flags &= ~MF_PAX_MPROTECT;
51667+
51668+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51669+ flags |= MF_PAX_PAGEEXEC;
51670+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51671+ flags |= MF_PAX_SEGMEXEC;
51672+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51673+ flags |= MF_PAX_RANDMMAP;
51674+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51675+ flags |= MF_PAX_EMUTRAMP;
51676+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51677+ flags |= MF_PAX_MPROTECT;
51678+
51679+ pax_set_flags(task, flags);
51680+
51681+ return;
51682+}
51683+#endif
51684+
51685+#ifdef CONFIG_SYSCTL
51686+/* Eric Biederman likes breaking userland ABI and every inode-based security
51687+ system to save 35kb of memory */
51688+
51689+/* we modify the passed in filename, but adjust it back before returning */
51690+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51691+{
51692+ struct name_entry *nmatch;
51693+ char *p, *lastp = NULL;
51694+ struct acl_object_label *obj = NULL, *tmp;
51695+ struct acl_subject_label *tmpsubj;
51696+ char c = '\0';
51697+
51698+ read_lock(&gr_inode_lock);
51699+
51700+ p = name + len - 1;
51701+ do {
51702+ nmatch = lookup_name_entry(name);
51703+ if (lastp != NULL)
51704+ *lastp = c;
51705+
51706+ if (nmatch == NULL)
51707+ goto next_component;
51708+ tmpsubj = current->acl;
51709+ do {
51710+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51711+ if (obj != NULL) {
51712+ tmp = obj->globbed;
51713+ while (tmp) {
51714+ if (!glob_match(tmp->filename, name)) {
51715+ obj = tmp;
51716+ goto found_obj;
51717+ }
51718+ tmp = tmp->next;
51719+ }
51720+ goto found_obj;
51721+ }
51722+ } while ((tmpsubj = tmpsubj->parent_subject));
51723+next_component:
51724+ /* end case */
51725+ if (p == name)
51726+ break;
51727+
51728+ while (*p != '/')
51729+ p--;
51730+ if (p == name)
51731+ lastp = p + 1;
51732+ else {
51733+ lastp = p;
51734+ p--;
51735+ }
51736+ c = *lastp;
51737+ *lastp = '\0';
51738+ } while (1);
51739+found_obj:
51740+ read_unlock(&gr_inode_lock);
51741+ /* obj returned will always be non-null */
51742+ return obj;
51743+}
51744+
51745+/* returns 0 when allowing, non-zero on error
51746+ op of 0 is used for readdir, so we don't log the names of hidden files
51747+*/
51748+__u32
51749+gr_handle_sysctl(const struct ctl_table *table, const int op)
51750+{
51751+ struct ctl_table *tmp;
51752+ const char *proc_sys = "/proc/sys";
51753+ char *path;
51754+ struct acl_object_label *obj;
51755+ unsigned short len = 0, pos = 0, depth = 0, i;
51756+ __u32 err = 0;
51757+ __u32 mode = 0;
51758+
51759+ if (unlikely(!(gr_status & GR_READY)))
51760+ return 0;
51761+
51762+ /* for now, ignore operations on non-sysctl entries if it's not a
51763+ readdir*/
51764+ if (table->child != NULL && op != 0)
51765+ return 0;
51766+
51767+ mode |= GR_FIND;
51768+ /* it's only a read if it's an entry, read on dirs is for readdir */
51769+ if (op & MAY_READ)
51770+ mode |= GR_READ;
51771+ if (op & MAY_WRITE)
51772+ mode |= GR_WRITE;
51773+
51774+ preempt_disable();
51775+
51776+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51777+
51778+ /* it's only a read/write if it's an actual entry, not a dir
51779+ (which are opened for readdir)
51780+ */
51781+
51782+ /* convert the requested sysctl entry into a pathname */
51783+
51784+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51785+ len += strlen(tmp->procname);
51786+ len++;
51787+ depth++;
51788+ }
51789+
51790+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51791+ /* deny */
51792+ goto out;
51793+ }
51794+
51795+ memset(path, 0, PAGE_SIZE);
51796+
51797+ memcpy(path, proc_sys, strlen(proc_sys));
51798+
51799+ pos += strlen(proc_sys);
51800+
51801+ for (; depth > 0; depth--) {
51802+ path[pos] = '/';
51803+ pos++;
51804+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51805+ if (depth == i) {
51806+ memcpy(path + pos, tmp->procname,
51807+ strlen(tmp->procname));
51808+ pos += strlen(tmp->procname);
51809+ }
51810+ i++;
51811+ }
51812+ }
51813+
51814+ obj = gr_lookup_by_name(path, pos);
51815+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51816+
51817+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51818+ ((err & mode) != mode))) {
51819+ __u32 new_mode = mode;
51820+
51821+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51822+
51823+ err = 0;
51824+ gr_log_learn_sysctl(path, new_mode);
51825+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51826+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51827+ err = -ENOENT;
51828+ } else if (!(err & GR_FIND)) {
51829+ err = -ENOENT;
51830+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51831+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51832+ path, (mode & GR_READ) ? " reading" : "",
51833+ (mode & GR_WRITE) ? " writing" : "");
51834+ err = -EACCES;
51835+ } else if ((err & mode) != mode) {
51836+ err = -EACCES;
51837+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51838+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51839+ path, (mode & GR_READ) ? " reading" : "",
51840+ (mode & GR_WRITE) ? " writing" : "");
51841+ err = 0;
51842+ } else
51843+ err = 0;
51844+
51845+ out:
51846+ preempt_enable();
51847+
51848+ return err;
51849+}
51850+#endif
51851+
51852+int
51853+gr_handle_proc_ptrace(struct task_struct *task)
51854+{
51855+ struct file *filp;
51856+ struct task_struct *tmp = task;
51857+ struct task_struct *curtemp = current;
51858+ __u32 retmode;
51859+
51860+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51861+ if (unlikely(!(gr_status & GR_READY)))
51862+ return 0;
51863+#endif
51864+
51865+ read_lock(&tasklist_lock);
51866+ read_lock(&grsec_exec_file_lock);
51867+ filp = task->exec_file;
51868+
51869+ while (tmp->pid > 0) {
51870+ if (tmp == curtemp)
51871+ break;
51872+ tmp = tmp->real_parent;
51873+ }
51874+
51875+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51876+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51877+ read_unlock(&grsec_exec_file_lock);
51878+ read_unlock(&tasklist_lock);
51879+ return 1;
51880+ }
51881+
51882+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51883+ if (!(gr_status & GR_READY)) {
51884+ read_unlock(&grsec_exec_file_lock);
51885+ read_unlock(&tasklist_lock);
51886+ return 0;
51887+ }
51888+#endif
51889+
51890+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51891+ read_unlock(&grsec_exec_file_lock);
51892+ read_unlock(&tasklist_lock);
51893+
51894+ if (retmode & GR_NOPTRACE)
51895+ return 1;
51896+
51897+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51898+ && (current->acl != task->acl || (current->acl != current->role->root_label
51899+ && current->pid != task->pid)))
51900+ return 1;
51901+
51902+ return 0;
51903+}
51904+
51905+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51906+{
51907+ if (unlikely(!(gr_status & GR_READY)))
51908+ return;
51909+
51910+ if (!(current->role->roletype & GR_ROLE_GOD))
51911+ return;
51912+
51913+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51914+ p->role->rolename, gr_task_roletype_to_char(p),
51915+ p->acl->filename);
51916+}
51917+
51918+int
51919+gr_handle_ptrace(struct task_struct *task, const long request)
51920+{
51921+ struct task_struct *tmp = task;
51922+ struct task_struct *curtemp = current;
51923+ __u32 retmode;
51924+
51925+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51926+ if (unlikely(!(gr_status & GR_READY)))
51927+ return 0;
51928+#endif
51929+
51930+ read_lock(&tasklist_lock);
51931+ while (tmp->pid > 0) {
51932+ if (tmp == curtemp)
51933+ break;
51934+ tmp = tmp->real_parent;
51935+ }
51936+
51937+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51938+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51939+ read_unlock(&tasklist_lock);
51940+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51941+ return 1;
51942+ }
51943+ read_unlock(&tasklist_lock);
51944+
51945+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51946+ if (!(gr_status & GR_READY))
51947+ return 0;
51948+#endif
51949+
51950+ read_lock(&grsec_exec_file_lock);
51951+ if (unlikely(!task->exec_file)) {
51952+ read_unlock(&grsec_exec_file_lock);
51953+ return 0;
51954+ }
51955+
51956+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51957+ read_unlock(&grsec_exec_file_lock);
51958+
51959+ if (retmode & GR_NOPTRACE) {
51960+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51961+ return 1;
51962+ }
51963+
51964+ if (retmode & GR_PTRACERD) {
51965+ switch (request) {
51966+ case PTRACE_SEIZE:
51967+ case PTRACE_POKETEXT:
51968+ case PTRACE_POKEDATA:
51969+ case PTRACE_POKEUSR:
51970+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51971+ case PTRACE_SETREGS:
51972+ case PTRACE_SETFPREGS:
51973+#endif
51974+#ifdef CONFIG_X86
51975+ case PTRACE_SETFPXREGS:
51976+#endif
51977+#ifdef CONFIG_ALTIVEC
51978+ case PTRACE_SETVRREGS:
51979+#endif
51980+ return 1;
51981+ default:
51982+ return 0;
51983+ }
51984+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51985+ !(current->role->roletype & GR_ROLE_GOD) &&
51986+ (current->acl != task->acl)) {
51987+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51988+ return 1;
51989+ }
51990+
51991+ return 0;
51992+}
51993+
51994+static int is_writable_mmap(const struct file *filp)
51995+{
51996+ struct task_struct *task = current;
51997+ struct acl_object_label *obj, *obj2;
51998+
51999+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52000+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52001+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52002+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52003+ task->role->root_label);
52004+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52005+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52006+ return 1;
52007+ }
52008+ }
52009+ return 0;
52010+}
52011+
52012+int
52013+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52014+{
52015+ __u32 mode;
52016+
52017+ if (unlikely(!file || !(prot & PROT_EXEC)))
52018+ return 1;
52019+
52020+ if (is_writable_mmap(file))
52021+ return 0;
52022+
52023+ mode =
52024+ gr_search_file(file->f_path.dentry,
52025+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52026+ file->f_path.mnt);
52027+
52028+ if (!gr_tpe_allow(file))
52029+ return 0;
52030+
52031+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52032+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52033+ return 0;
52034+ } else if (unlikely(!(mode & GR_EXEC))) {
52035+ return 0;
52036+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52037+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52038+ return 1;
52039+ }
52040+
52041+ return 1;
52042+}
52043+
52044+int
52045+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52046+{
52047+ __u32 mode;
52048+
52049+ if (unlikely(!file || !(prot & PROT_EXEC)))
52050+ return 1;
52051+
52052+ if (is_writable_mmap(file))
52053+ return 0;
52054+
52055+ mode =
52056+ gr_search_file(file->f_path.dentry,
52057+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52058+ file->f_path.mnt);
52059+
52060+ if (!gr_tpe_allow(file))
52061+ return 0;
52062+
52063+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52064+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52065+ return 0;
52066+ } else if (unlikely(!(mode & GR_EXEC))) {
52067+ return 0;
52068+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52069+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52070+ return 1;
52071+ }
52072+
52073+ return 1;
52074+}
52075+
52076+void
52077+gr_acl_handle_psacct(struct task_struct *task, const long code)
52078+{
52079+ unsigned long runtime;
52080+ unsigned long cputime;
52081+ unsigned int wday, cday;
52082+ __u8 whr, chr;
52083+ __u8 wmin, cmin;
52084+ __u8 wsec, csec;
52085+ struct timespec timeval;
52086+
52087+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52088+ !(task->acl->mode & GR_PROCACCT)))
52089+ return;
52090+
52091+ do_posix_clock_monotonic_gettime(&timeval);
52092+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52093+ wday = runtime / (3600 * 24);
52094+ runtime -= wday * (3600 * 24);
52095+ whr = runtime / 3600;
52096+ runtime -= whr * 3600;
52097+ wmin = runtime / 60;
52098+ runtime -= wmin * 60;
52099+ wsec = runtime;
52100+
52101+ cputime = (task->utime + task->stime) / HZ;
52102+ cday = cputime / (3600 * 24);
52103+ cputime -= cday * (3600 * 24);
52104+ chr = cputime / 3600;
52105+ cputime -= chr * 3600;
52106+ cmin = cputime / 60;
52107+ cputime -= cmin * 60;
52108+ csec = cputime;
52109+
52110+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52111+
52112+ return;
52113+}
52114+
52115+void gr_set_kernel_label(struct task_struct *task)
52116+{
52117+ if (gr_status & GR_READY) {
52118+ task->role = kernel_role;
52119+ task->acl = kernel_role->root_label;
52120+ }
52121+ return;
52122+}
52123+
52124+#ifdef CONFIG_TASKSTATS
52125+int gr_is_taskstats_denied(int pid)
52126+{
52127+ struct task_struct *task;
52128+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52129+ const struct cred *cred;
52130+#endif
52131+ int ret = 0;
52132+
52133+ /* restrict taskstats viewing to un-chrooted root users
52134+ who have the 'view' subject flag if the RBAC system is enabled
52135+ */
52136+
52137+ rcu_read_lock();
52138+ read_lock(&tasklist_lock);
52139+ task = find_task_by_vpid(pid);
52140+ if (task) {
52141+#ifdef CONFIG_GRKERNSEC_CHROOT
52142+ if (proc_is_chrooted(task))
52143+ ret = -EACCES;
52144+#endif
52145+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52146+ cred = __task_cred(task);
52147+#ifdef CONFIG_GRKERNSEC_PROC_USER
52148+ if (cred->uid != 0)
52149+ ret = -EACCES;
52150+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52151+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52152+ ret = -EACCES;
52153+#endif
52154+#endif
52155+ if (gr_status & GR_READY) {
52156+ if (!(task->acl->mode & GR_VIEW))
52157+ ret = -EACCES;
52158+ }
52159+ } else
52160+ ret = -ENOENT;
52161+
52162+ read_unlock(&tasklist_lock);
52163+ rcu_read_unlock();
52164+
52165+ return ret;
52166+}
52167+#endif
52168+
52169+/* AUXV entries are filled via a descendant of search_binary_handler
52170+ after we've already applied the subject for the target
52171+*/
52172+int gr_acl_enable_at_secure(void)
52173+{
52174+ if (unlikely(!(gr_status & GR_READY)))
52175+ return 0;
52176+
52177+ if (current->acl->mode & GR_ATSECURE)
52178+ return 1;
52179+
52180+ return 0;
52181+}
52182+
52183+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52184+{
52185+ struct task_struct *task = current;
52186+ struct dentry *dentry = file->f_path.dentry;
52187+ struct vfsmount *mnt = file->f_path.mnt;
52188+ struct acl_object_label *obj, *tmp;
52189+ struct acl_subject_label *subj;
52190+ unsigned int bufsize;
52191+ int is_not_root;
52192+ char *path;
52193+ dev_t dev = __get_dev(dentry);
52194+
52195+ if (unlikely(!(gr_status & GR_READY)))
52196+ return 1;
52197+
52198+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52199+ return 1;
52200+
52201+ /* ignore Eric Biederman */
52202+ if (IS_PRIVATE(dentry->d_inode))
52203+ return 1;
52204+
52205+ subj = task->acl;
52206+ do {
52207+ obj = lookup_acl_obj_label(ino, dev, subj);
52208+ if (obj != NULL)
52209+ return (obj->mode & GR_FIND) ? 1 : 0;
52210+ } while ((subj = subj->parent_subject));
52211+
52212+ /* this is purely an optimization since we're looking for an object
52213+ for the directory we're doing a readdir on
52214+ if it's possible for any globbed object to match the entry we're
52215+ filling into the directory, then the object we find here will be
52216+ an anchor point with attached globbed objects
52217+ */
52218+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52219+ if (obj->globbed == NULL)
52220+ return (obj->mode & GR_FIND) ? 1 : 0;
52221+
52222+ is_not_root = ((obj->filename[0] == '/') &&
52223+ (obj->filename[1] == '\0')) ? 0 : 1;
52224+ bufsize = PAGE_SIZE - namelen - is_not_root;
52225+
52226+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52227+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52228+ return 1;
52229+
52230+ preempt_disable();
52231+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52232+ bufsize);
52233+
52234+ bufsize = strlen(path);
52235+
52236+ /* if base is "/", don't append an additional slash */
52237+ if (is_not_root)
52238+ *(path + bufsize) = '/';
52239+ memcpy(path + bufsize + is_not_root, name, namelen);
52240+ *(path + bufsize + namelen + is_not_root) = '\0';
52241+
52242+ tmp = obj->globbed;
52243+ while (tmp) {
52244+ if (!glob_match(tmp->filename, path)) {
52245+ preempt_enable();
52246+ return (tmp->mode & GR_FIND) ? 1 : 0;
52247+ }
52248+ tmp = tmp->next;
52249+ }
52250+ preempt_enable();
52251+ return (obj->mode & GR_FIND) ? 1 : 0;
52252+}
52253+
52254+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52255+EXPORT_SYMBOL(gr_acl_is_enabled);
52256+#endif
52257+EXPORT_SYMBOL(gr_learn_resource);
52258+EXPORT_SYMBOL(gr_set_kernel_label);
52259+#ifdef CONFIG_SECURITY
52260+EXPORT_SYMBOL(gr_check_user_change);
52261+EXPORT_SYMBOL(gr_check_group_change);
52262+#endif
52263+
52264diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52265new file mode 100644
52266index 0000000..34fefda
52267--- /dev/null
52268+++ b/grsecurity/gracl_alloc.c
52269@@ -0,0 +1,105 @@
52270+#include <linux/kernel.h>
52271+#include <linux/mm.h>
52272+#include <linux/slab.h>
52273+#include <linux/vmalloc.h>
52274+#include <linux/gracl.h>
52275+#include <linux/grsecurity.h>
52276+
52277+static unsigned long alloc_stack_next = 1;
52278+static unsigned long alloc_stack_size = 1;
52279+static void **alloc_stack;
52280+
52281+static __inline__ int
52282+alloc_pop(void)
52283+{
52284+ if (alloc_stack_next == 1)
52285+ return 0;
52286+
52287+ kfree(alloc_stack[alloc_stack_next - 2]);
52288+
52289+ alloc_stack_next--;
52290+
52291+ return 1;
52292+}
52293+
52294+static __inline__ int
52295+alloc_push(void *buf)
52296+{
52297+ if (alloc_stack_next >= alloc_stack_size)
52298+ return 1;
52299+
52300+ alloc_stack[alloc_stack_next - 1] = buf;
52301+
52302+ alloc_stack_next++;
52303+
52304+ return 0;
52305+}
52306+
52307+void *
52308+acl_alloc(unsigned long len)
52309+{
52310+ void *ret = NULL;
52311+
52312+ if (!len || len > PAGE_SIZE)
52313+ goto out;
52314+
52315+ ret = kmalloc(len, GFP_KERNEL);
52316+
52317+ if (ret) {
52318+ if (alloc_push(ret)) {
52319+ kfree(ret);
52320+ ret = NULL;
52321+ }
52322+ }
52323+
52324+out:
52325+ return ret;
52326+}
52327+
52328+void *
52329+acl_alloc_num(unsigned long num, unsigned long len)
52330+{
52331+ if (!len || (num > (PAGE_SIZE / len)))
52332+ return NULL;
52333+
52334+ return acl_alloc(num * len);
52335+}
52336+
52337+void
52338+acl_free_all(void)
52339+{
52340+ if (gr_acl_is_enabled() || !alloc_stack)
52341+ return;
52342+
52343+ while (alloc_pop()) ;
52344+
52345+ if (alloc_stack) {
52346+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52347+ kfree(alloc_stack);
52348+ else
52349+ vfree(alloc_stack);
52350+ }
52351+
52352+ alloc_stack = NULL;
52353+ alloc_stack_size = 1;
52354+ alloc_stack_next = 1;
52355+
52356+ return;
52357+}
52358+
52359+int
52360+acl_alloc_stack_init(unsigned long size)
52361+{
52362+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52363+ alloc_stack =
52364+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52365+ else
52366+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52367+
52368+ alloc_stack_size = size;
52369+
52370+ if (!alloc_stack)
52371+ return 0;
52372+ else
52373+ return 1;
52374+}
52375diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52376new file mode 100644
52377index 0000000..955ddfb
52378--- /dev/null
52379+++ b/grsecurity/gracl_cap.c
52380@@ -0,0 +1,101 @@
52381+#include <linux/kernel.h>
52382+#include <linux/module.h>
52383+#include <linux/sched.h>
52384+#include <linux/gracl.h>
52385+#include <linux/grsecurity.h>
52386+#include <linux/grinternal.h>
52387+
52388+extern const char *captab_log[];
52389+extern int captab_log_entries;
52390+
52391+int
52392+gr_acl_is_capable(const int cap)
52393+{
52394+ struct task_struct *task = current;
52395+ const struct cred *cred = current_cred();
52396+ struct acl_subject_label *curracl;
52397+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52398+ kernel_cap_t cap_audit = __cap_empty_set;
52399+
52400+ if (!gr_acl_is_enabled())
52401+ return 1;
52402+
52403+ curracl = task->acl;
52404+
52405+ cap_drop = curracl->cap_lower;
52406+ cap_mask = curracl->cap_mask;
52407+ cap_audit = curracl->cap_invert_audit;
52408+
52409+ while ((curracl = curracl->parent_subject)) {
52410+ /* if the cap isn't specified in the current computed mask but is specified in the
52411+ current level subject, and is lowered in the current level subject, then add
52412+ it to the set of dropped capabilities
52413+ otherwise, add the current level subject's mask to the current computed mask
52414+ */
52415+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52416+ cap_raise(cap_mask, cap);
52417+ if (cap_raised(curracl->cap_lower, cap))
52418+ cap_raise(cap_drop, cap);
52419+ if (cap_raised(curracl->cap_invert_audit, cap))
52420+ cap_raise(cap_audit, cap);
52421+ }
52422+ }
52423+
52424+ if (!cap_raised(cap_drop, cap)) {
52425+ if (cap_raised(cap_audit, cap))
52426+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52427+ return 1;
52428+ }
52429+
52430+ curracl = task->acl;
52431+
52432+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52433+ && cap_raised(cred->cap_effective, cap)) {
52434+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52435+ task->role->roletype, cred->uid,
52436+ cred->gid, task->exec_file ?
52437+ gr_to_filename(task->exec_file->f_path.dentry,
52438+ task->exec_file->f_path.mnt) : curracl->filename,
52439+ curracl->filename, 0UL,
52440+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52441+ return 1;
52442+ }
52443+
52444+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52445+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52446+ return 0;
52447+}
52448+
52449+int
52450+gr_acl_is_capable_nolog(const int cap)
52451+{
52452+ struct acl_subject_label *curracl;
52453+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52454+
52455+ if (!gr_acl_is_enabled())
52456+ return 1;
52457+
52458+ curracl = current->acl;
52459+
52460+ cap_drop = curracl->cap_lower;
52461+ cap_mask = curracl->cap_mask;
52462+
52463+ while ((curracl = curracl->parent_subject)) {
52464+ /* if the cap isn't specified in the current computed mask but is specified in the
52465+ current level subject, and is lowered in the current level subject, then add
52466+ it to the set of dropped capabilities
52467+ otherwise, add the current level subject's mask to the current computed mask
52468+ */
52469+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52470+ cap_raise(cap_mask, cap);
52471+ if (cap_raised(curracl->cap_lower, cap))
52472+ cap_raise(cap_drop, cap);
52473+ }
52474+ }
52475+
52476+ if (!cap_raised(cap_drop, cap))
52477+ return 1;
52478+
52479+ return 0;
52480+}
52481+
52482diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52483new file mode 100644
52484index 0000000..4eda5c3
52485--- /dev/null
52486+++ b/grsecurity/gracl_fs.c
52487@@ -0,0 +1,433 @@
52488+#include <linux/kernel.h>
52489+#include <linux/sched.h>
52490+#include <linux/types.h>
52491+#include <linux/fs.h>
52492+#include <linux/file.h>
52493+#include <linux/stat.h>
52494+#include <linux/grsecurity.h>
52495+#include <linux/grinternal.h>
52496+#include <linux/gracl.h>
52497+
52498+__u32
52499+gr_acl_handle_hidden_file(const struct dentry * dentry,
52500+ const struct vfsmount * mnt)
52501+{
52502+ __u32 mode;
52503+
52504+ if (unlikely(!dentry->d_inode))
52505+ return GR_FIND;
52506+
52507+ mode =
52508+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52509+
52510+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52511+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52512+ return mode;
52513+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52514+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52515+ return 0;
52516+ } else if (unlikely(!(mode & GR_FIND)))
52517+ return 0;
52518+
52519+ return GR_FIND;
52520+}
52521+
52522+__u32
52523+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52524+ int acc_mode)
52525+{
52526+ __u32 reqmode = GR_FIND;
52527+ __u32 mode;
52528+
52529+ if (unlikely(!dentry->d_inode))
52530+ return reqmode;
52531+
52532+ if (acc_mode & MAY_APPEND)
52533+ reqmode |= GR_APPEND;
52534+ else if (acc_mode & MAY_WRITE)
52535+ reqmode |= GR_WRITE;
52536+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52537+ reqmode |= GR_READ;
52538+
52539+ mode =
52540+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52541+ mnt);
52542+
52543+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52544+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52545+ reqmode & GR_READ ? " reading" : "",
52546+ reqmode & GR_WRITE ? " writing" : reqmode &
52547+ GR_APPEND ? " appending" : "");
52548+ return reqmode;
52549+ } else
52550+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52551+ {
52552+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52553+ reqmode & GR_READ ? " reading" : "",
52554+ reqmode & GR_WRITE ? " writing" : reqmode &
52555+ GR_APPEND ? " appending" : "");
52556+ return 0;
52557+ } else if (unlikely((mode & reqmode) != reqmode))
52558+ return 0;
52559+
52560+ return reqmode;
52561+}
52562+
52563+__u32
52564+gr_acl_handle_creat(const struct dentry * dentry,
52565+ const struct dentry * p_dentry,
52566+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52567+ const int imode)
52568+{
52569+ __u32 reqmode = GR_WRITE | GR_CREATE;
52570+ __u32 mode;
52571+
52572+ if (acc_mode & MAY_APPEND)
52573+ reqmode |= GR_APPEND;
52574+ // if a directory was required or the directory already exists, then
52575+ // don't count this open as a read
52576+ if ((acc_mode & MAY_READ) &&
52577+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52578+ reqmode |= GR_READ;
52579+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52580+ reqmode |= GR_SETID;
52581+
52582+ mode =
52583+ gr_check_create(dentry, p_dentry, p_mnt,
52584+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52585+
52586+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52587+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52588+ reqmode & GR_READ ? " reading" : "",
52589+ reqmode & GR_WRITE ? " writing" : reqmode &
52590+ GR_APPEND ? " appending" : "");
52591+ return reqmode;
52592+ } else
52593+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52594+ {
52595+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52596+ reqmode & GR_READ ? " reading" : "",
52597+ reqmode & GR_WRITE ? " writing" : reqmode &
52598+ GR_APPEND ? " appending" : "");
52599+ return 0;
52600+ } else if (unlikely((mode & reqmode) != reqmode))
52601+ return 0;
52602+
52603+ return reqmode;
52604+}
52605+
52606+__u32
52607+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52608+ const int fmode)
52609+{
52610+ __u32 mode, reqmode = GR_FIND;
52611+
52612+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52613+ reqmode |= GR_EXEC;
52614+ if (fmode & S_IWOTH)
52615+ reqmode |= GR_WRITE;
52616+ if (fmode & S_IROTH)
52617+ reqmode |= GR_READ;
52618+
52619+ mode =
52620+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52621+ mnt);
52622+
52623+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52624+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52625+ reqmode & GR_READ ? " reading" : "",
52626+ reqmode & GR_WRITE ? " writing" : "",
52627+ reqmode & GR_EXEC ? " executing" : "");
52628+ return reqmode;
52629+ } else
52630+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52631+ {
52632+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52633+ reqmode & GR_READ ? " reading" : "",
52634+ reqmode & GR_WRITE ? " writing" : "",
52635+ reqmode & GR_EXEC ? " executing" : "");
52636+ return 0;
52637+ } else if (unlikely((mode & reqmode) != reqmode))
52638+ return 0;
52639+
52640+ return reqmode;
52641+}
52642+
52643+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52644+{
52645+ __u32 mode;
52646+
52647+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52648+
52649+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52650+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52651+ return mode;
52652+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52653+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52654+ return 0;
52655+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52656+ return 0;
52657+
52658+ return (reqmode);
52659+}
52660+
52661+__u32
52662+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52663+{
52664+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52665+}
52666+
52667+__u32
52668+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52669+{
52670+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52671+}
52672+
52673+__u32
52674+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52675+{
52676+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52677+}
52678+
52679+__u32
52680+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52681+{
52682+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52683+}
52684+
52685+__u32
52686+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52687+ mode_t mode)
52688+{
52689+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52690+ return 1;
52691+
52692+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52693+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52694+ GR_FCHMOD_ACL_MSG);
52695+ } else {
52696+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52697+ }
52698+}
52699+
52700+__u32
52701+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52702+ mode_t mode)
52703+{
52704+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52705+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52706+ GR_CHMOD_ACL_MSG);
52707+ } else {
52708+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52709+ }
52710+}
52711+
52712+__u32
52713+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52714+{
52715+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52716+}
52717+
52718+__u32
52719+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52720+{
52721+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52722+}
52723+
52724+__u32
52725+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52726+{
52727+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52728+}
52729+
52730+__u32
52731+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52732+{
52733+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52734+ GR_UNIXCONNECT_ACL_MSG);
52735+}
52736+
52737+/* hardlinks require at minimum create and link permission,
52738+ any additional privilege required is based on the
52739+ privilege of the file being linked to
52740+*/
52741+__u32
52742+gr_acl_handle_link(const struct dentry * new_dentry,
52743+ const struct dentry * parent_dentry,
52744+ const struct vfsmount * parent_mnt,
52745+ const struct dentry * old_dentry,
52746+ const struct vfsmount * old_mnt, const char *to)
52747+{
52748+ __u32 mode;
52749+ __u32 needmode = GR_CREATE | GR_LINK;
52750+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52751+
52752+ mode =
52753+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52754+ old_mnt);
52755+
52756+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52757+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52758+ return mode;
52759+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52760+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52761+ return 0;
52762+ } else if (unlikely((mode & needmode) != needmode))
52763+ return 0;
52764+
52765+ return 1;
52766+}
52767+
52768+__u32
52769+gr_acl_handle_symlink(const struct dentry * new_dentry,
52770+ const struct dentry * parent_dentry,
52771+ const struct vfsmount * parent_mnt, const char *from)
52772+{
52773+ __u32 needmode = GR_WRITE | GR_CREATE;
52774+ __u32 mode;
52775+
52776+ mode =
52777+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52778+ GR_CREATE | GR_AUDIT_CREATE |
52779+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52780+
52781+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52782+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52783+ return mode;
52784+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52785+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52786+ return 0;
52787+ } else if (unlikely((mode & needmode) != needmode))
52788+ return 0;
52789+
52790+ return (GR_WRITE | GR_CREATE);
52791+}
52792+
52793+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52794+{
52795+ __u32 mode;
52796+
52797+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52798+
52799+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52800+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52801+ return mode;
52802+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52803+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52804+ return 0;
52805+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52806+ return 0;
52807+
52808+ return (reqmode);
52809+}
52810+
52811+__u32
52812+gr_acl_handle_mknod(const struct dentry * new_dentry,
52813+ const struct dentry * parent_dentry,
52814+ const struct vfsmount * parent_mnt,
52815+ const int mode)
52816+{
52817+ __u32 reqmode = GR_WRITE | GR_CREATE;
52818+ if (unlikely(mode & (S_ISUID | S_ISGID)))
52819+ reqmode |= GR_SETID;
52820+
52821+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52822+ reqmode, GR_MKNOD_ACL_MSG);
52823+}
52824+
52825+__u32
52826+gr_acl_handle_mkdir(const struct dentry *new_dentry,
52827+ const struct dentry *parent_dentry,
52828+ const struct vfsmount *parent_mnt)
52829+{
52830+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52831+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52832+}
52833+
52834+#define RENAME_CHECK_SUCCESS(old, new) \
52835+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52836+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52837+
52838+int
52839+gr_acl_handle_rename(struct dentry *new_dentry,
52840+ struct dentry *parent_dentry,
52841+ const struct vfsmount *parent_mnt,
52842+ struct dentry *old_dentry,
52843+ struct inode *old_parent_inode,
52844+ struct vfsmount *old_mnt, const char *newname)
52845+{
52846+ __u32 comp1, comp2;
52847+ int error = 0;
52848+
52849+ if (unlikely(!gr_acl_is_enabled()))
52850+ return 0;
52851+
52852+ if (!new_dentry->d_inode) {
52853+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52854+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52855+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52856+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52857+ GR_DELETE | GR_AUDIT_DELETE |
52858+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52859+ GR_SUPPRESS, old_mnt);
52860+ } else {
52861+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52862+ GR_CREATE | GR_DELETE |
52863+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52864+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52865+ GR_SUPPRESS, parent_mnt);
52866+ comp2 =
52867+ gr_search_file(old_dentry,
52868+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52869+ GR_DELETE | GR_AUDIT_DELETE |
52870+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52871+ }
52872+
52873+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52874+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52875+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52876+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52877+ && !(comp2 & GR_SUPPRESS)) {
52878+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52879+ error = -EACCES;
52880+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52881+ error = -EACCES;
52882+
52883+ return error;
52884+}
52885+
52886+void
52887+gr_acl_handle_exit(void)
52888+{
52889+ u16 id;
52890+ char *rolename;
52891+ struct file *exec_file;
52892+
52893+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52894+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52895+ id = current->acl_role_id;
52896+ rolename = current->role->rolename;
52897+ gr_set_acls(1);
52898+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52899+ }
52900+
52901+ write_lock(&grsec_exec_file_lock);
52902+ exec_file = current->exec_file;
52903+ current->exec_file = NULL;
52904+ write_unlock(&grsec_exec_file_lock);
52905+
52906+ if (exec_file)
52907+ fput(exec_file);
52908+}
52909+
52910+int
52911+gr_acl_handle_procpidmem(const struct task_struct *task)
52912+{
52913+ if (unlikely(!gr_acl_is_enabled()))
52914+ return 0;
52915+
52916+ if (task != current && task->acl->mode & GR_PROTPROCFD)
52917+ return -EACCES;
52918+
52919+ return 0;
52920+}
52921diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
52922new file mode 100644
52923index 0000000..17050ca
52924--- /dev/null
52925+++ b/grsecurity/gracl_ip.c
52926@@ -0,0 +1,381 @@
52927+#include <linux/kernel.h>
52928+#include <asm/uaccess.h>
52929+#include <asm/errno.h>
52930+#include <net/sock.h>
52931+#include <linux/file.h>
52932+#include <linux/fs.h>
52933+#include <linux/net.h>
52934+#include <linux/in.h>
52935+#include <linux/skbuff.h>
52936+#include <linux/ip.h>
52937+#include <linux/udp.h>
52938+#include <linux/types.h>
52939+#include <linux/sched.h>
52940+#include <linux/netdevice.h>
52941+#include <linux/inetdevice.h>
52942+#include <linux/gracl.h>
52943+#include <linux/grsecurity.h>
52944+#include <linux/grinternal.h>
52945+
52946+#define GR_BIND 0x01
52947+#define GR_CONNECT 0x02
52948+#define GR_INVERT 0x04
52949+#define GR_BINDOVERRIDE 0x08
52950+#define GR_CONNECTOVERRIDE 0x10
52951+#define GR_SOCK_FAMILY 0x20
52952+
52953+static const char * gr_protocols[IPPROTO_MAX] = {
52954+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52955+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52956+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52957+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52958+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52959+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52960+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52961+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52962+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52963+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52964+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52965+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52966+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52967+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52968+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52969+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52970+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52971+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52972+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52973+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52974+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52975+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52976+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52977+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52978+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52979+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52980+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52981+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52982+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52983+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52984+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52985+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52986+ };
52987+
52988+static const char * gr_socktypes[SOCK_MAX] = {
52989+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52990+ "unknown:7", "unknown:8", "unknown:9", "packet"
52991+ };
52992+
52993+static const char * gr_sockfamilies[AF_MAX+1] = {
52994+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52995+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52996+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52997+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52998+ };
52999+
53000+const char *
53001+gr_proto_to_name(unsigned char proto)
53002+{
53003+ return gr_protocols[proto];
53004+}
53005+
53006+const char *
53007+gr_socktype_to_name(unsigned char type)
53008+{
53009+ return gr_socktypes[type];
53010+}
53011+
53012+const char *
53013+gr_sockfamily_to_name(unsigned char family)
53014+{
53015+ return gr_sockfamilies[family];
53016+}
53017+
53018+int
53019+gr_search_socket(const int domain, const int type, const int protocol)
53020+{
53021+ struct acl_subject_label *curr;
53022+ const struct cred *cred = current_cred();
53023+
53024+ if (unlikely(!gr_acl_is_enabled()))
53025+ goto exit;
53026+
53027+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
53028+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53029+ goto exit; // let the kernel handle it
53030+
53031+ curr = current->acl;
53032+
53033+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53034+ /* the family is allowed, if this is PF_INET allow it only if
53035+ the extra sock type/protocol checks pass */
53036+ if (domain == PF_INET)
53037+ goto inet_check;
53038+ goto exit;
53039+ } else {
53040+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53041+ __u32 fakeip = 0;
53042+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53043+ current->role->roletype, cred->uid,
53044+ cred->gid, current->exec_file ?
53045+ gr_to_filename(current->exec_file->f_path.dentry,
53046+ current->exec_file->f_path.mnt) :
53047+ curr->filename, curr->filename,
53048+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53049+ &current->signal->saved_ip);
53050+ goto exit;
53051+ }
53052+ goto exit_fail;
53053+ }
53054+
53055+inet_check:
53056+ /* the rest of this checking is for IPv4 only */
53057+ if (!curr->ips)
53058+ goto exit;
53059+
53060+ if ((curr->ip_type & (1 << type)) &&
53061+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53062+ goto exit;
53063+
53064+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53065+ /* we don't place acls on raw sockets , and sometimes
53066+ dgram/ip sockets are opened for ioctl and not
53067+ bind/connect, so we'll fake a bind learn log */
53068+ if (type == SOCK_RAW || type == SOCK_PACKET) {
53069+ __u32 fakeip = 0;
53070+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53071+ current->role->roletype, cred->uid,
53072+ cred->gid, current->exec_file ?
53073+ gr_to_filename(current->exec_file->f_path.dentry,
53074+ current->exec_file->f_path.mnt) :
53075+ curr->filename, curr->filename,
53076+ &fakeip, 0, type,
53077+ protocol, GR_CONNECT, &current->signal->saved_ip);
53078+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53079+ __u32 fakeip = 0;
53080+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53081+ current->role->roletype, cred->uid,
53082+ cred->gid, current->exec_file ?
53083+ gr_to_filename(current->exec_file->f_path.dentry,
53084+ current->exec_file->f_path.mnt) :
53085+ curr->filename, curr->filename,
53086+ &fakeip, 0, type,
53087+ protocol, GR_BIND, &current->signal->saved_ip);
53088+ }
53089+ /* we'll log when they use connect or bind */
53090+ goto exit;
53091+ }
53092+
53093+exit_fail:
53094+ if (domain == PF_INET)
53095+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53096+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
53097+ else
53098+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53099+ gr_socktype_to_name(type), protocol);
53100+
53101+ return 0;
53102+exit:
53103+ return 1;
53104+}
53105+
53106+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53107+{
53108+ if ((ip->mode & mode) &&
53109+ (ip_port >= ip->low) &&
53110+ (ip_port <= ip->high) &&
53111+ ((ntohl(ip_addr) & our_netmask) ==
53112+ (ntohl(our_addr) & our_netmask))
53113+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53114+ && (ip->type & (1 << type))) {
53115+ if (ip->mode & GR_INVERT)
53116+ return 2; // specifically denied
53117+ else
53118+ return 1; // allowed
53119+ }
53120+
53121+ return 0; // not specifically allowed, may continue parsing
53122+}
53123+
53124+static int
53125+gr_search_connectbind(const int full_mode, struct sock *sk,
53126+ struct sockaddr_in *addr, const int type)
53127+{
53128+ char iface[IFNAMSIZ] = {0};
53129+ struct acl_subject_label *curr;
53130+ struct acl_ip_label *ip;
53131+ struct inet_sock *isk;
53132+ struct net_device *dev;
53133+ struct in_device *idev;
53134+ unsigned long i;
53135+ int ret;
53136+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53137+ __u32 ip_addr = 0;
53138+ __u32 our_addr;
53139+ __u32 our_netmask;
53140+ char *p;
53141+ __u16 ip_port = 0;
53142+ const struct cred *cred = current_cred();
53143+
53144+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53145+ return 0;
53146+
53147+ curr = current->acl;
53148+ isk = inet_sk(sk);
53149+
53150+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53151+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53152+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53153+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53154+ struct sockaddr_in saddr;
53155+ int err;
53156+
53157+ saddr.sin_family = AF_INET;
53158+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53159+ saddr.sin_port = isk->inet_sport;
53160+
53161+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53162+ if (err)
53163+ return err;
53164+
53165+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53166+ if (err)
53167+ return err;
53168+ }
53169+
53170+ if (!curr->ips)
53171+ return 0;
53172+
53173+ ip_addr = addr->sin_addr.s_addr;
53174+ ip_port = ntohs(addr->sin_port);
53175+
53176+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53177+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53178+ current->role->roletype, cred->uid,
53179+ cred->gid, current->exec_file ?
53180+ gr_to_filename(current->exec_file->f_path.dentry,
53181+ current->exec_file->f_path.mnt) :
53182+ curr->filename, curr->filename,
53183+ &ip_addr, ip_port, type,
53184+ sk->sk_protocol, mode, &current->signal->saved_ip);
53185+ return 0;
53186+ }
53187+
53188+ for (i = 0; i < curr->ip_num; i++) {
53189+ ip = *(curr->ips + i);
53190+ if (ip->iface != NULL) {
53191+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53192+ p = strchr(iface, ':');
53193+ if (p != NULL)
53194+ *p = '\0';
53195+ dev = dev_get_by_name(sock_net(sk), iface);
53196+ if (dev == NULL)
53197+ continue;
53198+ idev = in_dev_get(dev);
53199+ if (idev == NULL) {
53200+ dev_put(dev);
53201+ continue;
53202+ }
53203+ rcu_read_lock();
53204+ for_ifa(idev) {
53205+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53206+ our_addr = ifa->ifa_address;
53207+ our_netmask = 0xffffffff;
53208+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53209+ if (ret == 1) {
53210+ rcu_read_unlock();
53211+ in_dev_put(idev);
53212+ dev_put(dev);
53213+ return 0;
53214+ } else if (ret == 2) {
53215+ rcu_read_unlock();
53216+ in_dev_put(idev);
53217+ dev_put(dev);
53218+ goto denied;
53219+ }
53220+ }
53221+ } endfor_ifa(idev);
53222+ rcu_read_unlock();
53223+ in_dev_put(idev);
53224+ dev_put(dev);
53225+ } else {
53226+ our_addr = ip->addr;
53227+ our_netmask = ip->netmask;
53228+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53229+ if (ret == 1)
53230+ return 0;
53231+ else if (ret == 2)
53232+ goto denied;
53233+ }
53234+ }
53235+
53236+denied:
53237+ if (mode == GR_BIND)
53238+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53239+ else if (mode == GR_CONNECT)
53240+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53241+
53242+ return -EACCES;
53243+}
53244+
53245+int
53246+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53247+{
53248+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53249+}
53250+
53251+int
53252+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53253+{
53254+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53255+}
53256+
53257+int gr_search_listen(struct socket *sock)
53258+{
53259+ struct sock *sk = sock->sk;
53260+ struct sockaddr_in addr;
53261+
53262+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53263+ addr.sin_port = inet_sk(sk)->inet_sport;
53264+
53265+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53266+}
53267+
53268+int gr_search_accept(struct socket *sock)
53269+{
53270+ struct sock *sk = sock->sk;
53271+ struct sockaddr_in addr;
53272+
53273+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53274+ addr.sin_port = inet_sk(sk)->inet_sport;
53275+
53276+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53277+}
53278+
53279+int
53280+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53281+{
53282+ if (addr)
53283+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53284+ else {
53285+ struct sockaddr_in sin;
53286+ const struct inet_sock *inet = inet_sk(sk);
53287+
53288+ sin.sin_addr.s_addr = inet->inet_daddr;
53289+ sin.sin_port = inet->inet_dport;
53290+
53291+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53292+ }
53293+}
53294+
53295+int
53296+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53297+{
53298+ struct sockaddr_in sin;
53299+
53300+ if (unlikely(skb->len < sizeof (struct udphdr)))
53301+ return 0; // skip this packet
53302+
53303+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53304+ sin.sin_port = udp_hdr(skb)->source;
53305+
53306+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53307+}
53308diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53309new file mode 100644
53310index 0000000..25f54ef
53311--- /dev/null
53312+++ b/grsecurity/gracl_learn.c
53313@@ -0,0 +1,207 @@
53314+#include <linux/kernel.h>
53315+#include <linux/mm.h>
53316+#include <linux/sched.h>
53317+#include <linux/poll.h>
53318+#include <linux/string.h>
53319+#include <linux/file.h>
53320+#include <linux/types.h>
53321+#include <linux/vmalloc.h>
53322+#include <linux/grinternal.h>
53323+
53324+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53325+ size_t count, loff_t *ppos);
53326+extern int gr_acl_is_enabled(void);
53327+
53328+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53329+static int gr_learn_attached;
53330+
53331+/* use a 512k buffer */
53332+#define LEARN_BUFFER_SIZE (512 * 1024)
53333+
53334+static DEFINE_SPINLOCK(gr_learn_lock);
53335+static DEFINE_MUTEX(gr_learn_user_mutex);
53336+
53337+/* we need to maintain two buffers, so that the kernel context of grlearn
53338+ uses a semaphore around the userspace copying, and the other kernel contexts
53339+ use a spinlock when copying into the buffer, since they cannot sleep
53340+*/
53341+static char *learn_buffer;
53342+static char *learn_buffer_user;
53343+static int learn_buffer_len;
53344+static int learn_buffer_user_len;
53345+
53346+static ssize_t
53347+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53348+{
53349+ DECLARE_WAITQUEUE(wait, current);
53350+ ssize_t retval = 0;
53351+
53352+ add_wait_queue(&learn_wait, &wait);
53353+ set_current_state(TASK_INTERRUPTIBLE);
53354+ do {
53355+ mutex_lock(&gr_learn_user_mutex);
53356+ spin_lock(&gr_learn_lock);
53357+ if (learn_buffer_len)
53358+ break;
53359+ spin_unlock(&gr_learn_lock);
53360+ mutex_unlock(&gr_learn_user_mutex);
53361+ if (file->f_flags & O_NONBLOCK) {
53362+ retval = -EAGAIN;
53363+ goto out;
53364+ }
53365+ if (signal_pending(current)) {
53366+ retval = -ERESTARTSYS;
53367+ goto out;
53368+ }
53369+
53370+ schedule();
53371+ } while (1);
53372+
53373+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53374+ learn_buffer_user_len = learn_buffer_len;
53375+ retval = learn_buffer_len;
53376+ learn_buffer_len = 0;
53377+
53378+ spin_unlock(&gr_learn_lock);
53379+
53380+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53381+ retval = -EFAULT;
53382+
53383+ mutex_unlock(&gr_learn_user_mutex);
53384+out:
53385+ set_current_state(TASK_RUNNING);
53386+ remove_wait_queue(&learn_wait, &wait);
53387+ return retval;
53388+}
53389+
53390+static unsigned int
53391+poll_learn(struct file * file, poll_table * wait)
53392+{
53393+ poll_wait(file, &learn_wait, wait);
53394+
53395+ if (learn_buffer_len)
53396+ return (POLLIN | POLLRDNORM);
53397+
53398+ return 0;
53399+}
53400+
53401+void
53402+gr_clear_learn_entries(void)
53403+{
53404+ char *tmp;
53405+
53406+ mutex_lock(&gr_learn_user_mutex);
53407+ spin_lock(&gr_learn_lock);
53408+ tmp = learn_buffer;
53409+ learn_buffer = NULL;
53410+ spin_unlock(&gr_learn_lock);
53411+ if (tmp)
53412+ vfree(tmp);
53413+ if (learn_buffer_user != NULL) {
53414+ vfree(learn_buffer_user);
53415+ learn_buffer_user = NULL;
53416+ }
53417+ learn_buffer_len = 0;
53418+ mutex_unlock(&gr_learn_user_mutex);
53419+
53420+ return;
53421+}
53422+
53423+void
53424+gr_add_learn_entry(const char *fmt, ...)
53425+{
53426+ va_list args;
53427+ unsigned int len;
53428+
53429+ if (!gr_learn_attached)
53430+ return;
53431+
53432+ spin_lock(&gr_learn_lock);
53433+
53434+ /* leave a gap at the end so we know when it's "full" but don't have to
53435+ compute the exact length of the string we're trying to append
53436+ */
53437+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53438+ spin_unlock(&gr_learn_lock);
53439+ wake_up_interruptible(&learn_wait);
53440+ return;
53441+ }
53442+ if (learn_buffer == NULL) {
53443+ spin_unlock(&gr_learn_lock);
53444+ return;
53445+ }
53446+
53447+ va_start(args, fmt);
53448+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53449+ va_end(args);
53450+
53451+ learn_buffer_len += len + 1;
53452+
53453+ spin_unlock(&gr_learn_lock);
53454+ wake_up_interruptible(&learn_wait);
53455+
53456+ return;
53457+}
53458+
53459+static int
53460+open_learn(struct inode *inode, struct file *file)
53461+{
53462+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53463+ return -EBUSY;
53464+ if (file->f_mode & FMODE_READ) {
53465+ int retval = 0;
53466+ mutex_lock(&gr_learn_user_mutex);
53467+ if (learn_buffer == NULL)
53468+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53469+ if (learn_buffer_user == NULL)
53470+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53471+ if (learn_buffer == NULL) {
53472+ retval = -ENOMEM;
53473+ goto out_error;
53474+ }
53475+ if (learn_buffer_user == NULL) {
53476+ retval = -ENOMEM;
53477+ goto out_error;
53478+ }
53479+ learn_buffer_len = 0;
53480+ learn_buffer_user_len = 0;
53481+ gr_learn_attached = 1;
53482+out_error:
53483+ mutex_unlock(&gr_learn_user_mutex);
53484+ return retval;
53485+ }
53486+ return 0;
53487+}
53488+
53489+static int
53490+close_learn(struct inode *inode, struct file *file)
53491+{
53492+ if (file->f_mode & FMODE_READ) {
53493+ char *tmp = NULL;
53494+ mutex_lock(&gr_learn_user_mutex);
53495+ spin_lock(&gr_learn_lock);
53496+ tmp = learn_buffer;
53497+ learn_buffer = NULL;
53498+ spin_unlock(&gr_learn_lock);
53499+ if (tmp)
53500+ vfree(tmp);
53501+ if (learn_buffer_user != NULL) {
53502+ vfree(learn_buffer_user);
53503+ learn_buffer_user = NULL;
53504+ }
53505+ learn_buffer_len = 0;
53506+ learn_buffer_user_len = 0;
53507+ gr_learn_attached = 0;
53508+ mutex_unlock(&gr_learn_user_mutex);
53509+ }
53510+
53511+ return 0;
53512+}
53513+
53514+const struct file_operations grsec_fops = {
53515+ .read = read_learn,
53516+ .write = write_grsec_handler,
53517+ .open = open_learn,
53518+ .release = close_learn,
53519+ .poll = poll_learn,
53520+};
53521diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53522new file mode 100644
53523index 0000000..39645c9
53524--- /dev/null
53525+++ b/grsecurity/gracl_res.c
53526@@ -0,0 +1,68 @@
53527+#include <linux/kernel.h>
53528+#include <linux/sched.h>
53529+#include <linux/gracl.h>
53530+#include <linux/grinternal.h>
53531+
53532+static const char *restab_log[] = {
53533+ [RLIMIT_CPU] = "RLIMIT_CPU",
53534+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53535+ [RLIMIT_DATA] = "RLIMIT_DATA",
53536+ [RLIMIT_STACK] = "RLIMIT_STACK",
53537+ [RLIMIT_CORE] = "RLIMIT_CORE",
53538+ [RLIMIT_RSS] = "RLIMIT_RSS",
53539+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53540+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53541+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53542+ [RLIMIT_AS] = "RLIMIT_AS",
53543+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53544+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53545+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53546+ [RLIMIT_NICE] = "RLIMIT_NICE",
53547+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53548+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53549+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53550+};
53551+
53552+void
53553+gr_log_resource(const struct task_struct *task,
53554+ const int res, const unsigned long wanted, const int gt)
53555+{
53556+ const struct cred *cred;
53557+ unsigned long rlim;
53558+
53559+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53560+ return;
53561+
53562+ // not yet supported resource
53563+ if (unlikely(!restab_log[res]))
53564+ return;
53565+
53566+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53567+ rlim = task_rlimit_max(task, res);
53568+ else
53569+ rlim = task_rlimit(task, res);
53570+
53571+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53572+ return;
53573+
53574+ rcu_read_lock();
53575+ cred = __task_cred(task);
53576+
53577+ if (res == RLIMIT_NPROC &&
53578+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53579+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53580+ goto out_rcu_unlock;
53581+ else if (res == RLIMIT_MEMLOCK &&
53582+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53583+ goto out_rcu_unlock;
53584+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53585+ goto out_rcu_unlock;
53586+ rcu_read_unlock();
53587+
53588+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53589+
53590+ return;
53591+out_rcu_unlock:
53592+ rcu_read_unlock();
53593+ return;
53594+}
53595diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53596new file mode 100644
53597index 0000000..5556be3
53598--- /dev/null
53599+++ b/grsecurity/gracl_segv.c
53600@@ -0,0 +1,299 @@
53601+#include <linux/kernel.h>
53602+#include <linux/mm.h>
53603+#include <asm/uaccess.h>
53604+#include <asm/errno.h>
53605+#include <asm/mman.h>
53606+#include <net/sock.h>
53607+#include <linux/file.h>
53608+#include <linux/fs.h>
53609+#include <linux/net.h>
53610+#include <linux/in.h>
53611+#include <linux/slab.h>
53612+#include <linux/types.h>
53613+#include <linux/sched.h>
53614+#include <linux/timer.h>
53615+#include <linux/gracl.h>
53616+#include <linux/grsecurity.h>
53617+#include <linux/grinternal.h>
53618+
53619+static struct crash_uid *uid_set;
53620+static unsigned short uid_used;
53621+static DEFINE_SPINLOCK(gr_uid_lock);
53622+extern rwlock_t gr_inode_lock;
53623+extern struct acl_subject_label *
53624+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53625+ struct acl_role_label *role);
53626+
53627+#ifdef CONFIG_BTRFS_FS
53628+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53629+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53630+#endif
53631+
53632+static inline dev_t __get_dev(const struct dentry *dentry)
53633+{
53634+#ifdef CONFIG_BTRFS_FS
53635+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53636+ return get_btrfs_dev_from_inode(dentry->d_inode);
53637+ else
53638+#endif
53639+ return dentry->d_inode->i_sb->s_dev;
53640+}
53641+
53642+int
53643+gr_init_uidset(void)
53644+{
53645+ uid_set =
53646+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53647+ uid_used = 0;
53648+
53649+ return uid_set ? 1 : 0;
53650+}
53651+
53652+void
53653+gr_free_uidset(void)
53654+{
53655+ if (uid_set)
53656+ kfree(uid_set);
53657+
53658+ return;
53659+}
53660+
53661+int
53662+gr_find_uid(const uid_t uid)
53663+{
53664+ struct crash_uid *tmp = uid_set;
53665+ uid_t buid;
53666+ int low = 0, high = uid_used - 1, mid;
53667+
53668+ while (high >= low) {
53669+ mid = (low + high) >> 1;
53670+ buid = tmp[mid].uid;
53671+ if (buid == uid)
53672+ return mid;
53673+ if (buid > uid)
53674+ high = mid - 1;
53675+ if (buid < uid)
53676+ low = mid + 1;
53677+ }
53678+
53679+ return -1;
53680+}
53681+
53682+static __inline__ void
53683+gr_insertsort(void)
53684+{
53685+ unsigned short i, j;
53686+ struct crash_uid index;
53687+
53688+ for (i = 1; i < uid_used; i++) {
53689+ index = uid_set[i];
53690+ j = i;
53691+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53692+ uid_set[j] = uid_set[j - 1];
53693+ j--;
53694+ }
53695+ uid_set[j] = index;
53696+ }
53697+
53698+ return;
53699+}
53700+
53701+static __inline__ void
53702+gr_insert_uid(const uid_t uid, const unsigned long expires)
53703+{
53704+ int loc;
53705+
53706+ if (uid_used == GR_UIDTABLE_MAX)
53707+ return;
53708+
53709+ loc = gr_find_uid(uid);
53710+
53711+ if (loc >= 0) {
53712+ uid_set[loc].expires = expires;
53713+ return;
53714+ }
53715+
53716+ uid_set[uid_used].uid = uid;
53717+ uid_set[uid_used].expires = expires;
53718+ uid_used++;
53719+
53720+ gr_insertsort();
53721+
53722+ return;
53723+}
53724+
53725+void
53726+gr_remove_uid(const unsigned short loc)
53727+{
53728+ unsigned short i;
53729+
53730+ for (i = loc + 1; i < uid_used; i++)
53731+ uid_set[i - 1] = uid_set[i];
53732+
53733+ uid_used--;
53734+
53735+ return;
53736+}
53737+
53738+int
53739+gr_check_crash_uid(const uid_t uid)
53740+{
53741+ int loc;
53742+ int ret = 0;
53743+
53744+ if (unlikely(!gr_acl_is_enabled()))
53745+ return 0;
53746+
53747+ spin_lock(&gr_uid_lock);
53748+ loc = gr_find_uid(uid);
53749+
53750+ if (loc < 0)
53751+ goto out_unlock;
53752+
53753+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53754+ gr_remove_uid(loc);
53755+ else
53756+ ret = 1;
53757+
53758+out_unlock:
53759+ spin_unlock(&gr_uid_lock);
53760+ return ret;
53761+}
53762+
53763+static __inline__ int
53764+proc_is_setxid(const struct cred *cred)
53765+{
53766+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53767+ cred->uid != cred->fsuid)
53768+ return 1;
53769+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53770+ cred->gid != cred->fsgid)
53771+ return 1;
53772+
53773+ return 0;
53774+}
53775+
53776+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53777+
53778+void
53779+gr_handle_crash(struct task_struct *task, const int sig)
53780+{
53781+ struct acl_subject_label *curr;
53782+ struct task_struct *tsk, *tsk2;
53783+ const struct cred *cred;
53784+ const struct cred *cred2;
53785+
53786+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53787+ return;
53788+
53789+ if (unlikely(!gr_acl_is_enabled()))
53790+ return;
53791+
53792+ curr = task->acl;
53793+
53794+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
53795+ return;
53796+
53797+ if (time_before_eq(curr->expires, get_seconds())) {
53798+ curr->expires = 0;
53799+ curr->crashes = 0;
53800+ }
53801+
53802+ curr->crashes++;
53803+
53804+ if (!curr->expires)
53805+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53806+
53807+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53808+ time_after(curr->expires, get_seconds())) {
53809+ rcu_read_lock();
53810+ cred = __task_cred(task);
53811+ if (cred->uid && proc_is_setxid(cred)) {
53812+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53813+ spin_lock(&gr_uid_lock);
53814+ gr_insert_uid(cred->uid, curr->expires);
53815+ spin_unlock(&gr_uid_lock);
53816+ curr->expires = 0;
53817+ curr->crashes = 0;
53818+ read_lock(&tasklist_lock);
53819+ do_each_thread(tsk2, tsk) {
53820+ cred2 = __task_cred(tsk);
53821+ if (tsk != task && cred2->uid == cred->uid)
53822+ gr_fake_force_sig(SIGKILL, tsk);
53823+ } while_each_thread(tsk2, tsk);
53824+ read_unlock(&tasklist_lock);
53825+ } else {
53826+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53827+ read_lock(&tasklist_lock);
53828+ read_lock(&grsec_exec_file_lock);
53829+ do_each_thread(tsk2, tsk) {
53830+ if (likely(tsk != task)) {
53831+ // if this thread has the same subject as the one that triggered
53832+ // RES_CRASH and it's the same binary, kill it
53833+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
53834+ gr_fake_force_sig(SIGKILL, tsk);
53835+ }
53836+ } while_each_thread(tsk2, tsk);
53837+ read_unlock(&grsec_exec_file_lock);
53838+ read_unlock(&tasklist_lock);
53839+ }
53840+ rcu_read_unlock();
53841+ }
53842+
53843+ return;
53844+}
53845+
53846+int
53847+gr_check_crash_exec(const struct file *filp)
53848+{
53849+ struct acl_subject_label *curr;
53850+
53851+ if (unlikely(!gr_acl_is_enabled()))
53852+ return 0;
53853+
53854+ read_lock(&gr_inode_lock);
53855+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53856+ __get_dev(filp->f_path.dentry),
53857+ current->role);
53858+ read_unlock(&gr_inode_lock);
53859+
53860+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53861+ (!curr->crashes && !curr->expires))
53862+ return 0;
53863+
53864+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53865+ time_after(curr->expires, get_seconds()))
53866+ return 1;
53867+ else if (time_before_eq(curr->expires, get_seconds())) {
53868+ curr->crashes = 0;
53869+ curr->expires = 0;
53870+ }
53871+
53872+ return 0;
53873+}
53874+
53875+void
53876+gr_handle_alertkill(struct task_struct *task)
53877+{
53878+ struct acl_subject_label *curracl;
53879+ __u32 curr_ip;
53880+ struct task_struct *p, *p2;
53881+
53882+ if (unlikely(!gr_acl_is_enabled()))
53883+ return;
53884+
53885+ curracl = task->acl;
53886+ curr_ip = task->signal->curr_ip;
53887+
53888+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53889+ read_lock(&tasklist_lock);
53890+ do_each_thread(p2, p) {
53891+ if (p->signal->curr_ip == curr_ip)
53892+ gr_fake_force_sig(SIGKILL, p);
53893+ } while_each_thread(p2, p);
53894+ read_unlock(&tasklist_lock);
53895+ } else if (curracl->mode & GR_KILLPROC)
53896+ gr_fake_force_sig(SIGKILL, task);
53897+
53898+ return;
53899+}
53900diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
53901new file mode 100644
53902index 0000000..9d83a69
53903--- /dev/null
53904+++ b/grsecurity/gracl_shm.c
53905@@ -0,0 +1,40 @@
53906+#include <linux/kernel.h>
53907+#include <linux/mm.h>
53908+#include <linux/sched.h>
53909+#include <linux/file.h>
53910+#include <linux/ipc.h>
53911+#include <linux/gracl.h>
53912+#include <linux/grsecurity.h>
53913+#include <linux/grinternal.h>
53914+
53915+int
53916+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53917+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53918+{
53919+ struct task_struct *task;
53920+
53921+ if (!gr_acl_is_enabled())
53922+ return 1;
53923+
53924+ rcu_read_lock();
53925+ read_lock(&tasklist_lock);
53926+
53927+ task = find_task_by_vpid(shm_cprid);
53928+
53929+ if (unlikely(!task))
53930+ task = find_task_by_vpid(shm_lapid);
53931+
53932+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53933+ (task->pid == shm_lapid)) &&
53934+ (task->acl->mode & GR_PROTSHM) &&
53935+ (task->acl != current->acl))) {
53936+ read_unlock(&tasklist_lock);
53937+ rcu_read_unlock();
53938+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53939+ return 0;
53940+ }
53941+ read_unlock(&tasklist_lock);
53942+ rcu_read_unlock();
53943+
53944+ return 1;
53945+}
53946diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
53947new file mode 100644
53948index 0000000..bc0be01
53949--- /dev/null
53950+++ b/grsecurity/grsec_chdir.c
53951@@ -0,0 +1,19 @@
53952+#include <linux/kernel.h>
53953+#include <linux/sched.h>
53954+#include <linux/fs.h>
53955+#include <linux/file.h>
53956+#include <linux/grsecurity.h>
53957+#include <linux/grinternal.h>
53958+
53959+void
53960+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53961+{
53962+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53963+ if ((grsec_enable_chdir && grsec_enable_group &&
53964+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53965+ !grsec_enable_group)) {
53966+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53967+ }
53968+#endif
53969+ return;
53970+}
53971diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
53972new file mode 100644
53973index 0000000..a2dc675
53974--- /dev/null
53975+++ b/grsecurity/grsec_chroot.c
53976@@ -0,0 +1,351 @@
53977+#include <linux/kernel.h>
53978+#include <linux/module.h>
53979+#include <linux/sched.h>
53980+#include <linux/file.h>
53981+#include <linux/fs.h>
53982+#include <linux/mount.h>
53983+#include <linux/types.h>
53984+#include <linux/pid_namespace.h>
53985+#include <linux/grsecurity.h>
53986+#include <linux/grinternal.h>
53987+
53988+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53989+{
53990+#ifdef CONFIG_GRKERNSEC
53991+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53992+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53993+ task->gr_is_chrooted = 1;
53994+ else
53995+ task->gr_is_chrooted = 0;
53996+
53997+ task->gr_chroot_dentry = path->dentry;
53998+#endif
53999+ return;
54000+}
54001+
54002+void gr_clear_chroot_entries(struct task_struct *task)
54003+{
54004+#ifdef CONFIG_GRKERNSEC
54005+ task->gr_is_chrooted = 0;
54006+ task->gr_chroot_dentry = NULL;
54007+#endif
54008+ return;
54009+}
54010+
54011+int
54012+gr_handle_chroot_unix(const pid_t pid)
54013+{
54014+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54015+ struct task_struct *p;
54016+
54017+ if (unlikely(!grsec_enable_chroot_unix))
54018+ return 1;
54019+
54020+ if (likely(!proc_is_chrooted(current)))
54021+ return 1;
54022+
54023+ rcu_read_lock();
54024+ read_lock(&tasklist_lock);
54025+ p = find_task_by_vpid_unrestricted(pid);
54026+ if (unlikely(p && !have_same_root(current, p))) {
54027+ read_unlock(&tasklist_lock);
54028+ rcu_read_unlock();
54029+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54030+ return 0;
54031+ }
54032+ read_unlock(&tasklist_lock);
54033+ rcu_read_unlock();
54034+#endif
54035+ return 1;
54036+}
54037+
54038+int
54039+gr_handle_chroot_nice(void)
54040+{
54041+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54042+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54043+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54044+ return -EPERM;
54045+ }
54046+#endif
54047+ return 0;
54048+}
54049+
54050+int
54051+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54052+{
54053+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54054+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54055+ && proc_is_chrooted(current)) {
54056+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54057+ return -EACCES;
54058+ }
54059+#endif
54060+ return 0;
54061+}
54062+
54063+int
54064+gr_handle_chroot_rawio(const struct inode *inode)
54065+{
54066+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54067+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54068+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54069+ return 1;
54070+#endif
54071+ return 0;
54072+}
54073+
54074+int
54075+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54076+{
54077+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54078+ struct task_struct *p;
54079+ int ret = 0;
54080+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54081+ return ret;
54082+
54083+ read_lock(&tasklist_lock);
54084+ do_each_pid_task(pid, type, p) {
54085+ if (!have_same_root(current, p)) {
54086+ ret = 1;
54087+ goto out;
54088+ }
54089+ } while_each_pid_task(pid, type, p);
54090+out:
54091+ read_unlock(&tasklist_lock);
54092+ return ret;
54093+#endif
54094+ return 0;
54095+}
54096+
54097+int
54098+gr_pid_is_chrooted(struct task_struct *p)
54099+{
54100+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54101+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54102+ return 0;
54103+
54104+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54105+ !have_same_root(current, p)) {
54106+ return 1;
54107+ }
54108+#endif
54109+ return 0;
54110+}
54111+
54112+EXPORT_SYMBOL(gr_pid_is_chrooted);
54113+
54114+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54115+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54116+{
54117+ struct path path, currentroot;
54118+ int ret = 0;
54119+
54120+ path.dentry = (struct dentry *)u_dentry;
54121+ path.mnt = (struct vfsmount *)u_mnt;
54122+ get_fs_root(current->fs, &currentroot);
54123+ if (path_is_under(&path, &currentroot))
54124+ ret = 1;
54125+ path_put(&currentroot);
54126+
54127+ return ret;
54128+}
54129+#endif
54130+
54131+int
54132+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54133+{
54134+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54135+ if (!grsec_enable_chroot_fchdir)
54136+ return 1;
54137+
54138+ if (!proc_is_chrooted(current))
54139+ return 1;
54140+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54141+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54142+ return 0;
54143+ }
54144+#endif
54145+ return 1;
54146+}
54147+
54148+int
54149+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54150+ const time_t shm_createtime)
54151+{
54152+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54153+ struct task_struct *p;
54154+ time_t starttime;
54155+
54156+ if (unlikely(!grsec_enable_chroot_shmat))
54157+ return 1;
54158+
54159+ if (likely(!proc_is_chrooted(current)))
54160+ return 1;
54161+
54162+ rcu_read_lock();
54163+ read_lock(&tasklist_lock);
54164+
54165+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54166+ starttime = p->start_time.tv_sec;
54167+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54168+ if (have_same_root(current, p)) {
54169+ goto allow;
54170+ } else {
54171+ read_unlock(&tasklist_lock);
54172+ rcu_read_unlock();
54173+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54174+ return 0;
54175+ }
54176+ }
54177+ /* creator exited, pid reuse, fall through to next check */
54178+ }
54179+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54180+ if (unlikely(!have_same_root(current, p))) {
54181+ read_unlock(&tasklist_lock);
54182+ rcu_read_unlock();
54183+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54184+ return 0;
54185+ }
54186+ }
54187+
54188+allow:
54189+ read_unlock(&tasklist_lock);
54190+ rcu_read_unlock();
54191+#endif
54192+ return 1;
54193+}
54194+
54195+void
54196+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54197+{
54198+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54199+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54200+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54201+#endif
54202+ return;
54203+}
54204+
54205+int
54206+gr_handle_chroot_mknod(const struct dentry *dentry,
54207+ const struct vfsmount *mnt, const int mode)
54208+{
54209+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54210+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54211+ proc_is_chrooted(current)) {
54212+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54213+ return -EPERM;
54214+ }
54215+#endif
54216+ return 0;
54217+}
54218+
54219+int
54220+gr_handle_chroot_mount(const struct dentry *dentry,
54221+ const struct vfsmount *mnt, const char *dev_name)
54222+{
54223+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54224+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54225+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54226+ return -EPERM;
54227+ }
54228+#endif
54229+ return 0;
54230+}
54231+
54232+int
54233+gr_handle_chroot_pivot(void)
54234+{
54235+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54236+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54237+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54238+ return -EPERM;
54239+ }
54240+#endif
54241+ return 0;
54242+}
54243+
54244+int
54245+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54246+{
54247+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54248+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54249+ !gr_is_outside_chroot(dentry, mnt)) {
54250+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54251+ return -EPERM;
54252+ }
54253+#endif
54254+ return 0;
54255+}
54256+
54257+extern const char *captab_log[];
54258+extern int captab_log_entries;
54259+
54260+int
54261+gr_chroot_is_capable(const int cap)
54262+{
54263+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54264+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54265+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54266+ if (cap_raised(chroot_caps, cap)) {
54267+ const struct cred *creds = current_cred();
54268+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54269+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54270+ }
54271+ return 0;
54272+ }
54273+ }
54274+#endif
54275+ return 1;
54276+}
54277+
54278+int
54279+gr_chroot_is_capable_nolog(const int cap)
54280+{
54281+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54282+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54283+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54284+ if (cap_raised(chroot_caps, cap)) {
54285+ return 0;
54286+ }
54287+ }
54288+#endif
54289+ return 1;
54290+}
54291+
54292+int
54293+gr_handle_chroot_sysctl(const int op)
54294+{
54295+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54296+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54297+ proc_is_chrooted(current))
54298+ return -EACCES;
54299+#endif
54300+ return 0;
54301+}
54302+
54303+void
54304+gr_handle_chroot_chdir(struct path *path)
54305+{
54306+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54307+ if (grsec_enable_chroot_chdir)
54308+ set_fs_pwd(current->fs, path);
54309+#endif
54310+ return;
54311+}
54312+
54313+int
54314+gr_handle_chroot_chmod(const struct dentry *dentry,
54315+ const struct vfsmount *mnt, const int mode)
54316+{
54317+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54318+ /* allow chmod +s on directories, but not files */
54319+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54320+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54321+ proc_is_chrooted(current)) {
54322+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54323+ return -EPERM;
54324+ }
54325+#endif
54326+ return 0;
54327+}
54328diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54329new file mode 100644
54330index 0000000..d81a586
54331--- /dev/null
54332+++ b/grsecurity/grsec_disabled.c
54333@@ -0,0 +1,439 @@
54334+#include <linux/kernel.h>
54335+#include <linux/module.h>
54336+#include <linux/sched.h>
54337+#include <linux/file.h>
54338+#include <linux/fs.h>
54339+#include <linux/kdev_t.h>
54340+#include <linux/net.h>
54341+#include <linux/in.h>
54342+#include <linux/ip.h>
54343+#include <linux/skbuff.h>
54344+#include <linux/sysctl.h>
54345+
54346+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54347+void
54348+pax_set_initial_flags(struct linux_binprm *bprm)
54349+{
54350+ return;
54351+}
54352+#endif
54353+
54354+#ifdef CONFIG_SYSCTL
54355+__u32
54356+gr_handle_sysctl(const struct ctl_table * table, const int op)
54357+{
54358+ return 0;
54359+}
54360+#endif
54361+
54362+#ifdef CONFIG_TASKSTATS
54363+int gr_is_taskstats_denied(int pid)
54364+{
54365+ return 0;
54366+}
54367+#endif
54368+
54369+int
54370+gr_acl_is_enabled(void)
54371+{
54372+ return 0;
54373+}
54374+
54375+void
54376+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54377+{
54378+ return;
54379+}
54380+
54381+int
54382+gr_handle_rawio(const struct inode *inode)
54383+{
54384+ return 0;
54385+}
54386+
54387+void
54388+gr_acl_handle_psacct(struct task_struct *task, const long code)
54389+{
54390+ return;
54391+}
54392+
54393+int
54394+gr_handle_ptrace(struct task_struct *task, const long request)
54395+{
54396+ return 0;
54397+}
54398+
54399+int
54400+gr_handle_proc_ptrace(struct task_struct *task)
54401+{
54402+ return 0;
54403+}
54404+
54405+void
54406+gr_learn_resource(const struct task_struct *task,
54407+ const int res, const unsigned long wanted, const int gt)
54408+{
54409+ return;
54410+}
54411+
54412+int
54413+gr_set_acls(const int type)
54414+{
54415+ return 0;
54416+}
54417+
54418+int
54419+gr_check_hidden_task(const struct task_struct *tsk)
54420+{
54421+ return 0;
54422+}
54423+
54424+int
54425+gr_check_protected_task(const struct task_struct *task)
54426+{
54427+ return 0;
54428+}
54429+
54430+int
54431+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54432+{
54433+ return 0;
54434+}
54435+
54436+void
54437+gr_copy_label(struct task_struct *tsk)
54438+{
54439+ return;
54440+}
54441+
54442+void
54443+gr_set_pax_flags(struct task_struct *task)
54444+{
54445+ return;
54446+}
54447+
54448+int
54449+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54450+ const int unsafe_share)
54451+{
54452+ return 0;
54453+}
54454+
54455+void
54456+gr_handle_delete(const ino_t ino, const dev_t dev)
54457+{
54458+ return;
54459+}
54460+
54461+void
54462+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54463+{
54464+ return;
54465+}
54466+
54467+void
54468+gr_handle_crash(struct task_struct *task, const int sig)
54469+{
54470+ return;
54471+}
54472+
54473+int
54474+gr_check_crash_exec(const struct file *filp)
54475+{
54476+ return 0;
54477+}
54478+
54479+int
54480+gr_check_crash_uid(const uid_t uid)
54481+{
54482+ return 0;
54483+}
54484+
54485+void
54486+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54487+ struct dentry *old_dentry,
54488+ struct dentry *new_dentry,
54489+ struct vfsmount *mnt, const __u8 replace)
54490+{
54491+ return;
54492+}
54493+
54494+int
54495+gr_search_socket(const int family, const int type, const int protocol)
54496+{
54497+ return 1;
54498+}
54499+
54500+int
54501+gr_search_connectbind(const int mode, const struct socket *sock,
54502+ const struct sockaddr_in *addr)
54503+{
54504+ return 0;
54505+}
54506+
54507+void
54508+gr_handle_alertkill(struct task_struct *task)
54509+{
54510+ return;
54511+}
54512+
54513+__u32
54514+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54515+{
54516+ return 1;
54517+}
54518+
54519+__u32
54520+gr_acl_handle_hidden_file(const struct dentry * dentry,
54521+ const struct vfsmount * mnt)
54522+{
54523+ return 1;
54524+}
54525+
54526+__u32
54527+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54528+ int acc_mode)
54529+{
54530+ return 1;
54531+}
54532+
54533+__u32
54534+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54535+{
54536+ return 1;
54537+}
54538+
54539+__u32
54540+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54541+{
54542+ return 1;
54543+}
54544+
54545+int
54546+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54547+ unsigned int *vm_flags)
54548+{
54549+ return 1;
54550+}
54551+
54552+__u32
54553+gr_acl_handle_truncate(const struct dentry * dentry,
54554+ const struct vfsmount * mnt)
54555+{
54556+ return 1;
54557+}
54558+
54559+__u32
54560+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54561+{
54562+ return 1;
54563+}
54564+
54565+__u32
54566+gr_acl_handle_access(const struct dentry * dentry,
54567+ const struct vfsmount * mnt, const int fmode)
54568+{
54569+ return 1;
54570+}
54571+
54572+__u32
54573+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54574+ mode_t mode)
54575+{
54576+ return 1;
54577+}
54578+
54579+__u32
54580+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54581+ mode_t mode)
54582+{
54583+ return 1;
54584+}
54585+
54586+__u32
54587+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54588+{
54589+ return 1;
54590+}
54591+
54592+__u32
54593+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54594+{
54595+ return 1;
54596+}
54597+
54598+void
54599+grsecurity_init(void)
54600+{
54601+ return;
54602+}
54603+
54604+__u32
54605+gr_acl_handle_mknod(const struct dentry * new_dentry,
54606+ const struct dentry * parent_dentry,
54607+ const struct vfsmount * parent_mnt,
54608+ const int mode)
54609+{
54610+ return 1;
54611+}
54612+
54613+__u32
54614+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54615+ const struct dentry * parent_dentry,
54616+ const struct vfsmount * parent_mnt)
54617+{
54618+ return 1;
54619+}
54620+
54621+__u32
54622+gr_acl_handle_symlink(const struct dentry * new_dentry,
54623+ const struct dentry * parent_dentry,
54624+ const struct vfsmount * parent_mnt, const char *from)
54625+{
54626+ return 1;
54627+}
54628+
54629+__u32
54630+gr_acl_handle_link(const struct dentry * new_dentry,
54631+ const struct dentry * parent_dentry,
54632+ const struct vfsmount * parent_mnt,
54633+ const struct dentry * old_dentry,
54634+ const struct vfsmount * old_mnt, const char *to)
54635+{
54636+ return 1;
54637+}
54638+
54639+int
54640+gr_acl_handle_rename(const struct dentry *new_dentry,
54641+ const struct dentry *parent_dentry,
54642+ const struct vfsmount *parent_mnt,
54643+ const struct dentry *old_dentry,
54644+ const struct inode *old_parent_inode,
54645+ const struct vfsmount *old_mnt, const char *newname)
54646+{
54647+ return 0;
54648+}
54649+
54650+int
54651+gr_acl_handle_filldir(const struct file *file, const char *name,
54652+ const int namelen, const ino_t ino)
54653+{
54654+ return 1;
54655+}
54656+
54657+int
54658+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54659+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54660+{
54661+ return 1;
54662+}
54663+
54664+int
54665+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54666+{
54667+ return 0;
54668+}
54669+
54670+int
54671+gr_search_accept(const struct socket *sock)
54672+{
54673+ return 0;
54674+}
54675+
54676+int
54677+gr_search_listen(const struct socket *sock)
54678+{
54679+ return 0;
54680+}
54681+
54682+int
54683+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54684+{
54685+ return 0;
54686+}
54687+
54688+__u32
54689+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54690+{
54691+ return 1;
54692+}
54693+
54694+__u32
54695+gr_acl_handle_creat(const struct dentry * dentry,
54696+ const struct dentry * p_dentry,
54697+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54698+ const int imode)
54699+{
54700+ return 1;
54701+}
54702+
54703+void
54704+gr_acl_handle_exit(void)
54705+{
54706+ return;
54707+}
54708+
54709+int
54710+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54711+{
54712+ return 1;
54713+}
54714+
54715+void
54716+gr_set_role_label(const uid_t uid, const gid_t gid)
54717+{
54718+ return;
54719+}
54720+
54721+int
54722+gr_acl_handle_procpidmem(const struct task_struct *task)
54723+{
54724+ return 0;
54725+}
54726+
54727+int
54728+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54729+{
54730+ return 0;
54731+}
54732+
54733+int
54734+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54735+{
54736+ return 0;
54737+}
54738+
54739+void
54740+gr_set_kernel_label(struct task_struct *task)
54741+{
54742+ return;
54743+}
54744+
54745+int
54746+gr_check_user_change(int real, int effective, int fs)
54747+{
54748+ return 0;
54749+}
54750+
54751+int
54752+gr_check_group_change(int real, int effective, int fs)
54753+{
54754+ return 0;
54755+}
54756+
54757+int gr_acl_enable_at_secure(void)
54758+{
54759+ return 0;
54760+}
54761+
54762+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54763+{
54764+ return dentry->d_inode->i_sb->s_dev;
54765+}
54766+
54767+EXPORT_SYMBOL(gr_learn_resource);
54768+EXPORT_SYMBOL(gr_set_kernel_label);
54769+#ifdef CONFIG_SECURITY
54770+EXPORT_SYMBOL(gr_check_user_change);
54771+EXPORT_SYMBOL(gr_check_group_change);
54772+#endif
54773diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54774new file mode 100644
54775index 0000000..2b05ada
54776--- /dev/null
54777+++ b/grsecurity/grsec_exec.c
54778@@ -0,0 +1,146 @@
54779+#include <linux/kernel.h>
54780+#include <linux/sched.h>
54781+#include <linux/file.h>
54782+#include <linux/binfmts.h>
54783+#include <linux/fs.h>
54784+#include <linux/types.h>
54785+#include <linux/grdefs.h>
54786+#include <linux/grsecurity.h>
54787+#include <linux/grinternal.h>
54788+#include <linux/capability.h>
54789+#include <linux/module.h>
54790+
54791+#include <asm/uaccess.h>
54792+
54793+#ifdef CONFIG_GRKERNSEC_EXECLOG
54794+static char gr_exec_arg_buf[132];
54795+static DEFINE_MUTEX(gr_exec_arg_mutex);
54796+#endif
54797+
54798+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54799+
54800+void
54801+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54802+{
54803+#ifdef CONFIG_GRKERNSEC_EXECLOG
54804+ char *grarg = gr_exec_arg_buf;
54805+ unsigned int i, x, execlen = 0;
54806+ char c;
54807+
54808+ if (!((grsec_enable_execlog && grsec_enable_group &&
54809+ in_group_p(grsec_audit_gid))
54810+ || (grsec_enable_execlog && !grsec_enable_group)))
54811+ return;
54812+
54813+ mutex_lock(&gr_exec_arg_mutex);
54814+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54815+
54816+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54817+ const char __user *p;
54818+ unsigned int len;
54819+
54820+ p = get_user_arg_ptr(argv, i);
54821+ if (IS_ERR(p))
54822+ goto log;
54823+
54824+ len = strnlen_user(p, 128 - execlen);
54825+ if (len > 128 - execlen)
54826+ len = 128 - execlen;
54827+ else if (len > 0)
54828+ len--;
54829+ if (copy_from_user(grarg + execlen, p, len))
54830+ goto log;
54831+
54832+ /* rewrite unprintable characters */
54833+ for (x = 0; x < len; x++) {
54834+ c = *(grarg + execlen + x);
54835+ if (c < 32 || c > 126)
54836+ *(grarg + execlen + x) = ' ';
54837+ }
54838+
54839+ execlen += len;
54840+ *(grarg + execlen) = ' ';
54841+ *(grarg + execlen + 1) = '\0';
54842+ execlen++;
54843+ }
54844+
54845+ log:
54846+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54847+ bprm->file->f_path.mnt, grarg);
54848+ mutex_unlock(&gr_exec_arg_mutex);
54849+#endif
54850+ return;
54851+}
54852+
54853+#ifdef CONFIG_GRKERNSEC
54854+extern int gr_acl_is_capable(const int cap);
54855+extern int gr_acl_is_capable_nolog(const int cap);
54856+extern int gr_chroot_is_capable(const int cap);
54857+extern int gr_chroot_is_capable_nolog(const int cap);
54858+#endif
54859+
54860+const char *captab_log[] = {
54861+ "CAP_CHOWN",
54862+ "CAP_DAC_OVERRIDE",
54863+ "CAP_DAC_READ_SEARCH",
54864+ "CAP_FOWNER",
54865+ "CAP_FSETID",
54866+ "CAP_KILL",
54867+ "CAP_SETGID",
54868+ "CAP_SETUID",
54869+ "CAP_SETPCAP",
54870+ "CAP_LINUX_IMMUTABLE",
54871+ "CAP_NET_BIND_SERVICE",
54872+ "CAP_NET_BROADCAST",
54873+ "CAP_NET_ADMIN",
54874+ "CAP_NET_RAW",
54875+ "CAP_IPC_LOCK",
54876+ "CAP_IPC_OWNER",
54877+ "CAP_SYS_MODULE",
54878+ "CAP_SYS_RAWIO",
54879+ "CAP_SYS_CHROOT",
54880+ "CAP_SYS_PTRACE",
54881+ "CAP_SYS_PACCT",
54882+ "CAP_SYS_ADMIN",
54883+ "CAP_SYS_BOOT",
54884+ "CAP_SYS_NICE",
54885+ "CAP_SYS_RESOURCE",
54886+ "CAP_SYS_TIME",
54887+ "CAP_SYS_TTY_CONFIG",
54888+ "CAP_MKNOD",
54889+ "CAP_LEASE",
54890+ "CAP_AUDIT_WRITE",
54891+ "CAP_AUDIT_CONTROL",
54892+ "CAP_SETFCAP",
54893+ "CAP_MAC_OVERRIDE",
54894+ "CAP_MAC_ADMIN",
54895+ "CAP_SYSLOG",
54896+ "CAP_WAKE_ALARM"
54897+};
54898+
54899+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54900+
54901+int gr_is_capable(const int cap)
54902+{
54903+#ifdef CONFIG_GRKERNSEC
54904+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54905+ return 1;
54906+ return 0;
54907+#else
54908+ return 1;
54909+#endif
54910+}
54911+
54912+int gr_is_capable_nolog(const int cap)
54913+{
54914+#ifdef CONFIG_GRKERNSEC
54915+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54916+ return 1;
54917+ return 0;
54918+#else
54919+ return 1;
54920+#endif
54921+}
54922+
54923+EXPORT_SYMBOL(gr_is_capable);
54924+EXPORT_SYMBOL(gr_is_capable_nolog);
54925diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
54926new file mode 100644
54927index 0000000..d3ee748
54928--- /dev/null
54929+++ b/grsecurity/grsec_fifo.c
54930@@ -0,0 +1,24 @@
54931+#include <linux/kernel.h>
54932+#include <linux/sched.h>
54933+#include <linux/fs.h>
54934+#include <linux/file.h>
54935+#include <linux/grinternal.h>
54936+
54937+int
54938+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54939+ const struct dentry *dir, const int flag, const int acc_mode)
54940+{
54941+#ifdef CONFIG_GRKERNSEC_FIFO
54942+ const struct cred *cred = current_cred();
54943+
54944+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54945+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54946+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54947+ (cred->fsuid != dentry->d_inode->i_uid)) {
54948+ if (!inode_permission(dentry->d_inode, acc_mode))
54949+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54950+ return -EACCES;
54951+ }
54952+#endif
54953+ return 0;
54954+}
54955diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
54956new file mode 100644
54957index 0000000..8ca18bf
54958--- /dev/null
54959+++ b/grsecurity/grsec_fork.c
54960@@ -0,0 +1,23 @@
54961+#include <linux/kernel.h>
54962+#include <linux/sched.h>
54963+#include <linux/grsecurity.h>
54964+#include <linux/grinternal.h>
54965+#include <linux/errno.h>
54966+
54967+void
54968+gr_log_forkfail(const int retval)
54969+{
54970+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54971+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54972+ switch (retval) {
54973+ case -EAGAIN:
54974+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54975+ break;
54976+ case -ENOMEM:
54977+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54978+ break;
54979+ }
54980+ }
54981+#endif
54982+ return;
54983+}
54984diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
54985new file mode 100644
54986index 0000000..01ddde4
54987--- /dev/null
54988+++ b/grsecurity/grsec_init.c
54989@@ -0,0 +1,277 @@
54990+#include <linux/kernel.h>
54991+#include <linux/sched.h>
54992+#include <linux/mm.h>
54993+#include <linux/gracl.h>
54994+#include <linux/slab.h>
54995+#include <linux/vmalloc.h>
54996+#include <linux/percpu.h>
54997+#include <linux/module.h>
54998+
54999+int grsec_enable_ptrace_readexec;
55000+int grsec_enable_setxid;
55001+int grsec_enable_brute;
55002+int grsec_enable_link;
55003+int grsec_enable_dmesg;
55004+int grsec_enable_harden_ptrace;
55005+int grsec_enable_fifo;
55006+int grsec_enable_execlog;
55007+int grsec_enable_signal;
55008+int grsec_enable_forkfail;
55009+int grsec_enable_audit_ptrace;
55010+int grsec_enable_time;
55011+int grsec_enable_audit_textrel;
55012+int grsec_enable_group;
55013+int grsec_audit_gid;
55014+int grsec_enable_chdir;
55015+int grsec_enable_mount;
55016+int grsec_enable_rofs;
55017+int grsec_enable_chroot_findtask;
55018+int grsec_enable_chroot_mount;
55019+int grsec_enable_chroot_shmat;
55020+int grsec_enable_chroot_fchdir;
55021+int grsec_enable_chroot_double;
55022+int grsec_enable_chroot_pivot;
55023+int grsec_enable_chroot_chdir;
55024+int grsec_enable_chroot_chmod;
55025+int grsec_enable_chroot_mknod;
55026+int grsec_enable_chroot_nice;
55027+int grsec_enable_chroot_execlog;
55028+int grsec_enable_chroot_caps;
55029+int grsec_enable_chroot_sysctl;
55030+int grsec_enable_chroot_unix;
55031+int grsec_enable_tpe;
55032+int grsec_tpe_gid;
55033+int grsec_enable_blackhole;
55034+#ifdef CONFIG_IPV6_MODULE
55035+EXPORT_SYMBOL(grsec_enable_blackhole);
55036+#endif
55037+int grsec_lastack_retries;
55038+int grsec_enable_tpe_all;
55039+int grsec_enable_tpe_invert;
55040+int grsec_enable_socket_all;
55041+int grsec_socket_all_gid;
55042+int grsec_enable_socket_client;
55043+int grsec_socket_client_gid;
55044+int grsec_enable_socket_server;
55045+int grsec_socket_server_gid;
55046+int grsec_resource_logging;
55047+int grsec_disable_privio;
55048+int grsec_enable_log_rwxmaps;
55049+int grsec_lock;
55050+
55051+DEFINE_SPINLOCK(grsec_alert_lock);
55052+unsigned long grsec_alert_wtime = 0;
55053+unsigned long grsec_alert_fyet = 0;
55054+
55055+DEFINE_SPINLOCK(grsec_audit_lock);
55056+
55057+DEFINE_RWLOCK(grsec_exec_file_lock);
55058+
55059+char *gr_shared_page[4];
55060+
55061+char *gr_alert_log_fmt;
55062+char *gr_audit_log_fmt;
55063+char *gr_alert_log_buf;
55064+char *gr_audit_log_buf;
55065+
55066+extern struct gr_arg *gr_usermode;
55067+extern unsigned char *gr_system_salt;
55068+extern unsigned char *gr_system_sum;
55069+
55070+void __init
55071+grsecurity_init(void)
55072+{
55073+ int j;
55074+ /* create the per-cpu shared pages */
55075+
55076+#ifdef CONFIG_X86
55077+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55078+#endif
55079+
55080+ for (j = 0; j < 4; j++) {
55081+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55082+ if (gr_shared_page[j] == NULL) {
55083+ panic("Unable to allocate grsecurity shared page");
55084+ return;
55085+ }
55086+ }
55087+
55088+ /* allocate log buffers */
55089+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55090+ if (!gr_alert_log_fmt) {
55091+ panic("Unable to allocate grsecurity alert log format buffer");
55092+ return;
55093+ }
55094+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55095+ if (!gr_audit_log_fmt) {
55096+ panic("Unable to allocate grsecurity audit log format buffer");
55097+ return;
55098+ }
55099+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55100+ if (!gr_alert_log_buf) {
55101+ panic("Unable to allocate grsecurity alert log buffer");
55102+ return;
55103+ }
55104+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55105+ if (!gr_audit_log_buf) {
55106+ panic("Unable to allocate grsecurity audit log buffer");
55107+ return;
55108+ }
55109+
55110+ /* allocate memory for authentication structure */
55111+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55112+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55113+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55114+
55115+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55116+ panic("Unable to allocate grsecurity authentication structure");
55117+ return;
55118+ }
55119+
55120+
55121+#ifdef CONFIG_GRKERNSEC_IO
55122+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55123+ grsec_disable_privio = 1;
55124+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55125+ grsec_disable_privio = 1;
55126+#else
55127+ grsec_disable_privio = 0;
55128+#endif
55129+#endif
55130+
55131+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55132+ /* for backward compatibility, tpe_invert always defaults to on if
55133+ enabled in the kernel
55134+ */
55135+ grsec_enable_tpe_invert = 1;
55136+#endif
55137+
55138+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55139+#ifndef CONFIG_GRKERNSEC_SYSCTL
55140+ grsec_lock = 1;
55141+#endif
55142+
55143+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55144+ grsec_enable_audit_textrel = 1;
55145+#endif
55146+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55147+ grsec_enable_log_rwxmaps = 1;
55148+#endif
55149+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55150+ grsec_enable_group = 1;
55151+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55152+#endif
55153+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55154+ grsec_enable_ptrace_readexec = 1;
55155+#endif
55156+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55157+ grsec_enable_chdir = 1;
55158+#endif
55159+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55160+ grsec_enable_harden_ptrace = 1;
55161+#endif
55162+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55163+ grsec_enable_mount = 1;
55164+#endif
55165+#ifdef CONFIG_GRKERNSEC_LINK
55166+ grsec_enable_link = 1;
55167+#endif
55168+#ifdef CONFIG_GRKERNSEC_BRUTE
55169+ grsec_enable_brute = 1;
55170+#endif
55171+#ifdef CONFIG_GRKERNSEC_DMESG
55172+ grsec_enable_dmesg = 1;
55173+#endif
55174+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55175+ grsec_enable_blackhole = 1;
55176+ grsec_lastack_retries = 4;
55177+#endif
55178+#ifdef CONFIG_GRKERNSEC_FIFO
55179+ grsec_enable_fifo = 1;
55180+#endif
55181+#ifdef CONFIG_GRKERNSEC_EXECLOG
55182+ grsec_enable_execlog = 1;
55183+#endif
55184+#ifdef CONFIG_GRKERNSEC_SETXID
55185+ grsec_enable_setxid = 1;
55186+#endif
55187+#ifdef CONFIG_GRKERNSEC_SIGNAL
55188+ grsec_enable_signal = 1;
55189+#endif
55190+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55191+ grsec_enable_forkfail = 1;
55192+#endif
55193+#ifdef CONFIG_GRKERNSEC_TIME
55194+ grsec_enable_time = 1;
55195+#endif
55196+#ifdef CONFIG_GRKERNSEC_RESLOG
55197+ grsec_resource_logging = 1;
55198+#endif
55199+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55200+ grsec_enable_chroot_findtask = 1;
55201+#endif
55202+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55203+ grsec_enable_chroot_unix = 1;
55204+#endif
55205+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55206+ grsec_enable_chroot_mount = 1;
55207+#endif
55208+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55209+ grsec_enable_chroot_fchdir = 1;
55210+#endif
55211+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55212+ grsec_enable_chroot_shmat = 1;
55213+#endif
55214+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55215+ grsec_enable_audit_ptrace = 1;
55216+#endif
55217+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55218+ grsec_enable_chroot_double = 1;
55219+#endif
55220+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55221+ grsec_enable_chroot_pivot = 1;
55222+#endif
55223+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55224+ grsec_enable_chroot_chdir = 1;
55225+#endif
55226+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55227+ grsec_enable_chroot_chmod = 1;
55228+#endif
55229+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55230+ grsec_enable_chroot_mknod = 1;
55231+#endif
55232+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55233+ grsec_enable_chroot_nice = 1;
55234+#endif
55235+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55236+ grsec_enable_chroot_execlog = 1;
55237+#endif
55238+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55239+ grsec_enable_chroot_caps = 1;
55240+#endif
55241+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55242+ grsec_enable_chroot_sysctl = 1;
55243+#endif
55244+#ifdef CONFIG_GRKERNSEC_TPE
55245+ grsec_enable_tpe = 1;
55246+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55247+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55248+ grsec_enable_tpe_all = 1;
55249+#endif
55250+#endif
55251+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55252+ grsec_enable_socket_all = 1;
55253+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55254+#endif
55255+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55256+ grsec_enable_socket_client = 1;
55257+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55258+#endif
55259+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55260+ grsec_enable_socket_server = 1;
55261+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55262+#endif
55263+#endif
55264+
55265+ return;
55266+}
55267diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55268new file mode 100644
55269index 0000000..3efe141
55270--- /dev/null
55271+++ b/grsecurity/grsec_link.c
55272@@ -0,0 +1,43 @@
55273+#include <linux/kernel.h>
55274+#include <linux/sched.h>
55275+#include <linux/fs.h>
55276+#include <linux/file.h>
55277+#include <linux/grinternal.h>
55278+
55279+int
55280+gr_handle_follow_link(const struct inode *parent,
55281+ const struct inode *inode,
55282+ const struct dentry *dentry, const struct vfsmount *mnt)
55283+{
55284+#ifdef CONFIG_GRKERNSEC_LINK
55285+ const struct cred *cred = current_cred();
55286+
55287+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55288+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55289+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55290+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55291+ return -EACCES;
55292+ }
55293+#endif
55294+ return 0;
55295+}
55296+
55297+int
55298+gr_handle_hardlink(const struct dentry *dentry,
55299+ const struct vfsmount *mnt,
55300+ struct inode *inode, const int mode, const char *to)
55301+{
55302+#ifdef CONFIG_GRKERNSEC_LINK
55303+ const struct cred *cred = current_cred();
55304+
55305+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55306+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55307+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55308+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55309+ !capable(CAP_FOWNER) && cred->uid) {
55310+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55311+ return -EPERM;
55312+ }
55313+#endif
55314+ return 0;
55315+}
55316diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55317new file mode 100644
55318index 0000000..a45d2e9
55319--- /dev/null
55320+++ b/grsecurity/grsec_log.c
55321@@ -0,0 +1,322 @@
55322+#include <linux/kernel.h>
55323+#include <linux/sched.h>
55324+#include <linux/file.h>
55325+#include <linux/tty.h>
55326+#include <linux/fs.h>
55327+#include <linux/grinternal.h>
55328+
55329+#ifdef CONFIG_TREE_PREEMPT_RCU
55330+#define DISABLE_PREEMPT() preempt_disable()
55331+#define ENABLE_PREEMPT() preempt_enable()
55332+#else
55333+#define DISABLE_PREEMPT()
55334+#define ENABLE_PREEMPT()
55335+#endif
55336+
55337+#define BEGIN_LOCKS(x) \
55338+ DISABLE_PREEMPT(); \
55339+ rcu_read_lock(); \
55340+ read_lock(&tasklist_lock); \
55341+ read_lock(&grsec_exec_file_lock); \
55342+ if (x != GR_DO_AUDIT) \
55343+ spin_lock(&grsec_alert_lock); \
55344+ else \
55345+ spin_lock(&grsec_audit_lock)
55346+
55347+#define END_LOCKS(x) \
55348+ if (x != GR_DO_AUDIT) \
55349+ spin_unlock(&grsec_alert_lock); \
55350+ else \
55351+ spin_unlock(&grsec_audit_lock); \
55352+ read_unlock(&grsec_exec_file_lock); \
55353+ read_unlock(&tasklist_lock); \
55354+ rcu_read_unlock(); \
55355+ ENABLE_PREEMPT(); \
55356+ if (x == GR_DONT_AUDIT) \
55357+ gr_handle_alertkill(current)
55358+
55359+enum {
55360+ FLOODING,
55361+ NO_FLOODING
55362+};
55363+
55364+extern char *gr_alert_log_fmt;
55365+extern char *gr_audit_log_fmt;
55366+extern char *gr_alert_log_buf;
55367+extern char *gr_audit_log_buf;
55368+
55369+static int gr_log_start(int audit)
55370+{
55371+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55372+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55373+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55374+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55375+ unsigned long curr_secs = get_seconds();
55376+
55377+ if (audit == GR_DO_AUDIT)
55378+ goto set_fmt;
55379+
55380+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55381+ grsec_alert_wtime = curr_secs;
55382+ grsec_alert_fyet = 0;
55383+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55384+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55385+ grsec_alert_fyet++;
55386+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55387+ grsec_alert_wtime = curr_secs;
55388+ grsec_alert_fyet++;
55389+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55390+ return FLOODING;
55391+ }
55392+ else return FLOODING;
55393+
55394+set_fmt:
55395+#endif
55396+ memset(buf, 0, PAGE_SIZE);
55397+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55398+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55399+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55400+ } else if (current->signal->curr_ip) {
55401+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55402+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55403+ } else if (gr_acl_is_enabled()) {
55404+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55405+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55406+ } else {
55407+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55408+ strcpy(buf, fmt);
55409+ }
55410+
55411+ return NO_FLOODING;
55412+}
55413+
55414+static void gr_log_middle(int audit, const char *msg, va_list ap)
55415+ __attribute__ ((format (printf, 2, 0)));
55416+
55417+static void gr_log_middle(int audit, const char *msg, va_list ap)
55418+{
55419+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55420+ unsigned int len = strlen(buf);
55421+
55422+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55423+
55424+ return;
55425+}
55426+
55427+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55428+ __attribute__ ((format (printf, 2, 3)));
55429+
55430+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55431+{
55432+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55433+ unsigned int len = strlen(buf);
55434+ va_list ap;
55435+
55436+ va_start(ap, msg);
55437+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55438+ va_end(ap);
55439+
55440+ return;
55441+}
55442+
55443+static void gr_log_end(int audit, int append_default)
55444+{
55445+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55446+
55447+ if (append_default) {
55448+ unsigned int len = strlen(buf);
55449+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55450+ }
55451+
55452+ printk("%s\n", buf);
55453+
55454+ return;
55455+}
55456+
55457+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55458+{
55459+ int logtype;
55460+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55461+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55462+ void *voidptr = NULL;
55463+ int num1 = 0, num2 = 0;
55464+ unsigned long ulong1 = 0, ulong2 = 0;
55465+ struct dentry *dentry = NULL;
55466+ struct vfsmount *mnt = NULL;
55467+ struct file *file = NULL;
55468+ struct task_struct *task = NULL;
55469+ const struct cred *cred, *pcred;
55470+ va_list ap;
55471+
55472+ BEGIN_LOCKS(audit);
55473+ logtype = gr_log_start(audit);
55474+ if (logtype == FLOODING) {
55475+ END_LOCKS(audit);
55476+ return;
55477+ }
55478+ va_start(ap, argtypes);
55479+ switch (argtypes) {
55480+ case GR_TTYSNIFF:
55481+ task = va_arg(ap, struct task_struct *);
55482+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55483+ break;
55484+ case GR_SYSCTL_HIDDEN:
55485+ str1 = va_arg(ap, char *);
55486+ gr_log_middle_varargs(audit, msg, result, str1);
55487+ break;
55488+ case GR_RBAC:
55489+ dentry = va_arg(ap, struct dentry *);
55490+ mnt = va_arg(ap, struct vfsmount *);
55491+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55492+ break;
55493+ case GR_RBAC_STR:
55494+ dentry = va_arg(ap, struct dentry *);
55495+ mnt = va_arg(ap, struct vfsmount *);
55496+ str1 = va_arg(ap, char *);
55497+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55498+ break;
55499+ case GR_STR_RBAC:
55500+ str1 = va_arg(ap, char *);
55501+ dentry = va_arg(ap, struct dentry *);
55502+ mnt = va_arg(ap, struct vfsmount *);
55503+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55504+ break;
55505+ case GR_RBAC_MODE2:
55506+ dentry = va_arg(ap, struct dentry *);
55507+ mnt = va_arg(ap, struct vfsmount *);
55508+ str1 = va_arg(ap, char *);
55509+ str2 = va_arg(ap, char *);
55510+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55511+ break;
55512+ case GR_RBAC_MODE3:
55513+ dentry = va_arg(ap, struct dentry *);
55514+ mnt = va_arg(ap, struct vfsmount *);
55515+ str1 = va_arg(ap, char *);
55516+ str2 = va_arg(ap, char *);
55517+ str3 = va_arg(ap, char *);
55518+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55519+ break;
55520+ case GR_FILENAME:
55521+ dentry = va_arg(ap, struct dentry *);
55522+ mnt = va_arg(ap, struct vfsmount *);
55523+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55524+ break;
55525+ case GR_STR_FILENAME:
55526+ str1 = va_arg(ap, char *);
55527+ dentry = va_arg(ap, struct dentry *);
55528+ mnt = va_arg(ap, struct vfsmount *);
55529+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55530+ break;
55531+ case GR_FILENAME_STR:
55532+ dentry = va_arg(ap, struct dentry *);
55533+ mnt = va_arg(ap, struct vfsmount *);
55534+ str1 = va_arg(ap, char *);
55535+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55536+ break;
55537+ case GR_FILENAME_TWO_INT:
55538+ dentry = va_arg(ap, struct dentry *);
55539+ mnt = va_arg(ap, struct vfsmount *);
55540+ num1 = va_arg(ap, int);
55541+ num2 = va_arg(ap, int);
55542+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55543+ break;
55544+ case GR_FILENAME_TWO_INT_STR:
55545+ dentry = va_arg(ap, struct dentry *);
55546+ mnt = va_arg(ap, struct vfsmount *);
55547+ num1 = va_arg(ap, int);
55548+ num2 = va_arg(ap, int);
55549+ str1 = va_arg(ap, char *);
55550+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55551+ break;
55552+ case GR_TEXTREL:
55553+ file = va_arg(ap, struct file *);
55554+ ulong1 = va_arg(ap, unsigned long);
55555+ ulong2 = va_arg(ap, unsigned long);
55556+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55557+ break;
55558+ case GR_PTRACE:
55559+ task = va_arg(ap, struct task_struct *);
55560+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55561+ break;
55562+ case GR_RESOURCE:
55563+ task = va_arg(ap, struct task_struct *);
55564+ cred = __task_cred(task);
55565+ pcred = __task_cred(task->real_parent);
55566+ ulong1 = va_arg(ap, unsigned long);
55567+ str1 = va_arg(ap, char *);
55568+ ulong2 = va_arg(ap, unsigned long);
55569+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55570+ break;
55571+ case GR_CAP:
55572+ task = va_arg(ap, struct task_struct *);
55573+ cred = __task_cred(task);
55574+ pcred = __task_cred(task->real_parent);
55575+ str1 = va_arg(ap, char *);
55576+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55577+ break;
55578+ case GR_SIG:
55579+ str1 = va_arg(ap, char *);
55580+ voidptr = va_arg(ap, void *);
55581+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55582+ break;
55583+ case GR_SIG2:
55584+ task = va_arg(ap, struct task_struct *);
55585+ cred = __task_cred(task);
55586+ pcred = __task_cred(task->real_parent);
55587+ num1 = va_arg(ap, int);
55588+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55589+ break;
55590+ case GR_CRASH1:
55591+ task = va_arg(ap, struct task_struct *);
55592+ cred = __task_cred(task);
55593+ pcred = __task_cred(task->real_parent);
55594+ ulong1 = va_arg(ap, unsigned long);
55595+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55596+ break;
55597+ case GR_CRASH2:
55598+ task = va_arg(ap, struct task_struct *);
55599+ cred = __task_cred(task);
55600+ pcred = __task_cred(task->real_parent);
55601+ ulong1 = va_arg(ap, unsigned long);
55602+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55603+ break;
55604+ case GR_RWXMAP:
55605+ file = va_arg(ap, struct file *);
55606+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55607+ break;
55608+ case GR_PSACCT:
55609+ {
55610+ unsigned int wday, cday;
55611+ __u8 whr, chr;
55612+ __u8 wmin, cmin;
55613+ __u8 wsec, csec;
55614+ char cur_tty[64] = { 0 };
55615+ char parent_tty[64] = { 0 };
55616+
55617+ task = va_arg(ap, struct task_struct *);
55618+ wday = va_arg(ap, unsigned int);
55619+ cday = va_arg(ap, unsigned int);
55620+ whr = va_arg(ap, int);
55621+ chr = va_arg(ap, int);
55622+ wmin = va_arg(ap, int);
55623+ cmin = va_arg(ap, int);
55624+ wsec = va_arg(ap, int);
55625+ csec = va_arg(ap, int);
55626+ ulong1 = va_arg(ap, unsigned long);
55627+ cred = __task_cred(task);
55628+ pcred = __task_cred(task->real_parent);
55629+
55630+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55631+ }
55632+ break;
55633+ default:
55634+ gr_log_middle(audit, msg, ap);
55635+ }
55636+ va_end(ap);
55637+ // these don't need DEFAULTSECARGS printed on the end
55638+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55639+ gr_log_end(audit, 0);
55640+ else
55641+ gr_log_end(audit, 1);
55642+ END_LOCKS(audit);
55643+}
55644diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55645new file mode 100644
55646index 0000000..6c0416b
55647--- /dev/null
55648+++ b/grsecurity/grsec_mem.c
55649@@ -0,0 +1,33 @@
55650+#include <linux/kernel.h>
55651+#include <linux/sched.h>
55652+#include <linux/mm.h>
55653+#include <linux/mman.h>
55654+#include <linux/grinternal.h>
55655+
55656+void
55657+gr_handle_ioperm(void)
55658+{
55659+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55660+ return;
55661+}
55662+
55663+void
55664+gr_handle_iopl(void)
55665+{
55666+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55667+ return;
55668+}
55669+
55670+void
55671+gr_handle_mem_readwrite(u64 from, u64 to)
55672+{
55673+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55674+ return;
55675+}
55676+
55677+void
55678+gr_handle_vm86(void)
55679+{
55680+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55681+ return;
55682+}
55683diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55684new file mode 100644
55685index 0000000..2131422
55686--- /dev/null
55687+++ b/grsecurity/grsec_mount.c
55688@@ -0,0 +1,62 @@
55689+#include <linux/kernel.h>
55690+#include <linux/sched.h>
55691+#include <linux/mount.h>
55692+#include <linux/grsecurity.h>
55693+#include <linux/grinternal.h>
55694+
55695+void
55696+gr_log_remount(const char *devname, const int retval)
55697+{
55698+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55699+ if (grsec_enable_mount && (retval >= 0))
55700+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55701+#endif
55702+ return;
55703+}
55704+
55705+void
55706+gr_log_unmount(const char *devname, const int retval)
55707+{
55708+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55709+ if (grsec_enable_mount && (retval >= 0))
55710+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55711+#endif
55712+ return;
55713+}
55714+
55715+void
55716+gr_log_mount(const char *from, const char *to, const int retval)
55717+{
55718+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55719+ if (grsec_enable_mount && (retval >= 0))
55720+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55721+#endif
55722+ return;
55723+}
55724+
55725+int
55726+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55727+{
55728+#ifdef CONFIG_GRKERNSEC_ROFS
55729+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55730+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55731+ return -EPERM;
55732+ } else
55733+ return 0;
55734+#endif
55735+ return 0;
55736+}
55737+
55738+int
55739+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55740+{
55741+#ifdef CONFIG_GRKERNSEC_ROFS
55742+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55743+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55744+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55745+ return -EPERM;
55746+ } else
55747+ return 0;
55748+#endif
55749+ return 0;
55750+}
55751diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55752new file mode 100644
55753index 0000000..a3b12a0
55754--- /dev/null
55755+++ b/grsecurity/grsec_pax.c
55756@@ -0,0 +1,36 @@
55757+#include <linux/kernel.h>
55758+#include <linux/sched.h>
55759+#include <linux/mm.h>
55760+#include <linux/file.h>
55761+#include <linux/grinternal.h>
55762+#include <linux/grsecurity.h>
55763+
55764+void
55765+gr_log_textrel(struct vm_area_struct * vma)
55766+{
55767+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55768+ if (grsec_enable_audit_textrel)
55769+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55770+#endif
55771+ return;
55772+}
55773+
55774+void
55775+gr_log_rwxmmap(struct file *file)
55776+{
55777+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55778+ if (grsec_enable_log_rwxmaps)
55779+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55780+#endif
55781+ return;
55782+}
55783+
55784+void
55785+gr_log_rwxmprotect(struct file *file)
55786+{
55787+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55788+ if (grsec_enable_log_rwxmaps)
55789+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55790+#endif
55791+ return;
55792+}
55793diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55794new file mode 100644
55795index 0000000..f7f29aa
55796--- /dev/null
55797+++ b/grsecurity/grsec_ptrace.c
55798@@ -0,0 +1,30 @@
55799+#include <linux/kernel.h>
55800+#include <linux/sched.h>
55801+#include <linux/grinternal.h>
55802+#include <linux/security.h>
55803+
55804+void
55805+gr_audit_ptrace(struct task_struct *task)
55806+{
55807+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55808+ if (grsec_enable_audit_ptrace)
55809+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55810+#endif
55811+ return;
55812+}
55813+
55814+int
55815+gr_ptrace_readexec(struct file *file, int unsafe_flags)
55816+{
55817+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55818+ const struct dentry *dentry = file->f_path.dentry;
55819+ const struct vfsmount *mnt = file->f_path.mnt;
55820+
55821+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
55822+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
55823+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
55824+ return -EACCES;
55825+ }
55826+#endif
55827+ return 0;
55828+}
55829diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
55830new file mode 100644
55831index 0000000..7a5b2de
55832--- /dev/null
55833+++ b/grsecurity/grsec_sig.c
55834@@ -0,0 +1,207 @@
55835+#include <linux/kernel.h>
55836+#include <linux/sched.h>
55837+#include <linux/delay.h>
55838+#include <linux/grsecurity.h>
55839+#include <linux/grinternal.h>
55840+#include <linux/hardirq.h>
55841+
55842+char *signames[] = {
55843+ [SIGSEGV] = "Segmentation fault",
55844+ [SIGILL] = "Illegal instruction",
55845+ [SIGABRT] = "Abort",
55846+ [SIGBUS] = "Invalid alignment/Bus error"
55847+};
55848+
55849+void
55850+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55851+{
55852+#ifdef CONFIG_GRKERNSEC_SIGNAL
55853+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55854+ (sig == SIGABRT) || (sig == SIGBUS))) {
55855+ if (t->pid == current->pid) {
55856+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55857+ } else {
55858+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55859+ }
55860+ }
55861+#endif
55862+ return;
55863+}
55864+
55865+int
55866+gr_handle_signal(const struct task_struct *p, const int sig)
55867+{
55868+#ifdef CONFIG_GRKERNSEC
55869+ /* ignore the 0 signal for protected task checks */
55870+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
55871+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55872+ return -EPERM;
55873+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55874+ return -EPERM;
55875+ }
55876+#endif
55877+ return 0;
55878+}
55879+
55880+#ifdef CONFIG_GRKERNSEC
55881+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55882+
55883+int gr_fake_force_sig(int sig, struct task_struct *t)
55884+{
55885+ unsigned long int flags;
55886+ int ret, blocked, ignored;
55887+ struct k_sigaction *action;
55888+
55889+ spin_lock_irqsave(&t->sighand->siglock, flags);
55890+ action = &t->sighand->action[sig-1];
55891+ ignored = action->sa.sa_handler == SIG_IGN;
55892+ blocked = sigismember(&t->blocked, sig);
55893+ if (blocked || ignored) {
55894+ action->sa.sa_handler = SIG_DFL;
55895+ if (blocked) {
55896+ sigdelset(&t->blocked, sig);
55897+ recalc_sigpending_and_wake(t);
55898+ }
55899+ }
55900+ if (action->sa.sa_handler == SIG_DFL)
55901+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
55902+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55903+
55904+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
55905+
55906+ return ret;
55907+}
55908+#endif
55909+
55910+#ifdef CONFIG_GRKERNSEC_BRUTE
55911+#define GR_USER_BAN_TIME (15 * 60)
55912+
55913+static int __get_dumpable(unsigned long mm_flags)
55914+{
55915+ int ret;
55916+
55917+ ret = mm_flags & MMF_DUMPABLE_MASK;
55918+ return (ret >= 2) ? 2 : ret;
55919+}
55920+#endif
55921+
55922+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55923+{
55924+#ifdef CONFIG_GRKERNSEC_BRUTE
55925+ uid_t uid = 0;
55926+
55927+ if (!grsec_enable_brute)
55928+ return;
55929+
55930+ rcu_read_lock();
55931+ read_lock(&tasklist_lock);
55932+ read_lock(&grsec_exec_file_lock);
55933+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55934+ p->real_parent->brute = 1;
55935+ else {
55936+ const struct cred *cred = __task_cred(p), *cred2;
55937+ struct task_struct *tsk, *tsk2;
55938+
55939+ if (!__get_dumpable(mm_flags) && cred->uid) {
55940+ struct user_struct *user;
55941+
55942+ uid = cred->uid;
55943+
55944+ /* this is put upon execution past expiration */
55945+ user = find_user(uid);
55946+ if (user == NULL)
55947+ goto unlock;
55948+ user->banned = 1;
55949+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55950+ if (user->ban_expires == ~0UL)
55951+ user->ban_expires--;
55952+
55953+ do_each_thread(tsk2, tsk) {
55954+ cred2 = __task_cred(tsk);
55955+ if (tsk != p && cred2->uid == uid)
55956+ gr_fake_force_sig(SIGKILL, tsk);
55957+ } while_each_thread(tsk2, tsk);
55958+ }
55959+ }
55960+unlock:
55961+ read_unlock(&grsec_exec_file_lock);
55962+ read_unlock(&tasklist_lock);
55963+ rcu_read_unlock();
55964+
55965+ if (uid)
55966+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55967+
55968+#endif
55969+ return;
55970+}
55971+
55972+void gr_handle_brute_check(void)
55973+{
55974+#ifdef CONFIG_GRKERNSEC_BRUTE
55975+ if (current->brute)
55976+ msleep(30 * 1000);
55977+#endif
55978+ return;
55979+}
55980+
55981+void gr_handle_kernel_exploit(void)
55982+{
55983+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55984+ const struct cred *cred;
55985+ struct task_struct *tsk, *tsk2;
55986+ struct user_struct *user;
55987+ uid_t uid;
55988+
55989+ if (in_irq() || in_serving_softirq() || in_nmi())
55990+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55991+
55992+ uid = current_uid();
55993+
55994+ if (uid == 0)
55995+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55996+ else {
55997+ /* kill all the processes of this user, hold a reference
55998+ to their creds struct, and prevent them from creating
55999+ another process until system reset
56000+ */
56001+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56002+ /* we intentionally leak this ref */
56003+ user = get_uid(current->cred->user);
56004+ if (user) {
56005+ user->banned = 1;
56006+ user->ban_expires = ~0UL;
56007+ }
56008+
56009+ read_lock(&tasklist_lock);
56010+ do_each_thread(tsk2, tsk) {
56011+ cred = __task_cred(tsk);
56012+ if (cred->uid == uid)
56013+ gr_fake_force_sig(SIGKILL, tsk);
56014+ } while_each_thread(tsk2, tsk);
56015+ read_unlock(&tasklist_lock);
56016+ }
56017+#endif
56018+}
56019+
56020+int __gr_process_user_ban(struct user_struct *user)
56021+{
56022+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56023+ if (unlikely(user->banned)) {
56024+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56025+ user->banned = 0;
56026+ user->ban_expires = 0;
56027+ free_uid(user);
56028+ } else
56029+ return -EPERM;
56030+ }
56031+#endif
56032+ return 0;
56033+}
56034+
56035+int gr_process_user_ban(void)
56036+{
56037+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56038+ return __gr_process_user_ban(current->cred->user);
56039+#endif
56040+ return 0;
56041+}
56042diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56043new file mode 100644
56044index 0000000..4030d57
56045--- /dev/null
56046+++ b/grsecurity/grsec_sock.c
56047@@ -0,0 +1,244 @@
56048+#include <linux/kernel.h>
56049+#include <linux/module.h>
56050+#include <linux/sched.h>
56051+#include <linux/file.h>
56052+#include <linux/net.h>
56053+#include <linux/in.h>
56054+#include <linux/ip.h>
56055+#include <net/sock.h>
56056+#include <net/inet_sock.h>
56057+#include <linux/grsecurity.h>
56058+#include <linux/grinternal.h>
56059+#include <linux/gracl.h>
56060+
56061+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56062+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56063+
56064+EXPORT_SYMBOL(gr_search_udp_recvmsg);
56065+EXPORT_SYMBOL(gr_search_udp_sendmsg);
56066+
56067+#ifdef CONFIG_UNIX_MODULE
56068+EXPORT_SYMBOL(gr_acl_handle_unix);
56069+EXPORT_SYMBOL(gr_acl_handle_mknod);
56070+EXPORT_SYMBOL(gr_handle_chroot_unix);
56071+EXPORT_SYMBOL(gr_handle_create);
56072+#endif
56073+
56074+#ifdef CONFIG_GRKERNSEC
56075+#define gr_conn_table_size 32749
56076+struct conn_table_entry {
56077+ struct conn_table_entry *next;
56078+ struct signal_struct *sig;
56079+};
56080+
56081+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56082+DEFINE_SPINLOCK(gr_conn_table_lock);
56083+
56084+extern const char * gr_socktype_to_name(unsigned char type);
56085+extern const char * gr_proto_to_name(unsigned char proto);
56086+extern const char * gr_sockfamily_to_name(unsigned char family);
56087+
56088+static __inline__ int
56089+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56090+{
56091+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56092+}
56093+
56094+static __inline__ int
56095+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56096+ __u16 sport, __u16 dport)
56097+{
56098+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56099+ sig->gr_sport == sport && sig->gr_dport == dport))
56100+ return 1;
56101+ else
56102+ return 0;
56103+}
56104+
56105+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56106+{
56107+ struct conn_table_entry **match;
56108+ unsigned int index;
56109+
56110+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56111+ sig->gr_sport, sig->gr_dport,
56112+ gr_conn_table_size);
56113+
56114+ newent->sig = sig;
56115+
56116+ match = &gr_conn_table[index];
56117+ newent->next = *match;
56118+ *match = newent;
56119+
56120+ return;
56121+}
56122+
56123+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56124+{
56125+ struct conn_table_entry *match, *last = NULL;
56126+ unsigned int index;
56127+
56128+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56129+ sig->gr_sport, sig->gr_dport,
56130+ gr_conn_table_size);
56131+
56132+ match = gr_conn_table[index];
56133+ while (match && !conn_match(match->sig,
56134+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56135+ sig->gr_dport)) {
56136+ last = match;
56137+ match = match->next;
56138+ }
56139+
56140+ if (match) {
56141+ if (last)
56142+ last->next = match->next;
56143+ else
56144+ gr_conn_table[index] = NULL;
56145+ kfree(match);
56146+ }
56147+
56148+ return;
56149+}
56150+
56151+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56152+ __u16 sport, __u16 dport)
56153+{
56154+ struct conn_table_entry *match;
56155+ unsigned int index;
56156+
56157+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56158+
56159+ match = gr_conn_table[index];
56160+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56161+ match = match->next;
56162+
56163+ if (match)
56164+ return match->sig;
56165+ else
56166+ return NULL;
56167+}
56168+
56169+#endif
56170+
56171+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56172+{
56173+#ifdef CONFIG_GRKERNSEC
56174+ struct signal_struct *sig = task->signal;
56175+ struct conn_table_entry *newent;
56176+
56177+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56178+ if (newent == NULL)
56179+ return;
56180+ /* no bh lock needed since we are called with bh disabled */
56181+ spin_lock(&gr_conn_table_lock);
56182+ gr_del_task_from_ip_table_nolock(sig);
56183+ sig->gr_saddr = inet->inet_rcv_saddr;
56184+ sig->gr_daddr = inet->inet_daddr;
56185+ sig->gr_sport = inet->inet_sport;
56186+ sig->gr_dport = inet->inet_dport;
56187+ gr_add_to_task_ip_table_nolock(sig, newent);
56188+ spin_unlock(&gr_conn_table_lock);
56189+#endif
56190+ return;
56191+}
56192+
56193+void gr_del_task_from_ip_table(struct task_struct *task)
56194+{
56195+#ifdef CONFIG_GRKERNSEC
56196+ spin_lock_bh(&gr_conn_table_lock);
56197+ gr_del_task_from_ip_table_nolock(task->signal);
56198+ spin_unlock_bh(&gr_conn_table_lock);
56199+#endif
56200+ return;
56201+}
56202+
56203+void
56204+gr_attach_curr_ip(const struct sock *sk)
56205+{
56206+#ifdef CONFIG_GRKERNSEC
56207+ struct signal_struct *p, *set;
56208+ const struct inet_sock *inet = inet_sk(sk);
56209+
56210+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56211+ return;
56212+
56213+ set = current->signal;
56214+
56215+ spin_lock_bh(&gr_conn_table_lock);
56216+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56217+ inet->inet_dport, inet->inet_sport);
56218+ if (unlikely(p != NULL)) {
56219+ set->curr_ip = p->curr_ip;
56220+ set->used_accept = 1;
56221+ gr_del_task_from_ip_table_nolock(p);
56222+ spin_unlock_bh(&gr_conn_table_lock);
56223+ return;
56224+ }
56225+ spin_unlock_bh(&gr_conn_table_lock);
56226+
56227+ set->curr_ip = inet->inet_daddr;
56228+ set->used_accept = 1;
56229+#endif
56230+ return;
56231+}
56232+
56233+int
56234+gr_handle_sock_all(const int family, const int type, const int protocol)
56235+{
56236+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56237+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56238+ (family != AF_UNIX)) {
56239+ if (family == AF_INET)
56240+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56241+ else
56242+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56243+ return -EACCES;
56244+ }
56245+#endif
56246+ return 0;
56247+}
56248+
56249+int
56250+gr_handle_sock_server(const struct sockaddr *sck)
56251+{
56252+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56253+ if (grsec_enable_socket_server &&
56254+ in_group_p(grsec_socket_server_gid) &&
56255+ sck && (sck->sa_family != AF_UNIX) &&
56256+ (sck->sa_family != AF_LOCAL)) {
56257+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56258+ return -EACCES;
56259+ }
56260+#endif
56261+ return 0;
56262+}
56263+
56264+int
56265+gr_handle_sock_server_other(const struct sock *sck)
56266+{
56267+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56268+ if (grsec_enable_socket_server &&
56269+ in_group_p(grsec_socket_server_gid) &&
56270+ sck && (sck->sk_family != AF_UNIX) &&
56271+ (sck->sk_family != AF_LOCAL)) {
56272+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56273+ return -EACCES;
56274+ }
56275+#endif
56276+ return 0;
56277+}
56278+
56279+int
56280+gr_handle_sock_client(const struct sockaddr *sck)
56281+{
56282+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56283+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56284+ sck && (sck->sa_family != AF_UNIX) &&
56285+ (sck->sa_family != AF_LOCAL)) {
56286+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56287+ return -EACCES;
56288+ }
56289+#endif
56290+ return 0;
56291+}
56292diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56293new file mode 100644
56294index 0000000..a1aedd7
56295--- /dev/null
56296+++ b/grsecurity/grsec_sysctl.c
56297@@ -0,0 +1,451 @@
56298+#include <linux/kernel.h>
56299+#include <linux/sched.h>
56300+#include <linux/sysctl.h>
56301+#include <linux/grsecurity.h>
56302+#include <linux/grinternal.h>
56303+
56304+int
56305+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56306+{
56307+#ifdef CONFIG_GRKERNSEC_SYSCTL
56308+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56309+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56310+ return -EACCES;
56311+ }
56312+#endif
56313+ return 0;
56314+}
56315+
56316+#ifdef CONFIG_GRKERNSEC_ROFS
56317+static int __maybe_unused one = 1;
56318+#endif
56319+
56320+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56321+struct ctl_table grsecurity_table[] = {
56322+#ifdef CONFIG_GRKERNSEC_SYSCTL
56323+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56324+#ifdef CONFIG_GRKERNSEC_IO
56325+ {
56326+ .procname = "disable_priv_io",
56327+ .data = &grsec_disable_privio,
56328+ .maxlen = sizeof(int),
56329+ .mode = 0600,
56330+ .proc_handler = &proc_dointvec,
56331+ },
56332+#endif
56333+#endif
56334+#ifdef CONFIG_GRKERNSEC_LINK
56335+ {
56336+ .procname = "linking_restrictions",
56337+ .data = &grsec_enable_link,
56338+ .maxlen = sizeof(int),
56339+ .mode = 0600,
56340+ .proc_handler = &proc_dointvec,
56341+ },
56342+#endif
56343+#ifdef CONFIG_GRKERNSEC_BRUTE
56344+ {
56345+ .procname = "deter_bruteforce",
56346+ .data = &grsec_enable_brute,
56347+ .maxlen = sizeof(int),
56348+ .mode = 0600,
56349+ .proc_handler = &proc_dointvec,
56350+ },
56351+#endif
56352+#ifdef CONFIG_GRKERNSEC_FIFO
56353+ {
56354+ .procname = "fifo_restrictions",
56355+ .data = &grsec_enable_fifo,
56356+ .maxlen = sizeof(int),
56357+ .mode = 0600,
56358+ .proc_handler = &proc_dointvec,
56359+ },
56360+#endif
56361+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56362+ {
56363+ .procname = "ptrace_readexec",
56364+ .data = &grsec_enable_ptrace_readexec,
56365+ .maxlen = sizeof(int),
56366+ .mode = 0600,
56367+ .proc_handler = &proc_dointvec,
56368+ },
56369+#endif
56370+#ifdef CONFIG_GRKERNSEC_SETXID
56371+ {
56372+ .procname = "consistent_setxid",
56373+ .data = &grsec_enable_setxid,
56374+ .maxlen = sizeof(int),
56375+ .mode = 0600,
56376+ .proc_handler = &proc_dointvec,
56377+ },
56378+#endif
56379+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56380+ {
56381+ .procname = "ip_blackhole",
56382+ .data = &grsec_enable_blackhole,
56383+ .maxlen = sizeof(int),
56384+ .mode = 0600,
56385+ .proc_handler = &proc_dointvec,
56386+ },
56387+ {
56388+ .procname = "lastack_retries",
56389+ .data = &grsec_lastack_retries,
56390+ .maxlen = sizeof(int),
56391+ .mode = 0600,
56392+ .proc_handler = &proc_dointvec,
56393+ },
56394+#endif
56395+#ifdef CONFIG_GRKERNSEC_EXECLOG
56396+ {
56397+ .procname = "exec_logging",
56398+ .data = &grsec_enable_execlog,
56399+ .maxlen = sizeof(int),
56400+ .mode = 0600,
56401+ .proc_handler = &proc_dointvec,
56402+ },
56403+#endif
56404+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56405+ {
56406+ .procname = "rwxmap_logging",
56407+ .data = &grsec_enable_log_rwxmaps,
56408+ .maxlen = sizeof(int),
56409+ .mode = 0600,
56410+ .proc_handler = &proc_dointvec,
56411+ },
56412+#endif
56413+#ifdef CONFIG_GRKERNSEC_SIGNAL
56414+ {
56415+ .procname = "signal_logging",
56416+ .data = &grsec_enable_signal,
56417+ .maxlen = sizeof(int),
56418+ .mode = 0600,
56419+ .proc_handler = &proc_dointvec,
56420+ },
56421+#endif
56422+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56423+ {
56424+ .procname = "forkfail_logging",
56425+ .data = &grsec_enable_forkfail,
56426+ .maxlen = sizeof(int),
56427+ .mode = 0600,
56428+ .proc_handler = &proc_dointvec,
56429+ },
56430+#endif
56431+#ifdef CONFIG_GRKERNSEC_TIME
56432+ {
56433+ .procname = "timechange_logging",
56434+ .data = &grsec_enable_time,
56435+ .maxlen = sizeof(int),
56436+ .mode = 0600,
56437+ .proc_handler = &proc_dointvec,
56438+ },
56439+#endif
56440+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56441+ {
56442+ .procname = "chroot_deny_shmat",
56443+ .data = &grsec_enable_chroot_shmat,
56444+ .maxlen = sizeof(int),
56445+ .mode = 0600,
56446+ .proc_handler = &proc_dointvec,
56447+ },
56448+#endif
56449+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56450+ {
56451+ .procname = "chroot_deny_unix",
56452+ .data = &grsec_enable_chroot_unix,
56453+ .maxlen = sizeof(int),
56454+ .mode = 0600,
56455+ .proc_handler = &proc_dointvec,
56456+ },
56457+#endif
56458+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56459+ {
56460+ .procname = "chroot_deny_mount",
56461+ .data = &grsec_enable_chroot_mount,
56462+ .maxlen = sizeof(int),
56463+ .mode = 0600,
56464+ .proc_handler = &proc_dointvec,
56465+ },
56466+#endif
56467+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56468+ {
56469+ .procname = "chroot_deny_fchdir",
56470+ .data = &grsec_enable_chroot_fchdir,
56471+ .maxlen = sizeof(int),
56472+ .mode = 0600,
56473+ .proc_handler = &proc_dointvec,
56474+ },
56475+#endif
56476+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56477+ {
56478+ .procname = "chroot_deny_chroot",
56479+ .data = &grsec_enable_chroot_double,
56480+ .maxlen = sizeof(int),
56481+ .mode = 0600,
56482+ .proc_handler = &proc_dointvec,
56483+ },
56484+#endif
56485+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56486+ {
56487+ .procname = "chroot_deny_pivot",
56488+ .data = &grsec_enable_chroot_pivot,
56489+ .maxlen = sizeof(int),
56490+ .mode = 0600,
56491+ .proc_handler = &proc_dointvec,
56492+ },
56493+#endif
56494+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56495+ {
56496+ .procname = "chroot_enforce_chdir",
56497+ .data = &grsec_enable_chroot_chdir,
56498+ .maxlen = sizeof(int),
56499+ .mode = 0600,
56500+ .proc_handler = &proc_dointvec,
56501+ },
56502+#endif
56503+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56504+ {
56505+ .procname = "chroot_deny_chmod",
56506+ .data = &grsec_enable_chroot_chmod,
56507+ .maxlen = sizeof(int),
56508+ .mode = 0600,
56509+ .proc_handler = &proc_dointvec,
56510+ },
56511+#endif
56512+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56513+ {
56514+ .procname = "chroot_deny_mknod",
56515+ .data = &grsec_enable_chroot_mknod,
56516+ .maxlen = sizeof(int),
56517+ .mode = 0600,
56518+ .proc_handler = &proc_dointvec,
56519+ },
56520+#endif
56521+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56522+ {
56523+ .procname = "chroot_restrict_nice",
56524+ .data = &grsec_enable_chroot_nice,
56525+ .maxlen = sizeof(int),
56526+ .mode = 0600,
56527+ .proc_handler = &proc_dointvec,
56528+ },
56529+#endif
56530+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56531+ {
56532+ .procname = "chroot_execlog",
56533+ .data = &grsec_enable_chroot_execlog,
56534+ .maxlen = sizeof(int),
56535+ .mode = 0600,
56536+ .proc_handler = &proc_dointvec,
56537+ },
56538+#endif
56539+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56540+ {
56541+ .procname = "chroot_caps",
56542+ .data = &grsec_enable_chroot_caps,
56543+ .maxlen = sizeof(int),
56544+ .mode = 0600,
56545+ .proc_handler = &proc_dointvec,
56546+ },
56547+#endif
56548+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56549+ {
56550+ .procname = "chroot_deny_sysctl",
56551+ .data = &grsec_enable_chroot_sysctl,
56552+ .maxlen = sizeof(int),
56553+ .mode = 0600,
56554+ .proc_handler = &proc_dointvec,
56555+ },
56556+#endif
56557+#ifdef CONFIG_GRKERNSEC_TPE
56558+ {
56559+ .procname = "tpe",
56560+ .data = &grsec_enable_tpe,
56561+ .maxlen = sizeof(int),
56562+ .mode = 0600,
56563+ .proc_handler = &proc_dointvec,
56564+ },
56565+ {
56566+ .procname = "tpe_gid",
56567+ .data = &grsec_tpe_gid,
56568+ .maxlen = sizeof(int),
56569+ .mode = 0600,
56570+ .proc_handler = &proc_dointvec,
56571+ },
56572+#endif
56573+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56574+ {
56575+ .procname = "tpe_invert",
56576+ .data = &grsec_enable_tpe_invert,
56577+ .maxlen = sizeof(int),
56578+ .mode = 0600,
56579+ .proc_handler = &proc_dointvec,
56580+ },
56581+#endif
56582+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56583+ {
56584+ .procname = "tpe_restrict_all",
56585+ .data = &grsec_enable_tpe_all,
56586+ .maxlen = sizeof(int),
56587+ .mode = 0600,
56588+ .proc_handler = &proc_dointvec,
56589+ },
56590+#endif
56591+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56592+ {
56593+ .procname = "socket_all",
56594+ .data = &grsec_enable_socket_all,
56595+ .maxlen = sizeof(int),
56596+ .mode = 0600,
56597+ .proc_handler = &proc_dointvec,
56598+ },
56599+ {
56600+ .procname = "socket_all_gid",
56601+ .data = &grsec_socket_all_gid,
56602+ .maxlen = sizeof(int),
56603+ .mode = 0600,
56604+ .proc_handler = &proc_dointvec,
56605+ },
56606+#endif
56607+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56608+ {
56609+ .procname = "socket_client",
56610+ .data = &grsec_enable_socket_client,
56611+ .maxlen = sizeof(int),
56612+ .mode = 0600,
56613+ .proc_handler = &proc_dointvec,
56614+ },
56615+ {
56616+ .procname = "socket_client_gid",
56617+ .data = &grsec_socket_client_gid,
56618+ .maxlen = sizeof(int),
56619+ .mode = 0600,
56620+ .proc_handler = &proc_dointvec,
56621+ },
56622+#endif
56623+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56624+ {
56625+ .procname = "socket_server",
56626+ .data = &grsec_enable_socket_server,
56627+ .maxlen = sizeof(int),
56628+ .mode = 0600,
56629+ .proc_handler = &proc_dointvec,
56630+ },
56631+ {
56632+ .procname = "socket_server_gid",
56633+ .data = &grsec_socket_server_gid,
56634+ .maxlen = sizeof(int),
56635+ .mode = 0600,
56636+ .proc_handler = &proc_dointvec,
56637+ },
56638+#endif
56639+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56640+ {
56641+ .procname = "audit_group",
56642+ .data = &grsec_enable_group,
56643+ .maxlen = sizeof(int),
56644+ .mode = 0600,
56645+ .proc_handler = &proc_dointvec,
56646+ },
56647+ {
56648+ .procname = "audit_gid",
56649+ .data = &grsec_audit_gid,
56650+ .maxlen = sizeof(int),
56651+ .mode = 0600,
56652+ .proc_handler = &proc_dointvec,
56653+ },
56654+#endif
56655+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56656+ {
56657+ .procname = "audit_chdir",
56658+ .data = &grsec_enable_chdir,
56659+ .maxlen = sizeof(int),
56660+ .mode = 0600,
56661+ .proc_handler = &proc_dointvec,
56662+ },
56663+#endif
56664+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56665+ {
56666+ .procname = "audit_mount",
56667+ .data = &grsec_enable_mount,
56668+ .maxlen = sizeof(int),
56669+ .mode = 0600,
56670+ .proc_handler = &proc_dointvec,
56671+ },
56672+#endif
56673+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56674+ {
56675+ .procname = "audit_textrel",
56676+ .data = &grsec_enable_audit_textrel,
56677+ .maxlen = sizeof(int),
56678+ .mode = 0600,
56679+ .proc_handler = &proc_dointvec,
56680+ },
56681+#endif
56682+#ifdef CONFIG_GRKERNSEC_DMESG
56683+ {
56684+ .procname = "dmesg",
56685+ .data = &grsec_enable_dmesg,
56686+ .maxlen = sizeof(int),
56687+ .mode = 0600,
56688+ .proc_handler = &proc_dointvec,
56689+ },
56690+#endif
56691+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56692+ {
56693+ .procname = "chroot_findtask",
56694+ .data = &grsec_enable_chroot_findtask,
56695+ .maxlen = sizeof(int),
56696+ .mode = 0600,
56697+ .proc_handler = &proc_dointvec,
56698+ },
56699+#endif
56700+#ifdef CONFIG_GRKERNSEC_RESLOG
56701+ {
56702+ .procname = "resource_logging",
56703+ .data = &grsec_resource_logging,
56704+ .maxlen = sizeof(int),
56705+ .mode = 0600,
56706+ .proc_handler = &proc_dointvec,
56707+ },
56708+#endif
56709+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56710+ {
56711+ .procname = "audit_ptrace",
56712+ .data = &grsec_enable_audit_ptrace,
56713+ .maxlen = sizeof(int),
56714+ .mode = 0600,
56715+ .proc_handler = &proc_dointvec,
56716+ },
56717+#endif
56718+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56719+ {
56720+ .procname = "harden_ptrace",
56721+ .data = &grsec_enable_harden_ptrace,
56722+ .maxlen = sizeof(int),
56723+ .mode = 0600,
56724+ .proc_handler = &proc_dointvec,
56725+ },
56726+#endif
56727+ {
56728+ .procname = "grsec_lock",
56729+ .data = &grsec_lock,
56730+ .maxlen = sizeof(int),
56731+ .mode = 0600,
56732+ .proc_handler = &proc_dointvec,
56733+ },
56734+#endif
56735+#ifdef CONFIG_GRKERNSEC_ROFS
56736+ {
56737+ .procname = "romount_protect",
56738+ .data = &grsec_enable_rofs,
56739+ .maxlen = sizeof(int),
56740+ .mode = 0600,
56741+ .proc_handler = &proc_dointvec_minmax,
56742+ .extra1 = &one,
56743+ .extra2 = &one,
56744+ },
56745+#endif
56746+ { }
56747+};
56748+#endif
56749diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56750new file mode 100644
56751index 0000000..0dc13c3
56752--- /dev/null
56753+++ b/grsecurity/grsec_time.c
56754@@ -0,0 +1,16 @@
56755+#include <linux/kernel.h>
56756+#include <linux/sched.h>
56757+#include <linux/grinternal.h>
56758+#include <linux/module.h>
56759+
56760+void
56761+gr_log_timechange(void)
56762+{
56763+#ifdef CONFIG_GRKERNSEC_TIME
56764+ if (grsec_enable_time)
56765+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56766+#endif
56767+ return;
56768+}
56769+
56770+EXPORT_SYMBOL(gr_log_timechange);
56771diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56772new file mode 100644
56773index 0000000..07e0dc0
56774--- /dev/null
56775+++ b/grsecurity/grsec_tpe.c
56776@@ -0,0 +1,73 @@
56777+#include <linux/kernel.h>
56778+#include <linux/sched.h>
56779+#include <linux/file.h>
56780+#include <linux/fs.h>
56781+#include <linux/grinternal.h>
56782+
56783+extern int gr_acl_tpe_check(void);
56784+
56785+int
56786+gr_tpe_allow(const struct file *file)
56787+{
56788+#ifdef CONFIG_GRKERNSEC
56789+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56790+ const struct cred *cred = current_cred();
56791+ char *msg = NULL;
56792+ char *msg2 = NULL;
56793+
56794+ // never restrict root
56795+ if (!cred->uid)
56796+ return 1;
56797+
56798+ if (grsec_enable_tpe) {
56799+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56800+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
56801+ msg = "not being in trusted group";
56802+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
56803+ msg = "being in untrusted group";
56804+#else
56805+ if (in_group_p(grsec_tpe_gid))
56806+ msg = "being in untrusted group";
56807+#endif
56808+ }
56809+ if (!msg && gr_acl_tpe_check())
56810+ msg = "being in untrusted role";
56811+
56812+ // not in any affected group/role
56813+ if (!msg)
56814+ goto next_check;
56815+
56816+ if (inode->i_uid)
56817+ msg2 = "file in non-root-owned directory";
56818+ else if (inode->i_mode & S_IWOTH)
56819+ msg2 = "file in world-writable directory";
56820+ else if (inode->i_mode & S_IWGRP)
56821+ msg2 = "file in group-writable directory";
56822+
56823+ if (msg && msg2) {
56824+ char fullmsg[70] = {0};
56825+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
56826+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
56827+ return 0;
56828+ }
56829+ msg = NULL;
56830+next_check:
56831+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56832+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
56833+ return 1;
56834+
56835+ if (inode->i_uid && (inode->i_uid != cred->uid))
56836+ msg = "directory not owned by user";
56837+ else if (inode->i_mode & S_IWOTH)
56838+ msg = "file in world-writable directory";
56839+ else if (inode->i_mode & S_IWGRP)
56840+ msg = "file in group-writable directory";
56841+
56842+ if (msg) {
56843+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
56844+ return 0;
56845+ }
56846+#endif
56847+#endif
56848+ return 1;
56849+}
56850diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
56851new file mode 100644
56852index 0000000..9f7b1ac
56853--- /dev/null
56854+++ b/grsecurity/grsum.c
56855@@ -0,0 +1,61 @@
56856+#include <linux/err.h>
56857+#include <linux/kernel.h>
56858+#include <linux/sched.h>
56859+#include <linux/mm.h>
56860+#include <linux/scatterlist.h>
56861+#include <linux/crypto.h>
56862+#include <linux/gracl.h>
56863+
56864+
56865+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56866+#error "crypto and sha256 must be built into the kernel"
56867+#endif
56868+
56869+int
56870+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56871+{
56872+ char *p;
56873+ struct crypto_hash *tfm;
56874+ struct hash_desc desc;
56875+ struct scatterlist sg;
56876+ unsigned char temp_sum[GR_SHA_LEN];
56877+ volatile int retval = 0;
56878+ volatile int dummy = 0;
56879+ unsigned int i;
56880+
56881+ sg_init_table(&sg, 1);
56882+
56883+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56884+ if (IS_ERR(tfm)) {
56885+ /* should never happen, since sha256 should be built in */
56886+ return 1;
56887+ }
56888+
56889+ desc.tfm = tfm;
56890+ desc.flags = 0;
56891+
56892+ crypto_hash_init(&desc);
56893+
56894+ p = salt;
56895+ sg_set_buf(&sg, p, GR_SALT_LEN);
56896+ crypto_hash_update(&desc, &sg, sg.length);
56897+
56898+ p = entry->pw;
56899+ sg_set_buf(&sg, p, strlen(p));
56900+
56901+ crypto_hash_update(&desc, &sg, sg.length);
56902+
56903+ crypto_hash_final(&desc, temp_sum);
56904+
56905+ memset(entry->pw, 0, GR_PW_LEN);
56906+
56907+ for (i = 0; i < GR_SHA_LEN; i++)
56908+ if (sum[i] != temp_sum[i])
56909+ retval = 1;
56910+ else
56911+ dummy = 1; // waste a cycle
56912+
56913+ crypto_free_hash(tfm);
56914+
56915+ return retval;
56916+}
56917diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
56918index 6cd5b64..f620d2d 100644
56919--- a/include/acpi/acpi_bus.h
56920+++ b/include/acpi/acpi_bus.h
56921@@ -107,7 +107,7 @@ struct acpi_device_ops {
56922 acpi_op_bind bind;
56923 acpi_op_unbind unbind;
56924 acpi_op_notify notify;
56925-};
56926+} __no_const;
56927
56928 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56929
56930diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
56931index b7babf0..71e4e74 100644
56932--- a/include/asm-generic/atomic-long.h
56933+++ b/include/asm-generic/atomic-long.h
56934@@ -22,6 +22,12 @@
56935
56936 typedef atomic64_t atomic_long_t;
56937
56938+#ifdef CONFIG_PAX_REFCOUNT
56939+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56940+#else
56941+typedef atomic64_t atomic_long_unchecked_t;
56942+#endif
56943+
56944 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56945
56946 static inline long atomic_long_read(atomic_long_t *l)
56947@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
56948 return (long)atomic64_read(v);
56949 }
56950
56951+#ifdef CONFIG_PAX_REFCOUNT
56952+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56953+{
56954+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56955+
56956+ return (long)atomic64_read_unchecked(v);
56957+}
56958+#endif
56959+
56960 static inline void atomic_long_set(atomic_long_t *l, long i)
56961 {
56962 atomic64_t *v = (atomic64_t *)l;
56963@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
56964 atomic64_set(v, i);
56965 }
56966
56967+#ifdef CONFIG_PAX_REFCOUNT
56968+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56969+{
56970+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56971+
56972+ atomic64_set_unchecked(v, i);
56973+}
56974+#endif
56975+
56976 static inline void atomic_long_inc(atomic_long_t *l)
56977 {
56978 atomic64_t *v = (atomic64_t *)l;
56979@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
56980 atomic64_inc(v);
56981 }
56982
56983+#ifdef CONFIG_PAX_REFCOUNT
56984+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56985+{
56986+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56987+
56988+ atomic64_inc_unchecked(v);
56989+}
56990+#endif
56991+
56992 static inline void atomic_long_dec(atomic_long_t *l)
56993 {
56994 atomic64_t *v = (atomic64_t *)l;
56995@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
56996 atomic64_dec(v);
56997 }
56998
56999+#ifdef CONFIG_PAX_REFCOUNT
57000+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57001+{
57002+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57003+
57004+ atomic64_dec_unchecked(v);
57005+}
57006+#endif
57007+
57008 static inline void atomic_long_add(long i, atomic_long_t *l)
57009 {
57010 atomic64_t *v = (atomic64_t *)l;
57011@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57012 atomic64_add(i, v);
57013 }
57014
57015+#ifdef CONFIG_PAX_REFCOUNT
57016+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57017+{
57018+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57019+
57020+ atomic64_add_unchecked(i, v);
57021+}
57022+#endif
57023+
57024 static inline void atomic_long_sub(long i, atomic_long_t *l)
57025 {
57026 atomic64_t *v = (atomic64_t *)l;
57027@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57028 atomic64_sub(i, v);
57029 }
57030
57031+#ifdef CONFIG_PAX_REFCOUNT
57032+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57033+{
57034+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57035+
57036+ atomic64_sub_unchecked(i, v);
57037+}
57038+#endif
57039+
57040 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57041 {
57042 atomic64_t *v = (atomic64_t *)l;
57043@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57044 return (long)atomic64_inc_return(v);
57045 }
57046
57047+#ifdef CONFIG_PAX_REFCOUNT
57048+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57049+{
57050+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57051+
57052+ return (long)atomic64_inc_return_unchecked(v);
57053+}
57054+#endif
57055+
57056 static inline long atomic_long_dec_return(atomic_long_t *l)
57057 {
57058 atomic64_t *v = (atomic64_t *)l;
57059@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57060
57061 typedef atomic_t atomic_long_t;
57062
57063+#ifdef CONFIG_PAX_REFCOUNT
57064+typedef atomic_unchecked_t atomic_long_unchecked_t;
57065+#else
57066+typedef atomic_t atomic_long_unchecked_t;
57067+#endif
57068+
57069 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57070 static inline long atomic_long_read(atomic_long_t *l)
57071 {
57072@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57073 return (long)atomic_read(v);
57074 }
57075
57076+#ifdef CONFIG_PAX_REFCOUNT
57077+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57078+{
57079+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57080+
57081+ return (long)atomic_read_unchecked(v);
57082+}
57083+#endif
57084+
57085 static inline void atomic_long_set(atomic_long_t *l, long i)
57086 {
57087 atomic_t *v = (atomic_t *)l;
57088@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57089 atomic_set(v, i);
57090 }
57091
57092+#ifdef CONFIG_PAX_REFCOUNT
57093+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57094+{
57095+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57096+
57097+ atomic_set_unchecked(v, i);
57098+}
57099+#endif
57100+
57101 static inline void atomic_long_inc(atomic_long_t *l)
57102 {
57103 atomic_t *v = (atomic_t *)l;
57104@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57105 atomic_inc(v);
57106 }
57107
57108+#ifdef CONFIG_PAX_REFCOUNT
57109+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57110+{
57111+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57112+
57113+ atomic_inc_unchecked(v);
57114+}
57115+#endif
57116+
57117 static inline void atomic_long_dec(atomic_long_t *l)
57118 {
57119 atomic_t *v = (atomic_t *)l;
57120@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57121 atomic_dec(v);
57122 }
57123
57124+#ifdef CONFIG_PAX_REFCOUNT
57125+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57126+{
57127+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57128+
57129+ atomic_dec_unchecked(v);
57130+}
57131+#endif
57132+
57133 static inline void atomic_long_add(long i, atomic_long_t *l)
57134 {
57135 atomic_t *v = (atomic_t *)l;
57136@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57137 atomic_add(i, v);
57138 }
57139
57140+#ifdef CONFIG_PAX_REFCOUNT
57141+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57142+{
57143+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57144+
57145+ atomic_add_unchecked(i, v);
57146+}
57147+#endif
57148+
57149 static inline void atomic_long_sub(long i, atomic_long_t *l)
57150 {
57151 atomic_t *v = (atomic_t *)l;
57152@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57153 atomic_sub(i, v);
57154 }
57155
57156+#ifdef CONFIG_PAX_REFCOUNT
57157+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57158+{
57159+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57160+
57161+ atomic_sub_unchecked(i, v);
57162+}
57163+#endif
57164+
57165 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57166 {
57167 atomic_t *v = (atomic_t *)l;
57168@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57169 return (long)atomic_inc_return(v);
57170 }
57171
57172+#ifdef CONFIG_PAX_REFCOUNT
57173+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57174+{
57175+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57176+
57177+ return (long)atomic_inc_return_unchecked(v);
57178+}
57179+#endif
57180+
57181 static inline long atomic_long_dec_return(atomic_long_t *l)
57182 {
57183 atomic_t *v = (atomic_t *)l;
57184@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57185
57186 #endif /* BITS_PER_LONG == 64 */
57187
57188+#ifdef CONFIG_PAX_REFCOUNT
57189+static inline void pax_refcount_needs_these_functions(void)
57190+{
57191+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57192+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57193+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57194+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57195+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57196+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57197+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57198+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57199+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57200+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57201+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57202+
57203+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57204+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57205+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57206+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57207+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57208+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57209+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57210+}
57211+#else
57212+#define atomic_read_unchecked(v) atomic_read(v)
57213+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57214+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57215+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57216+#define atomic_inc_unchecked(v) atomic_inc(v)
57217+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57218+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57219+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57220+#define atomic_dec_unchecked(v) atomic_dec(v)
57221+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57222+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57223+
57224+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57225+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57226+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57227+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57228+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57229+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57230+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57231+#endif
57232+
57233 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57234diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57235index b18ce4f..2ee2843 100644
57236--- a/include/asm-generic/atomic64.h
57237+++ b/include/asm-generic/atomic64.h
57238@@ -16,6 +16,8 @@ typedef struct {
57239 long long counter;
57240 } atomic64_t;
57241
57242+typedef atomic64_t atomic64_unchecked_t;
57243+
57244 #define ATOMIC64_INIT(i) { (i) }
57245
57246 extern long long atomic64_read(const atomic64_t *v);
57247@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57248 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57249 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57250
57251+#define atomic64_read_unchecked(v) atomic64_read(v)
57252+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57253+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57254+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57255+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57256+#define atomic64_inc_unchecked(v) atomic64_inc(v)
57257+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57258+#define atomic64_dec_unchecked(v) atomic64_dec(v)
57259+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57260+
57261 #endif /* _ASM_GENERIC_ATOMIC64_H */
57262diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57263index 1bfcfe5..e04c5c9 100644
57264--- a/include/asm-generic/cache.h
57265+++ b/include/asm-generic/cache.h
57266@@ -6,7 +6,7 @@
57267 * cache lines need to provide their own cache.h.
57268 */
57269
57270-#define L1_CACHE_SHIFT 5
57271-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57272+#define L1_CACHE_SHIFT 5UL
57273+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57274
57275 #endif /* __ASM_GENERIC_CACHE_H */
57276diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57277index 1ca3efc..e3dc852 100644
57278--- a/include/asm-generic/int-l64.h
57279+++ b/include/asm-generic/int-l64.h
57280@@ -46,6 +46,8 @@ typedef unsigned int u32;
57281 typedef signed long s64;
57282 typedef unsigned long u64;
57283
57284+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57285+
57286 #define S8_C(x) x
57287 #define U8_C(x) x ## U
57288 #define S16_C(x) x
57289diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57290index f394147..b6152b9 100644
57291--- a/include/asm-generic/int-ll64.h
57292+++ b/include/asm-generic/int-ll64.h
57293@@ -51,6 +51,8 @@ typedef unsigned int u32;
57294 typedef signed long long s64;
57295 typedef unsigned long long u64;
57296
57297+typedef unsigned long long intoverflow_t;
57298+
57299 #define S8_C(x) x
57300 #define U8_C(x) x ## U
57301 #define S16_C(x) x
57302diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57303index 0232ccb..13d9165 100644
57304--- a/include/asm-generic/kmap_types.h
57305+++ b/include/asm-generic/kmap_types.h
57306@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57307 KMAP_D(17) KM_NMI,
57308 KMAP_D(18) KM_NMI_PTE,
57309 KMAP_D(19) KM_KDB,
57310+KMAP_D(20) KM_CLEARPAGE,
57311 /*
57312 * Remember to update debug_kmap_atomic() when adding new kmap types!
57313 */
57314-KMAP_D(20) KM_TYPE_NR
57315+KMAP_D(21) KM_TYPE_NR
57316 };
57317
57318 #undef KMAP_D
57319diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57320index 725612b..9cc513a 100644
57321--- a/include/asm-generic/pgtable-nopmd.h
57322+++ b/include/asm-generic/pgtable-nopmd.h
57323@@ -1,14 +1,19 @@
57324 #ifndef _PGTABLE_NOPMD_H
57325 #define _PGTABLE_NOPMD_H
57326
57327-#ifndef __ASSEMBLY__
57328-
57329 #include <asm-generic/pgtable-nopud.h>
57330
57331-struct mm_struct;
57332-
57333 #define __PAGETABLE_PMD_FOLDED
57334
57335+#define PMD_SHIFT PUD_SHIFT
57336+#define PTRS_PER_PMD 1
57337+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57338+#define PMD_MASK (~(PMD_SIZE-1))
57339+
57340+#ifndef __ASSEMBLY__
57341+
57342+struct mm_struct;
57343+
57344 /*
57345 * Having the pmd type consist of a pud gets the size right, and allows
57346 * us to conceptually access the pud entry that this pmd is folded into
57347@@ -16,11 +21,6 @@ struct mm_struct;
57348 */
57349 typedef struct { pud_t pud; } pmd_t;
57350
57351-#define PMD_SHIFT PUD_SHIFT
57352-#define PTRS_PER_PMD 1
57353-#define PMD_SIZE (1UL << PMD_SHIFT)
57354-#define PMD_MASK (~(PMD_SIZE-1))
57355-
57356 /*
57357 * The "pud_xxx()" functions here are trivial for a folded two-level
57358 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57359diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57360index 810431d..ccc3638 100644
57361--- a/include/asm-generic/pgtable-nopud.h
57362+++ b/include/asm-generic/pgtable-nopud.h
57363@@ -1,10 +1,15 @@
57364 #ifndef _PGTABLE_NOPUD_H
57365 #define _PGTABLE_NOPUD_H
57366
57367-#ifndef __ASSEMBLY__
57368-
57369 #define __PAGETABLE_PUD_FOLDED
57370
57371+#define PUD_SHIFT PGDIR_SHIFT
57372+#define PTRS_PER_PUD 1
57373+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57374+#define PUD_MASK (~(PUD_SIZE-1))
57375+
57376+#ifndef __ASSEMBLY__
57377+
57378 /*
57379 * Having the pud type consist of a pgd gets the size right, and allows
57380 * us to conceptually access the pgd entry that this pud is folded into
57381@@ -12,11 +17,6 @@
57382 */
57383 typedef struct { pgd_t pgd; } pud_t;
57384
57385-#define PUD_SHIFT PGDIR_SHIFT
57386-#define PTRS_PER_PUD 1
57387-#define PUD_SIZE (1UL << PUD_SHIFT)
57388-#define PUD_MASK (~(PUD_SIZE-1))
57389-
57390 /*
57391 * The "pgd_xxx()" functions here are trivial for a folded two-level
57392 * setup: the pud is never bad, and a pud always exists (as it's folded
57393diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57394index 76bff2b..c7a14e2 100644
57395--- a/include/asm-generic/pgtable.h
57396+++ b/include/asm-generic/pgtable.h
57397@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57398 #endif /* __HAVE_ARCH_PMD_WRITE */
57399 #endif
57400
57401+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57402+static inline unsigned long pax_open_kernel(void) { return 0; }
57403+#endif
57404+
57405+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57406+static inline unsigned long pax_close_kernel(void) { return 0; }
57407+#endif
57408+
57409 #endif /* !__ASSEMBLY__ */
57410
57411 #endif /* _ASM_GENERIC_PGTABLE_H */
57412diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57413index b5e2e4c..6a5373e 100644
57414--- a/include/asm-generic/vmlinux.lds.h
57415+++ b/include/asm-generic/vmlinux.lds.h
57416@@ -217,6 +217,7 @@
57417 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57418 VMLINUX_SYMBOL(__start_rodata) = .; \
57419 *(.rodata) *(.rodata.*) \
57420+ *(.data..read_only) \
57421 *(__vermagic) /* Kernel version magic */ \
57422 . = ALIGN(8); \
57423 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57424@@ -722,17 +723,18 @@
57425 * section in the linker script will go there too. @phdr should have
57426 * a leading colon.
57427 *
57428- * Note that this macros defines __per_cpu_load as an absolute symbol.
57429+ * Note that this macros defines per_cpu_load as an absolute symbol.
57430 * If there is no need to put the percpu section at a predetermined
57431 * address, use PERCPU_SECTION.
57432 */
57433 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57434- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57435- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57436+ per_cpu_load = .; \
57437+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57438 - LOAD_OFFSET) { \
57439+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57440 PERCPU_INPUT(cacheline) \
57441 } phdr \
57442- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57443+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57444
57445 /**
57446 * PERCPU_SECTION - define output section for percpu area, simple version
57447diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57448index bf4b2dc..2d0762f 100644
57449--- a/include/drm/drmP.h
57450+++ b/include/drm/drmP.h
57451@@ -72,6 +72,7 @@
57452 #include <linux/workqueue.h>
57453 #include <linux/poll.h>
57454 #include <asm/pgalloc.h>
57455+#include <asm/local.h>
57456 #include "drm.h"
57457
57458 #include <linux/idr.h>
57459@@ -1038,7 +1039,7 @@ struct drm_device {
57460
57461 /** \name Usage Counters */
57462 /*@{ */
57463- int open_count; /**< Outstanding files open */
57464+ local_t open_count; /**< Outstanding files open */
57465 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57466 atomic_t vma_count; /**< Outstanding vma areas open */
57467 int buf_use; /**< Buffers in use -- cannot alloc */
57468@@ -1049,7 +1050,7 @@ struct drm_device {
57469 /*@{ */
57470 unsigned long counters;
57471 enum drm_stat_type types[15];
57472- atomic_t counts[15];
57473+ atomic_unchecked_t counts[15];
57474 /*@} */
57475
57476 struct list_head filelist;
57477diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57478index 73b0712..0b7ef2f 100644
57479--- a/include/drm/drm_crtc_helper.h
57480+++ b/include/drm/drm_crtc_helper.h
57481@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57482
57483 /* disable crtc when not in use - more explicit than dpms off */
57484 void (*disable)(struct drm_crtc *crtc);
57485-};
57486+} __no_const;
57487
57488 struct drm_encoder_helper_funcs {
57489 void (*dpms)(struct drm_encoder *encoder, int mode);
57490@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57491 struct drm_connector *connector);
57492 /* disable encoder when not in use - more explicit than dpms off */
57493 void (*disable)(struct drm_encoder *encoder);
57494-};
57495+} __no_const;
57496
57497 struct drm_connector_helper_funcs {
57498 int (*get_modes)(struct drm_connector *connector);
57499diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57500index 26c1f78..6722682 100644
57501--- a/include/drm/ttm/ttm_memory.h
57502+++ b/include/drm/ttm/ttm_memory.h
57503@@ -47,7 +47,7 @@
57504
57505 struct ttm_mem_shrink {
57506 int (*do_shrink) (struct ttm_mem_shrink *);
57507-};
57508+} __no_const;
57509
57510 /**
57511 * struct ttm_mem_global - Global memory accounting structure.
57512diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57513index e86dfca..40cc55f 100644
57514--- a/include/linux/a.out.h
57515+++ b/include/linux/a.out.h
57516@@ -39,6 +39,14 @@ enum machine_type {
57517 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57518 };
57519
57520+/* Constants for the N_FLAGS field */
57521+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57522+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57523+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57524+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57525+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57526+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57527+
57528 #if !defined (N_MAGIC)
57529 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57530 #endif
57531diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57532index 49a83ca..df96b54 100644
57533--- a/include/linux/atmdev.h
57534+++ b/include/linux/atmdev.h
57535@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57536 #endif
57537
57538 struct k_atm_aal_stats {
57539-#define __HANDLE_ITEM(i) atomic_t i
57540+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57541 __AAL_STAT_ITEMS
57542 #undef __HANDLE_ITEM
57543 };
57544diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57545index fd88a39..f4d0bad 100644
57546--- a/include/linux/binfmts.h
57547+++ b/include/linux/binfmts.h
57548@@ -88,6 +88,7 @@ struct linux_binfmt {
57549 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57550 int (*load_shlib)(struct file *);
57551 int (*core_dump)(struct coredump_params *cprm);
57552+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57553 unsigned long min_coredump; /* minimal dump size */
57554 };
57555
57556diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57557index 0ed1eb0..3ab569b 100644
57558--- a/include/linux/blkdev.h
57559+++ b/include/linux/blkdev.h
57560@@ -1315,7 +1315,7 @@ struct block_device_operations {
57561 /* this callback is with swap_lock and sometimes page table lock held */
57562 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57563 struct module *owner;
57564-};
57565+} __do_const;
57566
57567 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57568 unsigned long);
57569diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57570index 4d1a074..88f929a 100644
57571--- a/include/linux/blktrace_api.h
57572+++ b/include/linux/blktrace_api.h
57573@@ -162,7 +162,7 @@ struct blk_trace {
57574 struct dentry *dir;
57575 struct dentry *dropped_file;
57576 struct dentry *msg_file;
57577- atomic_t dropped;
57578+ atomic_unchecked_t dropped;
57579 };
57580
57581 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57582diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57583index 83195fb..0b0f77d 100644
57584--- a/include/linux/byteorder/little_endian.h
57585+++ b/include/linux/byteorder/little_endian.h
57586@@ -42,51 +42,51 @@
57587
57588 static inline __le64 __cpu_to_le64p(const __u64 *p)
57589 {
57590- return (__force __le64)*p;
57591+ return (__force const __le64)*p;
57592 }
57593 static inline __u64 __le64_to_cpup(const __le64 *p)
57594 {
57595- return (__force __u64)*p;
57596+ return (__force const __u64)*p;
57597 }
57598 static inline __le32 __cpu_to_le32p(const __u32 *p)
57599 {
57600- return (__force __le32)*p;
57601+ return (__force const __le32)*p;
57602 }
57603 static inline __u32 __le32_to_cpup(const __le32 *p)
57604 {
57605- return (__force __u32)*p;
57606+ return (__force const __u32)*p;
57607 }
57608 static inline __le16 __cpu_to_le16p(const __u16 *p)
57609 {
57610- return (__force __le16)*p;
57611+ return (__force const __le16)*p;
57612 }
57613 static inline __u16 __le16_to_cpup(const __le16 *p)
57614 {
57615- return (__force __u16)*p;
57616+ return (__force const __u16)*p;
57617 }
57618 static inline __be64 __cpu_to_be64p(const __u64 *p)
57619 {
57620- return (__force __be64)__swab64p(p);
57621+ return (__force const __be64)__swab64p(p);
57622 }
57623 static inline __u64 __be64_to_cpup(const __be64 *p)
57624 {
57625- return __swab64p((__u64 *)p);
57626+ return __swab64p((const __u64 *)p);
57627 }
57628 static inline __be32 __cpu_to_be32p(const __u32 *p)
57629 {
57630- return (__force __be32)__swab32p(p);
57631+ return (__force const __be32)__swab32p(p);
57632 }
57633 static inline __u32 __be32_to_cpup(const __be32 *p)
57634 {
57635- return __swab32p((__u32 *)p);
57636+ return __swab32p((const __u32 *)p);
57637 }
57638 static inline __be16 __cpu_to_be16p(const __u16 *p)
57639 {
57640- return (__force __be16)__swab16p(p);
57641+ return (__force const __be16)__swab16p(p);
57642 }
57643 static inline __u16 __be16_to_cpup(const __be16 *p)
57644 {
57645- return __swab16p((__u16 *)p);
57646+ return __swab16p((const __u16 *)p);
57647 }
57648 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57649 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57650diff --git a/include/linux/cache.h b/include/linux/cache.h
57651index 4c57065..4307975 100644
57652--- a/include/linux/cache.h
57653+++ b/include/linux/cache.h
57654@@ -16,6 +16,10 @@
57655 #define __read_mostly
57656 #endif
57657
57658+#ifndef __read_only
57659+#define __read_only __read_mostly
57660+#endif
57661+
57662 #ifndef ____cacheline_aligned
57663 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57664 #endif
57665diff --git a/include/linux/capability.h b/include/linux/capability.h
57666index a63d13d..069bfd5 100644
57667--- a/include/linux/capability.h
57668+++ b/include/linux/capability.h
57669@@ -548,6 +548,9 @@ extern bool capable(int cap);
57670 extern bool ns_capable(struct user_namespace *ns, int cap);
57671 extern bool task_ns_capable(struct task_struct *t, int cap);
57672 extern bool nsown_capable(int cap);
57673+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57674+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57675+extern bool capable_nolog(int cap);
57676
57677 /* audit system wants to get cap info from files as well */
57678 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57679diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57680index 04ffb2e..6799180 100644
57681--- a/include/linux/cleancache.h
57682+++ b/include/linux/cleancache.h
57683@@ -31,7 +31,7 @@ struct cleancache_ops {
57684 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57685 void (*flush_inode)(int, struct cleancache_filekey);
57686 void (*flush_fs)(int);
57687-};
57688+} __no_const;
57689
57690 extern struct cleancache_ops
57691 cleancache_register_ops(struct cleancache_ops *ops);
57692diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57693index dfadc96..c0e70c1 100644
57694--- a/include/linux/compiler-gcc4.h
57695+++ b/include/linux/compiler-gcc4.h
57696@@ -31,6 +31,12 @@
57697
57698
57699 #if __GNUC_MINOR__ >= 5
57700+
57701+#ifdef CONSTIFY_PLUGIN
57702+#define __no_const __attribute__((no_const))
57703+#define __do_const __attribute__((do_const))
57704+#endif
57705+
57706 /*
57707 * Mark a position in code as unreachable. This can be used to
57708 * suppress control flow warnings after asm blocks that transfer
57709@@ -46,6 +52,11 @@
57710 #define __noclone __attribute__((__noclone__))
57711
57712 #endif
57713+
57714+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57715+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57716+#define __bos0(ptr) __bos((ptr), 0)
57717+#define __bos1(ptr) __bos((ptr), 1)
57718 #endif
57719
57720 #if __GNUC_MINOR__ > 0
57721diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57722index 320d6c9..8573a1c 100644
57723--- a/include/linux/compiler.h
57724+++ b/include/linux/compiler.h
57725@@ -5,31 +5,62 @@
57726
57727 #ifdef __CHECKER__
57728 # define __user __attribute__((noderef, address_space(1)))
57729+# define __force_user __force __user
57730 # define __kernel __attribute__((address_space(0)))
57731+# define __force_kernel __force __kernel
57732 # define __safe __attribute__((safe))
57733 # define __force __attribute__((force))
57734 # define __nocast __attribute__((nocast))
57735 # define __iomem __attribute__((noderef, address_space(2)))
57736+# define __force_iomem __force __iomem
57737 # define __acquires(x) __attribute__((context(x,0,1)))
57738 # define __releases(x) __attribute__((context(x,1,0)))
57739 # define __acquire(x) __context__(x,1)
57740 # define __release(x) __context__(x,-1)
57741 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57742 # define __percpu __attribute__((noderef, address_space(3)))
57743+# define __force_percpu __force __percpu
57744 #ifdef CONFIG_SPARSE_RCU_POINTER
57745 # define __rcu __attribute__((noderef, address_space(4)))
57746+# define __force_rcu __force __rcu
57747 #else
57748 # define __rcu
57749+# define __force_rcu
57750 #endif
57751 extern void __chk_user_ptr(const volatile void __user *);
57752 extern void __chk_io_ptr(const volatile void __iomem *);
57753+#elif defined(CHECKER_PLUGIN)
57754+//# define __user
57755+//# define __force_user
57756+//# define __kernel
57757+//# define __force_kernel
57758+# define __safe
57759+# define __force
57760+# define __nocast
57761+# define __iomem
57762+# define __force_iomem
57763+# define __chk_user_ptr(x) (void)0
57764+# define __chk_io_ptr(x) (void)0
57765+# define __builtin_warning(x, y...) (1)
57766+# define __acquires(x)
57767+# define __releases(x)
57768+# define __acquire(x) (void)0
57769+# define __release(x) (void)0
57770+# define __cond_lock(x,c) (c)
57771+# define __percpu
57772+# define __force_percpu
57773+# define __rcu
57774+# define __force_rcu
57775 #else
57776 # define __user
57777+# define __force_user
57778 # define __kernel
57779+# define __force_kernel
57780 # define __safe
57781 # define __force
57782 # define __nocast
57783 # define __iomem
57784+# define __force_iomem
57785 # define __chk_user_ptr(x) (void)0
57786 # define __chk_io_ptr(x) (void)0
57787 # define __builtin_warning(x, y...) (1)
57788@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57789 # define __release(x) (void)0
57790 # define __cond_lock(x,c) (c)
57791 # define __percpu
57792+# define __force_percpu
57793 # define __rcu
57794+# define __force_rcu
57795 #endif
57796
57797 #ifdef __KERNEL__
57798@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57799 # define __attribute_const__ /* unimplemented */
57800 #endif
57801
57802+#ifndef __no_const
57803+# define __no_const
57804+#endif
57805+
57806+#ifndef __do_const
57807+# define __do_const
57808+#endif
57809+
57810 /*
57811 * Tell gcc if a function is cold. The compiler will assume any path
57812 * directly leading to the call is unlikely.
57813@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57814 #define __cold
57815 #endif
57816
57817+#ifndef __alloc_size
57818+#define __alloc_size(...)
57819+#endif
57820+
57821+#ifndef __bos
57822+#define __bos(ptr, arg)
57823+#endif
57824+
57825+#ifndef __bos0
57826+#define __bos0(ptr)
57827+#endif
57828+
57829+#ifndef __bos1
57830+#define __bos1(ptr)
57831+#endif
57832+
57833 /* Simple shorthand for a section definition */
57834 #ifndef __section
57835 # define __section(S) __attribute__ ((__section__(#S)))
57836@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57837 * use is to mediate communication between process-level code and irq/NMI
57838 * handlers, all running on the same CPU.
57839 */
57840-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57841+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57842+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57843
57844 #endif /* __LINUX_COMPILER_H */
57845diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
57846index e9eaec5..bfeb9bb 100644
57847--- a/include/linux/cpuset.h
57848+++ b/include/linux/cpuset.h
57849@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
57850 * nodemask.
57851 */
57852 smp_mb();
57853- --ACCESS_ONCE(current->mems_allowed_change_disable);
57854+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57855 }
57856
57857 static inline void set_mems_allowed(nodemask_t nodemask)
57858diff --git a/include/linux/cred.h b/include/linux/cred.h
57859index 4030896..8d6f342 100644
57860--- a/include/linux/cred.h
57861+++ b/include/linux/cred.h
57862@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
57863 static inline void validate_process_creds(void)
57864 {
57865 }
57866+static inline void validate_task_creds(struct task_struct *task)
57867+{
57868+}
57869 #endif
57870
57871 /**
57872diff --git a/include/linux/crypto.h b/include/linux/crypto.h
57873index 8a94217..15d49e3 100644
57874--- a/include/linux/crypto.h
57875+++ b/include/linux/crypto.h
57876@@ -365,7 +365,7 @@ struct cipher_tfm {
57877 const u8 *key, unsigned int keylen);
57878 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57879 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57880-};
57881+} __no_const;
57882
57883 struct hash_tfm {
57884 int (*init)(struct hash_desc *desc);
57885@@ -386,13 +386,13 @@ struct compress_tfm {
57886 int (*cot_decompress)(struct crypto_tfm *tfm,
57887 const u8 *src, unsigned int slen,
57888 u8 *dst, unsigned int *dlen);
57889-};
57890+} __no_const;
57891
57892 struct rng_tfm {
57893 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57894 unsigned int dlen);
57895 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57896-};
57897+} __no_const;
57898
57899 #define crt_ablkcipher crt_u.ablkcipher
57900 #define crt_aead crt_u.aead
57901diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
57902index 7925bf0..d5143d2 100644
57903--- a/include/linux/decompress/mm.h
57904+++ b/include/linux/decompress/mm.h
57905@@ -77,7 +77,7 @@ static void free(void *where)
57906 * warnings when not needed (indeed large_malloc / large_free are not
57907 * needed by inflate */
57908
57909-#define malloc(a) kmalloc(a, GFP_KERNEL)
57910+#define malloc(a) kmalloc((a), GFP_KERNEL)
57911 #define free(a) kfree(a)
57912
57913 #define large_malloc(a) vmalloc(a)
57914diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
57915index e13117c..e9fc938 100644
57916--- a/include/linux/dma-mapping.h
57917+++ b/include/linux/dma-mapping.h
57918@@ -46,7 +46,7 @@ struct dma_map_ops {
57919 u64 (*get_required_mask)(struct device *dev);
57920 #endif
57921 int is_phys;
57922-};
57923+} __do_const;
57924
57925 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57926
57927diff --git a/include/linux/efi.h b/include/linux/efi.h
57928index 2362a0b..cfaf8fcc 100644
57929--- a/include/linux/efi.h
57930+++ b/include/linux/efi.h
57931@@ -446,7 +446,7 @@ struct efivar_operations {
57932 efi_get_variable_t *get_variable;
57933 efi_get_next_variable_t *get_next_variable;
57934 efi_set_variable_t *set_variable;
57935-};
57936+} __no_const;
57937
57938 struct efivars {
57939 /*
57940diff --git a/include/linux/elf.h b/include/linux/elf.h
57941index 31f0508..5421c01 100644
57942--- a/include/linux/elf.h
57943+++ b/include/linux/elf.h
57944@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57945 #define PT_GNU_EH_FRAME 0x6474e550
57946
57947 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57948+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57949+
57950+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57951+
57952+/* Constants for the e_flags field */
57953+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57954+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57955+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57956+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57957+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57958+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57959
57960 /*
57961 * Extended Numbering
57962@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57963 #define DT_DEBUG 21
57964 #define DT_TEXTREL 22
57965 #define DT_JMPREL 23
57966+#define DT_FLAGS 30
57967+ #define DF_TEXTREL 0x00000004
57968 #define DT_ENCODING 32
57969 #define OLD_DT_LOOS 0x60000000
57970 #define DT_LOOS 0x6000000d
57971@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57972 #define PF_W 0x2
57973 #define PF_X 0x1
57974
57975+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57976+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57977+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57978+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57979+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57980+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57981+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57982+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57983+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57984+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57985+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57986+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57987+
57988 typedef struct elf32_phdr{
57989 Elf32_Word p_type;
57990 Elf32_Off p_offset;
57991@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57992 #define EI_OSABI 7
57993 #define EI_PAD 8
57994
57995+#define EI_PAX 14
57996+
57997 #define ELFMAG0 0x7f /* EI_MAG */
57998 #define ELFMAG1 'E'
57999 #define ELFMAG2 'L'
58000@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58001 #define elf_note elf32_note
58002 #define elf_addr_t Elf32_Off
58003 #define Elf_Half Elf32_Half
58004+#define elf_dyn Elf32_Dyn
58005
58006 #else
58007
58008@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58009 #define elf_note elf64_note
58010 #define elf_addr_t Elf64_Off
58011 #define Elf_Half Elf64_Half
58012+#define elf_dyn Elf64_Dyn
58013
58014 #endif
58015
58016diff --git a/include/linux/filter.h b/include/linux/filter.h
58017index 8eeb205..d59bfa2 100644
58018--- a/include/linux/filter.h
58019+++ b/include/linux/filter.h
58020@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58021
58022 struct sk_buff;
58023 struct sock;
58024+struct bpf_jit_work;
58025
58026 struct sk_filter
58027 {
58028@@ -141,6 +142,9 @@ struct sk_filter
58029 unsigned int len; /* Number of filter blocks */
58030 unsigned int (*bpf_func)(const struct sk_buff *skb,
58031 const struct sock_filter *filter);
58032+#ifdef CONFIG_BPF_JIT
58033+ struct bpf_jit_work *work;
58034+#endif
58035 struct rcu_head rcu;
58036 struct sock_filter insns[0];
58037 };
58038diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58039index 84ccf8e..2e9b14c 100644
58040--- a/include/linux/firewire.h
58041+++ b/include/linux/firewire.h
58042@@ -428,7 +428,7 @@ struct fw_iso_context {
58043 union {
58044 fw_iso_callback_t sc;
58045 fw_iso_mc_callback_t mc;
58046- } callback;
58047+ } __no_const callback;
58048 void *callback_data;
58049 };
58050
58051diff --git a/include/linux/fs.h b/include/linux/fs.h
58052index e0bc4ff..d79c2fa 100644
58053--- a/include/linux/fs.h
58054+++ b/include/linux/fs.h
58055@@ -1608,7 +1608,8 @@ struct file_operations {
58056 int (*setlease)(struct file *, long, struct file_lock **);
58057 long (*fallocate)(struct file *file, int mode, loff_t offset,
58058 loff_t len);
58059-};
58060+} __do_const;
58061+typedef struct file_operations __no_const file_operations_no_const;
58062
58063 struct inode_operations {
58064 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58065diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58066index 003dc0f..3c4ea97 100644
58067--- a/include/linux/fs_struct.h
58068+++ b/include/linux/fs_struct.h
58069@@ -6,7 +6,7 @@
58070 #include <linux/seqlock.h>
58071
58072 struct fs_struct {
58073- int users;
58074+ atomic_t users;
58075 spinlock_t lock;
58076 seqcount_t seq;
58077 int umask;
58078diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58079index ce31408..b1ad003 100644
58080--- a/include/linux/fscache-cache.h
58081+++ b/include/linux/fscache-cache.h
58082@@ -102,7 +102,7 @@ struct fscache_operation {
58083 fscache_operation_release_t release;
58084 };
58085
58086-extern atomic_t fscache_op_debug_id;
58087+extern atomic_unchecked_t fscache_op_debug_id;
58088 extern void fscache_op_work_func(struct work_struct *work);
58089
58090 extern void fscache_enqueue_operation(struct fscache_operation *);
58091@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58092 {
58093 INIT_WORK(&op->work, fscache_op_work_func);
58094 atomic_set(&op->usage, 1);
58095- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58096+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58097 op->processor = processor;
58098 op->release = release;
58099 INIT_LIST_HEAD(&op->pend_link);
58100diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58101index 2a53f10..0187fdf 100644
58102--- a/include/linux/fsnotify.h
58103+++ b/include/linux/fsnotify.h
58104@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58105 */
58106 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58107 {
58108- return kstrdup(name, GFP_KERNEL);
58109+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58110 }
58111
58112 /*
58113diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58114index 91d0e0a3..035666b 100644
58115--- a/include/linux/fsnotify_backend.h
58116+++ b/include/linux/fsnotify_backend.h
58117@@ -105,6 +105,7 @@ struct fsnotify_ops {
58118 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58119 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58120 };
58121+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58122
58123 /*
58124 * A group is a "thing" that wants to receive notification about filesystem
58125diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58126index c3da42d..c70e0df 100644
58127--- a/include/linux/ftrace_event.h
58128+++ b/include/linux/ftrace_event.h
58129@@ -97,7 +97,7 @@ struct trace_event_functions {
58130 trace_print_func raw;
58131 trace_print_func hex;
58132 trace_print_func binary;
58133-};
58134+} __no_const;
58135
58136 struct trace_event {
58137 struct hlist_node node;
58138@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58139 extern int trace_add_event_call(struct ftrace_event_call *call);
58140 extern void trace_remove_event_call(struct ftrace_event_call *call);
58141
58142-#define is_signed_type(type) (((type)(-1)) < 0)
58143+#define is_signed_type(type) (((type)(-1)) < (type)1)
58144
58145 int trace_set_clr_event(const char *system, const char *event, int set);
58146
58147diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58148index 6d18f35..ab71e2c 100644
58149--- a/include/linux/genhd.h
58150+++ b/include/linux/genhd.h
58151@@ -185,7 +185,7 @@ struct gendisk {
58152 struct kobject *slave_dir;
58153
58154 struct timer_rand_state *random;
58155- atomic_t sync_io; /* RAID */
58156+ atomic_unchecked_t sync_io; /* RAID */
58157 struct disk_events *ev;
58158 #ifdef CONFIG_BLK_DEV_INTEGRITY
58159 struct blk_integrity *integrity;
58160diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58161new file mode 100644
58162index 0000000..0dc3943
58163--- /dev/null
58164+++ b/include/linux/gracl.h
58165@@ -0,0 +1,317 @@
58166+#ifndef GR_ACL_H
58167+#define GR_ACL_H
58168+
58169+#include <linux/grdefs.h>
58170+#include <linux/resource.h>
58171+#include <linux/capability.h>
58172+#include <linux/dcache.h>
58173+#include <asm/resource.h>
58174+
58175+/* Major status information */
58176+
58177+#define GR_VERSION "grsecurity 2.2.2"
58178+#define GRSECURITY_VERSION 0x2202
58179+
58180+enum {
58181+ GR_SHUTDOWN = 0,
58182+ GR_ENABLE = 1,
58183+ GR_SPROLE = 2,
58184+ GR_RELOAD = 3,
58185+ GR_SEGVMOD = 4,
58186+ GR_STATUS = 5,
58187+ GR_UNSPROLE = 6,
58188+ GR_PASSSET = 7,
58189+ GR_SPROLEPAM = 8,
58190+};
58191+
58192+/* Password setup definitions
58193+ * kernel/grhash.c */
58194+enum {
58195+ GR_PW_LEN = 128,
58196+ GR_SALT_LEN = 16,
58197+ GR_SHA_LEN = 32,
58198+};
58199+
58200+enum {
58201+ GR_SPROLE_LEN = 64,
58202+};
58203+
58204+enum {
58205+ GR_NO_GLOB = 0,
58206+ GR_REG_GLOB,
58207+ GR_CREATE_GLOB
58208+};
58209+
58210+#define GR_NLIMITS 32
58211+
58212+/* Begin Data Structures */
58213+
58214+struct sprole_pw {
58215+ unsigned char *rolename;
58216+ unsigned char salt[GR_SALT_LEN];
58217+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58218+};
58219+
58220+struct name_entry {
58221+ __u32 key;
58222+ ino_t inode;
58223+ dev_t device;
58224+ char *name;
58225+ __u16 len;
58226+ __u8 deleted;
58227+ struct name_entry *prev;
58228+ struct name_entry *next;
58229+};
58230+
58231+struct inodev_entry {
58232+ struct name_entry *nentry;
58233+ struct inodev_entry *prev;
58234+ struct inodev_entry *next;
58235+};
58236+
58237+struct acl_role_db {
58238+ struct acl_role_label **r_hash;
58239+ __u32 r_size;
58240+};
58241+
58242+struct inodev_db {
58243+ struct inodev_entry **i_hash;
58244+ __u32 i_size;
58245+};
58246+
58247+struct name_db {
58248+ struct name_entry **n_hash;
58249+ __u32 n_size;
58250+};
58251+
58252+struct crash_uid {
58253+ uid_t uid;
58254+ unsigned long expires;
58255+};
58256+
58257+struct gr_hash_struct {
58258+ void **table;
58259+ void **nametable;
58260+ void *first;
58261+ __u32 table_size;
58262+ __u32 used_size;
58263+ int type;
58264+};
58265+
58266+/* Userspace Grsecurity ACL data structures */
58267+
58268+struct acl_subject_label {
58269+ char *filename;
58270+ ino_t inode;
58271+ dev_t device;
58272+ __u32 mode;
58273+ kernel_cap_t cap_mask;
58274+ kernel_cap_t cap_lower;
58275+ kernel_cap_t cap_invert_audit;
58276+
58277+ struct rlimit res[GR_NLIMITS];
58278+ __u32 resmask;
58279+
58280+ __u8 user_trans_type;
58281+ __u8 group_trans_type;
58282+ uid_t *user_transitions;
58283+ gid_t *group_transitions;
58284+ __u16 user_trans_num;
58285+ __u16 group_trans_num;
58286+
58287+ __u32 sock_families[2];
58288+ __u32 ip_proto[8];
58289+ __u32 ip_type;
58290+ struct acl_ip_label **ips;
58291+ __u32 ip_num;
58292+ __u32 inaddr_any_override;
58293+
58294+ __u32 crashes;
58295+ unsigned long expires;
58296+
58297+ struct acl_subject_label *parent_subject;
58298+ struct gr_hash_struct *hash;
58299+ struct acl_subject_label *prev;
58300+ struct acl_subject_label *next;
58301+
58302+ struct acl_object_label **obj_hash;
58303+ __u32 obj_hash_size;
58304+ __u16 pax_flags;
58305+};
58306+
58307+struct role_allowed_ip {
58308+ __u32 addr;
58309+ __u32 netmask;
58310+
58311+ struct role_allowed_ip *prev;
58312+ struct role_allowed_ip *next;
58313+};
58314+
58315+struct role_transition {
58316+ char *rolename;
58317+
58318+ struct role_transition *prev;
58319+ struct role_transition *next;
58320+};
58321+
58322+struct acl_role_label {
58323+ char *rolename;
58324+ uid_t uidgid;
58325+ __u16 roletype;
58326+
58327+ __u16 auth_attempts;
58328+ unsigned long expires;
58329+
58330+ struct acl_subject_label *root_label;
58331+ struct gr_hash_struct *hash;
58332+
58333+ struct acl_role_label *prev;
58334+ struct acl_role_label *next;
58335+
58336+ struct role_transition *transitions;
58337+ struct role_allowed_ip *allowed_ips;
58338+ uid_t *domain_children;
58339+ __u16 domain_child_num;
58340+
58341+ struct acl_subject_label **subj_hash;
58342+ __u32 subj_hash_size;
58343+};
58344+
58345+struct user_acl_role_db {
58346+ struct acl_role_label **r_table;
58347+ __u32 num_pointers; /* Number of allocations to track */
58348+ __u32 num_roles; /* Number of roles */
58349+ __u32 num_domain_children; /* Number of domain children */
58350+ __u32 num_subjects; /* Number of subjects */
58351+ __u32 num_objects; /* Number of objects */
58352+};
58353+
58354+struct acl_object_label {
58355+ char *filename;
58356+ ino_t inode;
58357+ dev_t device;
58358+ __u32 mode;
58359+
58360+ struct acl_subject_label *nested;
58361+ struct acl_object_label *globbed;
58362+
58363+ /* next two structures not used */
58364+
58365+ struct acl_object_label *prev;
58366+ struct acl_object_label *next;
58367+};
58368+
58369+struct acl_ip_label {
58370+ char *iface;
58371+ __u32 addr;
58372+ __u32 netmask;
58373+ __u16 low, high;
58374+ __u8 mode;
58375+ __u32 type;
58376+ __u32 proto[8];
58377+
58378+ /* next two structures not used */
58379+
58380+ struct acl_ip_label *prev;
58381+ struct acl_ip_label *next;
58382+};
58383+
58384+struct gr_arg {
58385+ struct user_acl_role_db role_db;
58386+ unsigned char pw[GR_PW_LEN];
58387+ unsigned char salt[GR_SALT_LEN];
58388+ unsigned char sum[GR_SHA_LEN];
58389+ unsigned char sp_role[GR_SPROLE_LEN];
58390+ struct sprole_pw *sprole_pws;
58391+ dev_t segv_device;
58392+ ino_t segv_inode;
58393+ uid_t segv_uid;
58394+ __u16 num_sprole_pws;
58395+ __u16 mode;
58396+};
58397+
58398+struct gr_arg_wrapper {
58399+ struct gr_arg *arg;
58400+ __u32 version;
58401+ __u32 size;
58402+};
58403+
58404+struct subject_map {
58405+ struct acl_subject_label *user;
58406+ struct acl_subject_label *kernel;
58407+ struct subject_map *prev;
58408+ struct subject_map *next;
58409+};
58410+
58411+struct acl_subj_map_db {
58412+ struct subject_map **s_hash;
58413+ __u32 s_size;
58414+};
58415+
58416+/* End Data Structures Section */
58417+
58418+/* Hash functions generated by empirical testing by Brad Spengler
58419+ Makes good use of the low bits of the inode. Generally 0-1 times
58420+ in loop for successful match. 0-3 for unsuccessful match.
58421+ Shift/add algorithm with modulus of table size and an XOR*/
58422+
58423+static __inline__ unsigned int
58424+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58425+{
58426+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58427+}
58428+
58429+ static __inline__ unsigned int
58430+shash(const struct acl_subject_label *userp, const unsigned int sz)
58431+{
58432+ return ((const unsigned long)userp % sz);
58433+}
58434+
58435+static __inline__ unsigned int
58436+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58437+{
58438+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58439+}
58440+
58441+static __inline__ unsigned int
58442+nhash(const char *name, const __u16 len, const unsigned int sz)
58443+{
58444+ return full_name_hash((const unsigned char *)name, len) % sz;
58445+}
58446+
58447+#define FOR_EACH_ROLE_START(role) \
58448+ role = role_list; \
58449+ while (role) {
58450+
58451+#define FOR_EACH_ROLE_END(role) \
58452+ role = role->prev; \
58453+ }
58454+
58455+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58456+ subj = NULL; \
58457+ iter = 0; \
58458+ while (iter < role->subj_hash_size) { \
58459+ if (subj == NULL) \
58460+ subj = role->subj_hash[iter]; \
58461+ if (subj == NULL) { \
58462+ iter++; \
58463+ continue; \
58464+ }
58465+
58466+#define FOR_EACH_SUBJECT_END(subj,iter) \
58467+ subj = subj->next; \
58468+ if (subj == NULL) \
58469+ iter++; \
58470+ }
58471+
58472+
58473+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58474+ subj = role->hash->first; \
58475+ while (subj != NULL) {
58476+
58477+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58478+ subj = subj->next; \
58479+ }
58480+
58481+#endif
58482+
58483diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58484new file mode 100644
58485index 0000000..323ecf2
58486--- /dev/null
58487+++ b/include/linux/gralloc.h
58488@@ -0,0 +1,9 @@
58489+#ifndef __GRALLOC_H
58490+#define __GRALLOC_H
58491+
58492+void acl_free_all(void);
58493+int acl_alloc_stack_init(unsigned long size);
58494+void *acl_alloc(unsigned long len);
58495+void *acl_alloc_num(unsigned long num, unsigned long len);
58496+
58497+#endif
58498diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58499new file mode 100644
58500index 0000000..b30e9bc
58501--- /dev/null
58502+++ b/include/linux/grdefs.h
58503@@ -0,0 +1,140 @@
58504+#ifndef GRDEFS_H
58505+#define GRDEFS_H
58506+
58507+/* Begin grsecurity status declarations */
58508+
58509+enum {
58510+ GR_READY = 0x01,
58511+ GR_STATUS_INIT = 0x00 // disabled state
58512+};
58513+
58514+/* Begin ACL declarations */
58515+
58516+/* Role flags */
58517+
58518+enum {
58519+ GR_ROLE_USER = 0x0001,
58520+ GR_ROLE_GROUP = 0x0002,
58521+ GR_ROLE_DEFAULT = 0x0004,
58522+ GR_ROLE_SPECIAL = 0x0008,
58523+ GR_ROLE_AUTH = 0x0010,
58524+ GR_ROLE_NOPW = 0x0020,
58525+ GR_ROLE_GOD = 0x0040,
58526+ GR_ROLE_LEARN = 0x0080,
58527+ GR_ROLE_TPE = 0x0100,
58528+ GR_ROLE_DOMAIN = 0x0200,
58529+ GR_ROLE_PAM = 0x0400,
58530+ GR_ROLE_PERSIST = 0x0800
58531+};
58532+
58533+/* ACL Subject and Object mode flags */
58534+enum {
58535+ GR_DELETED = 0x80000000
58536+};
58537+
58538+/* ACL Object-only mode flags */
58539+enum {
58540+ GR_READ = 0x00000001,
58541+ GR_APPEND = 0x00000002,
58542+ GR_WRITE = 0x00000004,
58543+ GR_EXEC = 0x00000008,
58544+ GR_FIND = 0x00000010,
58545+ GR_INHERIT = 0x00000020,
58546+ GR_SETID = 0x00000040,
58547+ GR_CREATE = 0x00000080,
58548+ GR_DELETE = 0x00000100,
58549+ GR_LINK = 0x00000200,
58550+ GR_AUDIT_READ = 0x00000400,
58551+ GR_AUDIT_APPEND = 0x00000800,
58552+ GR_AUDIT_WRITE = 0x00001000,
58553+ GR_AUDIT_EXEC = 0x00002000,
58554+ GR_AUDIT_FIND = 0x00004000,
58555+ GR_AUDIT_INHERIT= 0x00008000,
58556+ GR_AUDIT_SETID = 0x00010000,
58557+ GR_AUDIT_CREATE = 0x00020000,
58558+ GR_AUDIT_DELETE = 0x00040000,
58559+ GR_AUDIT_LINK = 0x00080000,
58560+ GR_PTRACERD = 0x00100000,
58561+ GR_NOPTRACE = 0x00200000,
58562+ GR_SUPPRESS = 0x00400000,
58563+ GR_NOLEARN = 0x00800000,
58564+ GR_INIT_TRANSFER= 0x01000000
58565+};
58566+
58567+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58568+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58569+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58570+
58571+/* ACL subject-only mode flags */
58572+enum {
58573+ GR_KILL = 0x00000001,
58574+ GR_VIEW = 0x00000002,
58575+ GR_PROTECTED = 0x00000004,
58576+ GR_LEARN = 0x00000008,
58577+ GR_OVERRIDE = 0x00000010,
58578+ /* just a placeholder, this mode is only used in userspace */
58579+ GR_DUMMY = 0x00000020,
58580+ GR_PROTSHM = 0x00000040,
58581+ GR_KILLPROC = 0x00000080,
58582+ GR_KILLIPPROC = 0x00000100,
58583+ /* just a placeholder, this mode is only used in userspace */
58584+ GR_NOTROJAN = 0x00000200,
58585+ GR_PROTPROCFD = 0x00000400,
58586+ GR_PROCACCT = 0x00000800,
58587+ GR_RELAXPTRACE = 0x00001000,
58588+ GR_NESTED = 0x00002000,
58589+ GR_INHERITLEARN = 0x00004000,
58590+ GR_PROCFIND = 0x00008000,
58591+ GR_POVERRIDE = 0x00010000,
58592+ GR_KERNELAUTH = 0x00020000,
58593+ GR_ATSECURE = 0x00040000,
58594+ GR_SHMEXEC = 0x00080000
58595+};
58596+
58597+enum {
58598+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58599+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58600+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58601+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58602+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58603+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58604+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58605+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58606+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58607+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58608+};
58609+
58610+enum {
58611+ GR_ID_USER = 0x01,
58612+ GR_ID_GROUP = 0x02,
58613+};
58614+
58615+enum {
58616+ GR_ID_ALLOW = 0x01,
58617+ GR_ID_DENY = 0x02,
58618+};
58619+
58620+#define GR_CRASH_RES 31
58621+#define GR_UIDTABLE_MAX 500
58622+
58623+/* begin resource learning section */
58624+enum {
58625+ GR_RLIM_CPU_BUMP = 60,
58626+ GR_RLIM_FSIZE_BUMP = 50000,
58627+ GR_RLIM_DATA_BUMP = 10000,
58628+ GR_RLIM_STACK_BUMP = 1000,
58629+ GR_RLIM_CORE_BUMP = 10000,
58630+ GR_RLIM_RSS_BUMP = 500000,
58631+ GR_RLIM_NPROC_BUMP = 1,
58632+ GR_RLIM_NOFILE_BUMP = 5,
58633+ GR_RLIM_MEMLOCK_BUMP = 50000,
58634+ GR_RLIM_AS_BUMP = 500000,
58635+ GR_RLIM_LOCKS_BUMP = 2,
58636+ GR_RLIM_SIGPENDING_BUMP = 5,
58637+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58638+ GR_RLIM_NICE_BUMP = 1,
58639+ GR_RLIM_RTPRIO_BUMP = 1,
58640+ GR_RLIM_RTTIME_BUMP = 1000000
58641+};
58642+
58643+#endif
58644diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58645new file mode 100644
58646index 0000000..da390f1
58647--- /dev/null
58648+++ b/include/linux/grinternal.h
58649@@ -0,0 +1,221 @@
58650+#ifndef __GRINTERNAL_H
58651+#define __GRINTERNAL_H
58652+
58653+#ifdef CONFIG_GRKERNSEC
58654+
58655+#include <linux/fs.h>
58656+#include <linux/mnt_namespace.h>
58657+#include <linux/nsproxy.h>
58658+#include <linux/gracl.h>
58659+#include <linux/grdefs.h>
58660+#include <linux/grmsg.h>
58661+
58662+void gr_add_learn_entry(const char *fmt, ...)
58663+ __attribute__ ((format (printf, 1, 2)));
58664+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58665+ const struct vfsmount *mnt);
58666+__u32 gr_check_create(const struct dentry *new_dentry,
58667+ const struct dentry *parent,
58668+ const struct vfsmount *mnt, const __u32 mode);
58669+int gr_check_protected_task(const struct task_struct *task);
58670+__u32 to_gr_audit(const __u32 reqmode);
58671+int gr_set_acls(const int type);
58672+int gr_apply_subject_to_task(struct task_struct *task);
58673+int gr_acl_is_enabled(void);
58674+char gr_roletype_to_char(void);
58675+
58676+void gr_handle_alertkill(struct task_struct *task);
58677+char *gr_to_filename(const struct dentry *dentry,
58678+ const struct vfsmount *mnt);
58679+char *gr_to_filename1(const struct dentry *dentry,
58680+ const struct vfsmount *mnt);
58681+char *gr_to_filename2(const struct dentry *dentry,
58682+ const struct vfsmount *mnt);
58683+char *gr_to_filename3(const struct dentry *dentry,
58684+ const struct vfsmount *mnt);
58685+
58686+extern int grsec_enable_ptrace_readexec;
58687+extern int grsec_enable_harden_ptrace;
58688+extern int grsec_enable_link;
58689+extern int grsec_enable_fifo;
58690+extern int grsec_enable_execve;
58691+extern int grsec_enable_shm;
58692+extern int grsec_enable_execlog;
58693+extern int grsec_enable_signal;
58694+extern int grsec_enable_audit_ptrace;
58695+extern int grsec_enable_forkfail;
58696+extern int grsec_enable_time;
58697+extern int grsec_enable_rofs;
58698+extern int grsec_enable_chroot_shmat;
58699+extern int grsec_enable_chroot_mount;
58700+extern int grsec_enable_chroot_double;
58701+extern int grsec_enable_chroot_pivot;
58702+extern int grsec_enable_chroot_chdir;
58703+extern int grsec_enable_chroot_chmod;
58704+extern int grsec_enable_chroot_mknod;
58705+extern int grsec_enable_chroot_fchdir;
58706+extern int grsec_enable_chroot_nice;
58707+extern int grsec_enable_chroot_execlog;
58708+extern int grsec_enable_chroot_caps;
58709+extern int grsec_enable_chroot_sysctl;
58710+extern int grsec_enable_chroot_unix;
58711+extern int grsec_enable_tpe;
58712+extern int grsec_tpe_gid;
58713+extern int grsec_enable_tpe_all;
58714+extern int grsec_enable_tpe_invert;
58715+extern int grsec_enable_socket_all;
58716+extern int grsec_socket_all_gid;
58717+extern int grsec_enable_socket_client;
58718+extern int grsec_socket_client_gid;
58719+extern int grsec_enable_socket_server;
58720+extern int grsec_socket_server_gid;
58721+extern int grsec_audit_gid;
58722+extern int grsec_enable_group;
58723+extern int grsec_enable_audit_textrel;
58724+extern int grsec_enable_log_rwxmaps;
58725+extern int grsec_enable_mount;
58726+extern int grsec_enable_chdir;
58727+extern int grsec_resource_logging;
58728+extern int grsec_enable_blackhole;
58729+extern int grsec_lastack_retries;
58730+extern int grsec_enable_brute;
58731+extern int grsec_lock;
58732+
58733+extern spinlock_t grsec_alert_lock;
58734+extern unsigned long grsec_alert_wtime;
58735+extern unsigned long grsec_alert_fyet;
58736+
58737+extern spinlock_t grsec_audit_lock;
58738+
58739+extern rwlock_t grsec_exec_file_lock;
58740+
58741+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58742+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58743+ (tsk)->exec_file->f_vfsmnt) : "/")
58744+
58745+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58746+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58747+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58748+
58749+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58750+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58751+ (tsk)->exec_file->f_vfsmnt) : "/")
58752+
58753+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58754+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58755+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58756+
58757+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58758+
58759+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58760+
58761+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58762+ (task)->pid, (cred)->uid, \
58763+ (cred)->euid, (cred)->gid, (cred)->egid, \
58764+ gr_parent_task_fullpath(task), \
58765+ (task)->real_parent->comm, (task)->real_parent->pid, \
58766+ (pcred)->uid, (pcred)->euid, \
58767+ (pcred)->gid, (pcred)->egid
58768+
58769+#define GR_CHROOT_CAPS {{ \
58770+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58771+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58772+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58773+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58774+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58775+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58776+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58777+
58778+#define security_learn(normal_msg,args...) \
58779+({ \
58780+ read_lock(&grsec_exec_file_lock); \
58781+ gr_add_learn_entry(normal_msg "\n", ## args); \
58782+ read_unlock(&grsec_exec_file_lock); \
58783+})
58784+
58785+enum {
58786+ GR_DO_AUDIT,
58787+ GR_DONT_AUDIT,
58788+ /* used for non-audit messages that we shouldn't kill the task on */
58789+ GR_DONT_AUDIT_GOOD
58790+};
58791+
58792+enum {
58793+ GR_TTYSNIFF,
58794+ GR_RBAC,
58795+ GR_RBAC_STR,
58796+ GR_STR_RBAC,
58797+ GR_RBAC_MODE2,
58798+ GR_RBAC_MODE3,
58799+ GR_FILENAME,
58800+ GR_SYSCTL_HIDDEN,
58801+ GR_NOARGS,
58802+ GR_ONE_INT,
58803+ GR_ONE_INT_TWO_STR,
58804+ GR_ONE_STR,
58805+ GR_STR_INT,
58806+ GR_TWO_STR_INT,
58807+ GR_TWO_INT,
58808+ GR_TWO_U64,
58809+ GR_THREE_INT,
58810+ GR_FIVE_INT_TWO_STR,
58811+ GR_TWO_STR,
58812+ GR_THREE_STR,
58813+ GR_FOUR_STR,
58814+ GR_STR_FILENAME,
58815+ GR_FILENAME_STR,
58816+ GR_FILENAME_TWO_INT,
58817+ GR_FILENAME_TWO_INT_STR,
58818+ GR_TEXTREL,
58819+ GR_PTRACE,
58820+ GR_RESOURCE,
58821+ GR_CAP,
58822+ GR_SIG,
58823+ GR_SIG2,
58824+ GR_CRASH1,
58825+ GR_CRASH2,
58826+ GR_PSACCT,
58827+ GR_RWXMAP
58828+};
58829+
58830+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58831+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58832+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58833+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58834+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58835+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58836+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58837+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58838+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58839+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58840+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58841+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58842+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58843+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58844+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58845+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58846+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58847+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58848+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58849+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58850+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58851+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58852+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58853+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58854+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58855+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58856+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58857+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58858+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58859+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58860+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58861+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58862+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58863+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58864+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58865+
58866+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58867+
58868+#endif
58869+
58870+#endif
58871diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
58872new file mode 100644
58873index 0000000..7f62b30
58874--- /dev/null
58875+++ b/include/linux/grmsg.h
58876@@ -0,0 +1,109 @@
58877+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58878+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58879+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58880+#define GR_STOPMOD_MSG "denied modification of module state by "
58881+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58882+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58883+#define GR_IOPERM_MSG "denied use of ioperm() by "
58884+#define GR_IOPL_MSG "denied use of iopl() by "
58885+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58886+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58887+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58888+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58889+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58890+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58891+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58892+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58893+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58894+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58895+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58896+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58897+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58898+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58899+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58900+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58901+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58902+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58903+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58904+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58905+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58906+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58907+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58908+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58909+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58910+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58911+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
58912+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58913+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58914+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58915+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58916+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58917+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58918+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58919+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58920+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58921+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58922+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58923+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58924+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58925+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58926+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58927+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58928+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58929+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58930+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58931+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58932+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58933+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58934+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58935+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58936+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58937+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58938+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58939+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58940+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58941+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58942+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58943+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58944+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58945+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58946+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58947+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58948+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58949+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58950+#define GR_NICE_CHROOT_MSG "denied priority change by "
58951+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58952+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58953+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58954+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58955+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58956+#define GR_TIME_MSG "time set by "
58957+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58958+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58959+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58960+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58961+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58962+#define GR_BIND_MSG "denied bind() by "
58963+#define GR_CONNECT_MSG "denied connect() by "
58964+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58965+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58966+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58967+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58968+#define GR_CAP_ACL_MSG "use of %s denied for "
58969+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58970+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58971+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58972+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58973+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58974+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58975+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58976+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58977+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58978+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58979+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58980+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58981+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58982+#define GR_VM86_MSG "denied use of vm86 by "
58983+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58984+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
58985+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58986diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
58987new file mode 100644
58988index 0000000..cb9f1c1
58989--- /dev/null
58990+++ b/include/linux/grsecurity.h
58991@@ -0,0 +1,227 @@
58992+#ifndef GR_SECURITY_H
58993+#define GR_SECURITY_H
58994+#include <linux/fs.h>
58995+#include <linux/fs_struct.h>
58996+#include <linux/binfmts.h>
58997+#include <linux/gracl.h>
58998+
58999+/* notify of brain-dead configs */
59000+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59001+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59002+#endif
59003+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59004+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59005+#endif
59006+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59007+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59008+#endif
59009+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59010+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59011+#endif
59012+
59013+#include <linux/compat.h>
59014+
59015+struct user_arg_ptr {
59016+#ifdef CONFIG_COMPAT
59017+ bool is_compat;
59018+#endif
59019+ union {
59020+ const char __user *const __user *native;
59021+#ifdef CONFIG_COMPAT
59022+ compat_uptr_t __user *compat;
59023+#endif
59024+ } ptr;
59025+};
59026+
59027+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59028+void gr_handle_brute_check(void);
59029+void gr_handle_kernel_exploit(void);
59030+int gr_process_user_ban(void);
59031+
59032+char gr_roletype_to_char(void);
59033+
59034+int gr_acl_enable_at_secure(void);
59035+
59036+int gr_check_user_change(int real, int effective, int fs);
59037+int gr_check_group_change(int real, int effective, int fs);
59038+
59039+void gr_del_task_from_ip_table(struct task_struct *p);
59040+
59041+int gr_pid_is_chrooted(struct task_struct *p);
59042+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59043+int gr_handle_chroot_nice(void);
59044+int gr_handle_chroot_sysctl(const int op);
59045+int gr_handle_chroot_setpriority(struct task_struct *p,
59046+ const int niceval);
59047+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59048+int gr_handle_chroot_chroot(const struct dentry *dentry,
59049+ const struct vfsmount *mnt);
59050+void gr_handle_chroot_chdir(struct path *path);
59051+int gr_handle_chroot_chmod(const struct dentry *dentry,
59052+ const struct vfsmount *mnt, const int mode);
59053+int gr_handle_chroot_mknod(const struct dentry *dentry,
59054+ const struct vfsmount *mnt, const int mode);
59055+int gr_handle_chroot_mount(const struct dentry *dentry,
59056+ const struct vfsmount *mnt,
59057+ const char *dev_name);
59058+int gr_handle_chroot_pivot(void);
59059+int gr_handle_chroot_unix(const pid_t pid);
59060+
59061+int gr_handle_rawio(const struct inode *inode);
59062+
59063+void gr_handle_ioperm(void);
59064+void gr_handle_iopl(void);
59065+
59066+int gr_tpe_allow(const struct file *file);
59067+
59068+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59069+void gr_clear_chroot_entries(struct task_struct *task);
59070+
59071+void gr_log_forkfail(const int retval);
59072+void gr_log_timechange(void);
59073+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59074+void gr_log_chdir(const struct dentry *dentry,
59075+ const struct vfsmount *mnt);
59076+void gr_log_chroot_exec(const struct dentry *dentry,
59077+ const struct vfsmount *mnt);
59078+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59079+void gr_log_remount(const char *devname, const int retval);
59080+void gr_log_unmount(const char *devname, const int retval);
59081+void gr_log_mount(const char *from, const char *to, const int retval);
59082+void gr_log_textrel(struct vm_area_struct *vma);
59083+void gr_log_rwxmmap(struct file *file);
59084+void gr_log_rwxmprotect(struct file *file);
59085+
59086+int gr_handle_follow_link(const struct inode *parent,
59087+ const struct inode *inode,
59088+ const struct dentry *dentry,
59089+ const struct vfsmount *mnt);
59090+int gr_handle_fifo(const struct dentry *dentry,
59091+ const struct vfsmount *mnt,
59092+ const struct dentry *dir, const int flag,
59093+ const int acc_mode);
59094+int gr_handle_hardlink(const struct dentry *dentry,
59095+ const struct vfsmount *mnt,
59096+ struct inode *inode,
59097+ const int mode, const char *to);
59098+
59099+int gr_is_capable(const int cap);
59100+int gr_is_capable_nolog(const int cap);
59101+void gr_learn_resource(const struct task_struct *task, const int limit,
59102+ const unsigned long wanted, const int gt);
59103+void gr_copy_label(struct task_struct *tsk);
59104+void gr_handle_crash(struct task_struct *task, const int sig);
59105+int gr_handle_signal(const struct task_struct *p, const int sig);
59106+int gr_check_crash_uid(const uid_t uid);
59107+int gr_check_protected_task(const struct task_struct *task);
59108+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59109+int gr_acl_handle_mmap(const struct file *file,
59110+ const unsigned long prot);
59111+int gr_acl_handle_mprotect(const struct file *file,
59112+ const unsigned long prot);
59113+int gr_check_hidden_task(const struct task_struct *tsk);
59114+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59115+ const struct vfsmount *mnt);
59116+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59117+ const struct vfsmount *mnt);
59118+__u32 gr_acl_handle_access(const struct dentry *dentry,
59119+ const struct vfsmount *mnt, const int fmode);
59120+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59121+ const struct vfsmount *mnt, mode_t mode);
59122+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59123+ const struct vfsmount *mnt, mode_t mode);
59124+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59125+ const struct vfsmount *mnt);
59126+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59127+ const struct vfsmount *mnt);
59128+int gr_handle_ptrace(struct task_struct *task, const long request);
59129+int gr_handle_proc_ptrace(struct task_struct *task);
59130+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59131+ const struct vfsmount *mnt);
59132+int gr_check_crash_exec(const struct file *filp);
59133+int gr_acl_is_enabled(void);
59134+void gr_set_kernel_label(struct task_struct *task);
59135+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59136+ const gid_t gid);
59137+int gr_set_proc_label(const struct dentry *dentry,
59138+ const struct vfsmount *mnt,
59139+ const int unsafe_flags);
59140+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59141+ const struct vfsmount *mnt);
59142+__u32 gr_acl_handle_open(const struct dentry *dentry,
59143+ const struct vfsmount *mnt, int acc_mode);
59144+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59145+ const struct dentry *p_dentry,
59146+ const struct vfsmount *p_mnt,
59147+ int open_flags, int acc_mode, const int imode);
59148+void gr_handle_create(const struct dentry *dentry,
59149+ const struct vfsmount *mnt);
59150+void gr_handle_proc_create(const struct dentry *dentry,
59151+ const struct inode *inode);
59152+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59153+ const struct dentry *parent_dentry,
59154+ const struct vfsmount *parent_mnt,
59155+ const int mode);
59156+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59157+ const struct dentry *parent_dentry,
59158+ const struct vfsmount *parent_mnt);
59159+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59160+ const struct vfsmount *mnt);
59161+void gr_handle_delete(const ino_t ino, const dev_t dev);
59162+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59163+ const struct vfsmount *mnt);
59164+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59165+ const struct dentry *parent_dentry,
59166+ const struct vfsmount *parent_mnt,
59167+ const char *from);
59168+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59169+ const struct dentry *parent_dentry,
59170+ const struct vfsmount *parent_mnt,
59171+ const struct dentry *old_dentry,
59172+ const struct vfsmount *old_mnt, const char *to);
59173+int gr_acl_handle_rename(struct dentry *new_dentry,
59174+ struct dentry *parent_dentry,
59175+ const struct vfsmount *parent_mnt,
59176+ struct dentry *old_dentry,
59177+ struct inode *old_parent_inode,
59178+ struct vfsmount *old_mnt, const char *newname);
59179+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59180+ struct dentry *old_dentry,
59181+ struct dentry *new_dentry,
59182+ struct vfsmount *mnt, const __u8 replace);
59183+__u32 gr_check_link(const struct dentry *new_dentry,
59184+ const struct dentry *parent_dentry,
59185+ const struct vfsmount *parent_mnt,
59186+ const struct dentry *old_dentry,
59187+ const struct vfsmount *old_mnt);
59188+int gr_acl_handle_filldir(const struct file *file, const char *name,
59189+ const unsigned int namelen, const ino_t ino);
59190+
59191+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59192+ const struct vfsmount *mnt);
59193+void gr_acl_handle_exit(void);
59194+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59195+int gr_acl_handle_procpidmem(const struct task_struct *task);
59196+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59197+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59198+void gr_audit_ptrace(struct task_struct *task);
59199+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59200+
59201+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59202+
59203+#ifdef CONFIG_GRKERNSEC
59204+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59205+void gr_handle_vm86(void);
59206+void gr_handle_mem_readwrite(u64 from, u64 to);
59207+
59208+extern int grsec_enable_dmesg;
59209+extern int grsec_disable_privio;
59210+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59211+extern int grsec_enable_chroot_findtask;
59212+#endif
59213+#ifdef CONFIG_GRKERNSEC_SETXID
59214+extern int grsec_enable_setxid;
59215+#endif
59216+#endif
59217+
59218+#endif
59219diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59220new file mode 100644
59221index 0000000..e7ffaaf
59222--- /dev/null
59223+++ b/include/linux/grsock.h
59224@@ -0,0 +1,19 @@
59225+#ifndef __GRSOCK_H
59226+#define __GRSOCK_H
59227+
59228+extern void gr_attach_curr_ip(const struct sock *sk);
59229+extern int gr_handle_sock_all(const int family, const int type,
59230+ const int protocol);
59231+extern int gr_handle_sock_server(const struct sockaddr *sck);
59232+extern int gr_handle_sock_server_other(const struct sock *sck);
59233+extern int gr_handle_sock_client(const struct sockaddr *sck);
59234+extern int gr_search_connect(struct socket * sock,
59235+ struct sockaddr_in * addr);
59236+extern int gr_search_bind(struct socket * sock,
59237+ struct sockaddr_in * addr);
59238+extern int gr_search_listen(struct socket * sock);
59239+extern int gr_search_accept(struct socket * sock);
59240+extern int gr_search_socket(const int domain, const int type,
59241+ const int protocol);
59242+
59243+#endif
59244diff --git a/include/linux/hid.h b/include/linux/hid.h
59245index c235e4e..f0cf7a0 100644
59246--- a/include/linux/hid.h
59247+++ b/include/linux/hid.h
59248@@ -679,7 +679,7 @@ struct hid_ll_driver {
59249 unsigned int code, int value);
59250
59251 int (*parse)(struct hid_device *hdev);
59252-};
59253+} __no_const;
59254
59255 #define PM_HINT_FULLON 1<<5
59256 #define PM_HINT_NORMAL 1<<1
59257diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59258index 3a93f73..b19d0b3 100644
59259--- a/include/linux/highmem.h
59260+++ b/include/linux/highmem.h
59261@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59262 kunmap_atomic(kaddr, KM_USER0);
59263 }
59264
59265+static inline void sanitize_highpage(struct page *page)
59266+{
59267+ void *kaddr;
59268+ unsigned long flags;
59269+
59270+ local_irq_save(flags);
59271+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59272+ clear_page(kaddr);
59273+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59274+ local_irq_restore(flags);
59275+}
59276+
59277 static inline void zero_user_segments(struct page *page,
59278 unsigned start1, unsigned end1,
59279 unsigned start2, unsigned end2)
59280diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59281index 07d103a..04ec65b 100644
59282--- a/include/linux/i2c.h
59283+++ b/include/linux/i2c.h
59284@@ -364,6 +364,7 @@ struct i2c_algorithm {
59285 /* To determine what the adapter supports */
59286 u32 (*functionality) (struct i2c_adapter *);
59287 };
59288+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59289
59290 /*
59291 * i2c_adapter is the structure used to identify a physical i2c bus along
59292diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59293index a6deef4..c56a7f2 100644
59294--- a/include/linux/i2o.h
59295+++ b/include/linux/i2o.h
59296@@ -564,7 +564,7 @@ struct i2o_controller {
59297 struct i2o_device *exec; /* Executive */
59298 #if BITS_PER_LONG == 64
59299 spinlock_t context_list_lock; /* lock for context_list */
59300- atomic_t context_list_counter; /* needed for unique contexts */
59301+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59302 struct list_head context_list; /* list of context id's
59303 and pointers */
59304 #endif
59305diff --git a/include/linux/init.h b/include/linux/init.h
59306index 9146f39..885354d 100644
59307--- a/include/linux/init.h
59308+++ b/include/linux/init.h
59309@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59310
59311 /* Each module must use one module_init(). */
59312 #define module_init(initfn) \
59313- static inline initcall_t __inittest(void) \
59314+ static inline __used initcall_t __inittest(void) \
59315 { return initfn; } \
59316 int init_module(void) __attribute__((alias(#initfn)));
59317
59318 /* This is only required if you want to be unloadable. */
59319 #define module_exit(exitfn) \
59320- static inline exitcall_t __exittest(void) \
59321+ static inline __used exitcall_t __exittest(void) \
59322 { return exitfn; } \
59323 void cleanup_module(void) __attribute__((alias(#exitfn)));
59324
59325diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59326index 32574ee..00d4ef1 100644
59327--- a/include/linux/init_task.h
59328+++ b/include/linux/init_task.h
59329@@ -128,6 +128,12 @@ extern struct cred init_cred;
59330
59331 #define INIT_TASK_COMM "swapper"
59332
59333+#ifdef CONFIG_X86
59334+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59335+#else
59336+#define INIT_TASK_THREAD_INFO
59337+#endif
59338+
59339 /*
59340 * INIT_TASK is used to set up the first task table, touch at
59341 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59342@@ -166,6 +172,7 @@ extern struct cred init_cred;
59343 RCU_INIT_POINTER(.cred, &init_cred), \
59344 .comm = INIT_TASK_COMM, \
59345 .thread = INIT_THREAD, \
59346+ INIT_TASK_THREAD_INFO \
59347 .fs = &init_fs, \
59348 .files = &init_files, \
59349 .signal = &init_signals, \
59350diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59351index e6ca56d..8583707 100644
59352--- a/include/linux/intel-iommu.h
59353+++ b/include/linux/intel-iommu.h
59354@@ -296,7 +296,7 @@ struct iommu_flush {
59355 u8 fm, u64 type);
59356 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59357 unsigned int size_order, u64 type);
59358-};
59359+} __no_const;
59360
59361 enum {
59362 SR_DMAR_FECTL_REG,
59363diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59364index a64b00e..464d8bc 100644
59365--- a/include/linux/interrupt.h
59366+++ b/include/linux/interrupt.h
59367@@ -441,7 +441,7 @@ enum
59368 /* map softirq index to softirq name. update 'softirq_to_name' in
59369 * kernel/softirq.c when adding a new softirq.
59370 */
59371-extern char *softirq_to_name[NR_SOFTIRQS];
59372+extern const char * const softirq_to_name[NR_SOFTIRQS];
59373
59374 /* softirq mask and active fields moved to irq_cpustat_t in
59375 * asm/hardirq.h to get better cache usage. KAO
59376@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59377
59378 struct softirq_action
59379 {
59380- void (*action)(struct softirq_action *);
59381+ void (*action)(void);
59382 };
59383
59384 asmlinkage void do_softirq(void);
59385 asmlinkage void __do_softirq(void);
59386-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59387+extern void open_softirq(int nr, void (*action)(void));
59388 extern void softirq_init(void);
59389 static inline void __raise_softirq_irqoff(unsigned int nr)
59390 {
59391diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59392index 3875719..4cd454c 100644
59393--- a/include/linux/kallsyms.h
59394+++ b/include/linux/kallsyms.h
59395@@ -15,7 +15,8 @@
59396
59397 struct module;
59398
59399-#ifdef CONFIG_KALLSYMS
59400+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59401+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59402 /* Lookup the address for a symbol. Returns 0 if not found. */
59403 unsigned long kallsyms_lookup_name(const char *name);
59404
59405@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59406 /* Stupid that this does nothing, but I didn't create this mess. */
59407 #define __print_symbol(fmt, addr)
59408 #endif /*CONFIG_KALLSYMS*/
59409+#else /* when included by kallsyms.c, vsnprintf.c, or
59410+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59411+extern void __print_symbol(const char *fmt, unsigned long address);
59412+extern int sprint_backtrace(char *buffer, unsigned long address);
59413+extern int sprint_symbol(char *buffer, unsigned long address);
59414+const char *kallsyms_lookup(unsigned long addr,
59415+ unsigned long *symbolsize,
59416+ unsigned long *offset,
59417+ char **modname, char *namebuf);
59418+#endif
59419
59420 /* This macro allows us to keep printk typechecking */
59421 static __printf(1, 2)
59422diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59423index fa39183..40160be 100644
59424--- a/include/linux/kgdb.h
59425+++ b/include/linux/kgdb.h
59426@@ -53,7 +53,7 @@ extern int kgdb_connected;
59427 extern int kgdb_io_module_registered;
59428
59429 extern atomic_t kgdb_setting_breakpoint;
59430-extern atomic_t kgdb_cpu_doing_single_step;
59431+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59432
59433 extern struct task_struct *kgdb_usethread;
59434 extern struct task_struct *kgdb_contthread;
59435@@ -251,7 +251,7 @@ struct kgdb_arch {
59436 void (*disable_hw_break)(struct pt_regs *regs);
59437 void (*remove_all_hw_break)(void);
59438 void (*correct_hw_break)(void);
59439-};
59440+} __do_const;
59441
59442 /**
59443 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59444@@ -276,7 +276,7 @@ struct kgdb_io {
59445 void (*pre_exception) (void);
59446 void (*post_exception) (void);
59447 int is_console;
59448-};
59449+} __do_const;
59450
59451 extern struct kgdb_arch arch_kgdb_ops;
59452
59453diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59454index b16f653..eb908f4 100644
59455--- a/include/linux/kmod.h
59456+++ b/include/linux/kmod.h
59457@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59458 * usually useless though. */
59459 extern __printf(2, 3)
59460 int __request_module(bool wait, const char *name, ...);
59461+extern __printf(3, 4)
59462+int ___request_module(bool wait, char *param_name, const char *name, ...);
59463 #define request_module(mod...) __request_module(true, mod)
59464 #define request_module_nowait(mod...) __request_module(false, mod)
59465 #define try_then_request_module(x, mod...) \
59466diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59467index d526231..086e89b 100644
59468--- a/include/linux/kvm_host.h
59469+++ b/include/linux/kvm_host.h
59470@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59471 void vcpu_load(struct kvm_vcpu *vcpu);
59472 void vcpu_put(struct kvm_vcpu *vcpu);
59473
59474-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59475+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59476 struct module *module);
59477 void kvm_exit(void);
59478
59479@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59480 struct kvm_guest_debug *dbg);
59481 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59482
59483-int kvm_arch_init(void *opaque);
59484+int kvm_arch_init(const void *opaque);
59485 void kvm_arch_exit(void);
59486
59487 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59488diff --git a/include/linux/libata.h b/include/linux/libata.h
59489index cafc09a..d7e7829 100644
59490--- a/include/linux/libata.h
59491+++ b/include/linux/libata.h
59492@@ -909,7 +909,7 @@ struct ata_port_operations {
59493 * fields must be pointers.
59494 */
59495 const struct ata_port_operations *inherits;
59496-};
59497+} __do_const;
59498
59499 struct ata_port_info {
59500 unsigned long flags;
59501diff --git a/include/linux/mca.h b/include/linux/mca.h
59502index 3797270..7765ede 100644
59503--- a/include/linux/mca.h
59504+++ b/include/linux/mca.h
59505@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59506 int region);
59507 void * (*mca_transform_memory)(struct mca_device *,
59508 void *memory);
59509-};
59510+} __no_const;
59511
59512 struct mca_bus {
59513 u64 default_dma_mask;
59514diff --git a/include/linux/memory.h b/include/linux/memory.h
59515index 935699b..11042cc 100644
59516--- a/include/linux/memory.h
59517+++ b/include/linux/memory.h
59518@@ -144,7 +144,7 @@ struct memory_accessor {
59519 size_t count);
59520 ssize_t (*write)(struct memory_accessor *, const char *buf,
59521 off_t offset, size_t count);
59522-};
59523+} __no_const;
59524
59525 /*
59526 * Kernel text modification mutex, used for code patching. Users of this lock
59527diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59528index 9970337..9444122 100644
59529--- a/include/linux/mfd/abx500.h
59530+++ b/include/linux/mfd/abx500.h
59531@@ -188,6 +188,7 @@ struct abx500_ops {
59532 int (*event_registers_startup_state_get) (struct device *, u8 *);
59533 int (*startup_irq_enabled) (struct device *, unsigned int);
59534 };
59535+typedef struct abx500_ops __no_const abx500_ops_no_const;
59536
59537 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59538 void abx500_remove_ops(struct device *dev);
59539diff --git a/include/linux/mm.h b/include/linux/mm.h
59540index 4baadd1..2e0b45e 100644
59541--- a/include/linux/mm.h
59542+++ b/include/linux/mm.h
59543@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59544
59545 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59546 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59547+
59548+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59549+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59550+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59551+#else
59552 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59553+#endif
59554+
59555 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59556 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59557
59558@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59559 int set_page_dirty_lock(struct page *page);
59560 int clear_page_dirty_for_io(struct page *page);
59561
59562-/* Is the vma a continuation of the stack vma above it? */
59563-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59564-{
59565- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59566-}
59567-
59568-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59569- unsigned long addr)
59570-{
59571- return (vma->vm_flags & VM_GROWSDOWN) &&
59572- (vma->vm_start == addr) &&
59573- !vma_growsdown(vma->vm_prev, addr);
59574-}
59575-
59576-/* Is the vma a continuation of the stack vma below it? */
59577-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59578-{
59579- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59580-}
59581-
59582-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59583- unsigned long addr)
59584-{
59585- return (vma->vm_flags & VM_GROWSUP) &&
59586- (vma->vm_end == addr) &&
59587- !vma_growsup(vma->vm_next, addr);
59588-}
59589-
59590 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59591 unsigned long old_addr, struct vm_area_struct *new_vma,
59592 unsigned long new_addr, unsigned long len);
59593@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59594 }
59595 #endif
59596
59597+#ifdef CONFIG_MMU
59598+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59599+#else
59600+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59601+{
59602+ return __pgprot(0);
59603+}
59604+#endif
59605+
59606 int vma_wants_writenotify(struct vm_area_struct *vma);
59607
59608 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59609@@ -1419,6 +1407,7 @@ out:
59610 }
59611
59612 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59613+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59614
59615 extern unsigned long do_brk(unsigned long, unsigned long);
59616
59617@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59618 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59619 struct vm_area_struct **pprev);
59620
59621+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59622+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59623+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59624+
59625 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59626 NULL if none. Assume start_addr < end_addr. */
59627 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59628@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59629 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59630 }
59631
59632-#ifdef CONFIG_MMU
59633-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59634-#else
59635-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59636-{
59637- return __pgprot(0);
59638-}
59639-#endif
59640-
59641 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59642 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59643 unsigned long pfn, unsigned long size, pgprot_t);
59644@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59645 extern int sysctl_memory_failure_early_kill;
59646 extern int sysctl_memory_failure_recovery;
59647 extern void shake_page(struct page *p, int access);
59648-extern atomic_long_t mce_bad_pages;
59649+extern atomic_long_unchecked_t mce_bad_pages;
59650 extern int soft_offline_page(struct page *page, int flags);
59651
59652 extern void dump_page(struct page *page);
59653@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59654 unsigned int pages_per_huge_page);
59655 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59656
59657+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59658+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59659+#else
59660+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59661+#endif
59662+
59663 #endif /* __KERNEL__ */
59664 #endif /* _LINUX_MM_H */
59665diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59666index 5b42f1b..759e4b4 100644
59667--- a/include/linux/mm_types.h
59668+++ b/include/linux/mm_types.h
59669@@ -253,6 +253,8 @@ struct vm_area_struct {
59670 #ifdef CONFIG_NUMA
59671 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59672 #endif
59673+
59674+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59675 };
59676
59677 struct core_thread {
59678@@ -389,6 +391,24 @@ struct mm_struct {
59679 #ifdef CONFIG_CPUMASK_OFFSTACK
59680 struct cpumask cpumask_allocation;
59681 #endif
59682+
59683+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59684+ unsigned long pax_flags;
59685+#endif
59686+
59687+#ifdef CONFIG_PAX_DLRESOLVE
59688+ unsigned long call_dl_resolve;
59689+#endif
59690+
59691+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59692+ unsigned long call_syscall;
59693+#endif
59694+
59695+#ifdef CONFIG_PAX_ASLR
59696+ unsigned long delta_mmap; /* randomized offset */
59697+ unsigned long delta_stack; /* randomized offset */
59698+#endif
59699+
59700 };
59701
59702 static inline void mm_init_cpumask(struct mm_struct *mm)
59703diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59704index 1d1b1e1..2a13c78 100644
59705--- a/include/linux/mmu_notifier.h
59706+++ b/include/linux/mmu_notifier.h
59707@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59708 */
59709 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59710 ({ \
59711- pte_t __pte; \
59712+ pte_t ___pte; \
59713 struct vm_area_struct *___vma = __vma; \
59714 unsigned long ___address = __address; \
59715- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59716+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59717 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59718- __pte; \
59719+ ___pte; \
59720 })
59721
59722 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59723diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59724index 188cb2f..d78409b 100644
59725--- a/include/linux/mmzone.h
59726+++ b/include/linux/mmzone.h
59727@@ -369,7 +369,7 @@ struct zone {
59728 unsigned long flags; /* zone flags, see below */
59729
59730 /* Zone statistics */
59731- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59732+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59733
59734 /*
59735 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59736diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59737index 468819c..17b9db3 100644
59738--- a/include/linux/mod_devicetable.h
59739+++ b/include/linux/mod_devicetable.h
59740@@ -12,7 +12,7 @@
59741 typedef unsigned long kernel_ulong_t;
59742 #endif
59743
59744-#define PCI_ANY_ID (~0)
59745+#define PCI_ANY_ID ((__u16)~0)
59746
59747 struct pci_device_id {
59748 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59749@@ -131,7 +131,7 @@ struct usb_device_id {
59750 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59751 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59752
59753-#define HID_ANY_ID (~0)
59754+#define HID_ANY_ID (~0U)
59755
59756 struct hid_device_id {
59757 __u16 bus;
59758diff --git a/include/linux/module.h b/include/linux/module.h
59759index 3cb7839..511cb87 100644
59760--- a/include/linux/module.h
59761+++ b/include/linux/module.h
59762@@ -17,6 +17,7 @@
59763 #include <linux/moduleparam.h>
59764 #include <linux/tracepoint.h>
59765 #include <linux/export.h>
59766+#include <linux/fs.h>
59767
59768 #include <linux/percpu.h>
59769 #include <asm/module.h>
59770@@ -261,19 +262,16 @@ struct module
59771 int (*init)(void);
59772
59773 /* If this is non-NULL, vfree after init() returns */
59774- void *module_init;
59775+ void *module_init_rx, *module_init_rw;
59776
59777 /* Here is the actual code + data, vfree'd on unload. */
59778- void *module_core;
59779+ void *module_core_rx, *module_core_rw;
59780
59781 /* Here are the sizes of the init and core sections */
59782- unsigned int init_size, core_size;
59783+ unsigned int init_size_rw, core_size_rw;
59784
59785 /* The size of the executable code in each section. */
59786- unsigned int init_text_size, core_text_size;
59787-
59788- /* Size of RO sections of the module (text+rodata) */
59789- unsigned int init_ro_size, core_ro_size;
59790+ unsigned int init_size_rx, core_size_rx;
59791
59792 /* Arch-specific module values */
59793 struct mod_arch_specific arch;
59794@@ -329,6 +327,10 @@ struct module
59795 #ifdef CONFIG_EVENT_TRACING
59796 struct ftrace_event_call **trace_events;
59797 unsigned int num_trace_events;
59798+ struct file_operations trace_id;
59799+ struct file_operations trace_enable;
59800+ struct file_operations trace_format;
59801+ struct file_operations trace_filter;
59802 #endif
59803 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59804 unsigned int num_ftrace_callsites;
59805@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59806 bool is_module_percpu_address(unsigned long addr);
59807 bool is_module_text_address(unsigned long addr);
59808
59809+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59810+{
59811+
59812+#ifdef CONFIG_PAX_KERNEXEC
59813+ if (ktla_ktva(addr) >= (unsigned long)start &&
59814+ ktla_ktva(addr) < (unsigned long)start + size)
59815+ return 1;
59816+#endif
59817+
59818+ return ((void *)addr >= start && (void *)addr < start + size);
59819+}
59820+
59821+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59822+{
59823+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59824+}
59825+
59826+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59827+{
59828+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59829+}
59830+
59831+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59832+{
59833+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59834+}
59835+
59836+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59837+{
59838+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59839+}
59840+
59841 static inline int within_module_core(unsigned long addr, struct module *mod)
59842 {
59843- return (unsigned long)mod->module_core <= addr &&
59844- addr < (unsigned long)mod->module_core + mod->core_size;
59845+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59846 }
59847
59848 static inline int within_module_init(unsigned long addr, struct module *mod)
59849 {
59850- return (unsigned long)mod->module_init <= addr &&
59851- addr < (unsigned long)mod->module_init + mod->init_size;
59852+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59853 }
59854
59855 /* Search for module by name: must hold module_mutex. */
59856diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
59857index b2be02e..6a9fdb1 100644
59858--- a/include/linux/moduleloader.h
59859+++ b/include/linux/moduleloader.h
59860@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
59861 sections. Returns NULL on failure. */
59862 void *module_alloc(unsigned long size);
59863
59864+#ifdef CONFIG_PAX_KERNEXEC
59865+void *module_alloc_exec(unsigned long size);
59866+#else
59867+#define module_alloc_exec(x) module_alloc(x)
59868+#endif
59869+
59870 /* Free memory returned from module_alloc. */
59871 void module_free(struct module *mod, void *module_region);
59872
59873+#ifdef CONFIG_PAX_KERNEXEC
59874+void module_free_exec(struct module *mod, void *module_region);
59875+#else
59876+#define module_free_exec(x, y) module_free((x), (y))
59877+#endif
59878+
59879 /* Apply the given relocation to the (simplified) ELF. Return -error
59880 or 0. */
59881 int apply_relocate(Elf_Shdr *sechdrs,
59882diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
59883index 7939f63..ec6df57 100644
59884--- a/include/linux/moduleparam.h
59885+++ b/include/linux/moduleparam.h
59886@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
59887 * @len is usually just sizeof(string).
59888 */
59889 #define module_param_string(name, string, len, perm) \
59890- static const struct kparam_string __param_string_##name \
59891+ static const struct kparam_string __param_string_##name __used \
59892 = { len, string }; \
59893 __module_param_call(MODULE_PARAM_PREFIX, name, \
59894 &param_ops_string, \
59895@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
59896 * module_param_named() for why this might be necessary.
59897 */
59898 #define module_param_array_named(name, array, type, nump, perm) \
59899- static const struct kparam_array __param_arr_##name \
59900+ static const struct kparam_array __param_arr_##name __used \
59901 = { .max = ARRAY_SIZE(array), .num = nump, \
59902 .ops = &param_ops_##type, \
59903 .elemsize = sizeof(array[0]), .elem = array }; \
59904diff --git a/include/linux/namei.h b/include/linux/namei.h
59905index ffc0213..2c1f2cb 100644
59906--- a/include/linux/namei.h
59907+++ b/include/linux/namei.h
59908@@ -24,7 +24,7 @@ struct nameidata {
59909 unsigned seq;
59910 int last_type;
59911 unsigned depth;
59912- char *saved_names[MAX_NESTED_LINKS + 1];
59913+ const char *saved_names[MAX_NESTED_LINKS + 1];
59914
59915 /* Intent data */
59916 union {
59917@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59918 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59919 extern void unlock_rename(struct dentry *, struct dentry *);
59920
59921-static inline void nd_set_link(struct nameidata *nd, char *path)
59922+static inline void nd_set_link(struct nameidata *nd, const char *path)
59923 {
59924 nd->saved_names[nd->depth] = path;
59925 }
59926
59927-static inline char *nd_get_link(struct nameidata *nd)
59928+static inline const char *nd_get_link(const struct nameidata *nd)
59929 {
59930 return nd->saved_names[nd->depth];
59931 }
59932diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
59933index a82ad4d..90d15b7 100644
59934--- a/include/linux/netdevice.h
59935+++ b/include/linux/netdevice.h
59936@@ -949,6 +949,7 @@ struct net_device_ops {
59937 int (*ndo_set_features)(struct net_device *dev,
59938 u32 features);
59939 };
59940+typedef struct net_device_ops __no_const net_device_ops_no_const;
59941
59942 /*
59943 * The DEVICE structure.
59944@@ -1088,7 +1089,7 @@ struct net_device {
59945 int iflink;
59946
59947 struct net_device_stats stats;
59948- atomic_long_t rx_dropped; /* dropped packets by core network
59949+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
59950 * Do not use this in drivers.
59951 */
59952
59953diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
59954new file mode 100644
59955index 0000000..33f4af8
59956--- /dev/null
59957+++ b/include/linux/netfilter/xt_gradm.h
59958@@ -0,0 +1,9 @@
59959+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59960+#define _LINUX_NETFILTER_XT_GRADM_H 1
59961+
59962+struct xt_gradm_mtinfo {
59963+ __u16 flags;
59964+ __u16 invflags;
59965+};
59966+
59967+#endif
59968diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
59969index c65a18a..0c05f3a 100644
59970--- a/include/linux/of_pdt.h
59971+++ b/include/linux/of_pdt.h
59972@@ -32,7 +32,7 @@ struct of_pdt_ops {
59973
59974 /* return 0 on success; fill in 'len' with number of bytes in path */
59975 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59976-};
59977+} __no_const;
59978
59979 extern void *prom_early_alloc(unsigned long size);
59980
59981diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
59982index a4c5624..79d6d88 100644
59983--- a/include/linux/oprofile.h
59984+++ b/include/linux/oprofile.h
59985@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
59986 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59987 char const * name, ulong * val);
59988
59989-/** Create a file for read-only access to an atomic_t. */
59990+/** Create a file for read-only access to an atomic_unchecked_t. */
59991 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59992- char const * name, atomic_t * val);
59993+ char const * name, atomic_unchecked_t * val);
59994
59995 /** create a directory */
59996 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59997diff --git a/include/linux/padata.h b/include/linux/padata.h
59998index 4633b2f..988bc08 100644
59999--- a/include/linux/padata.h
60000+++ b/include/linux/padata.h
60001@@ -129,7 +129,7 @@ struct parallel_data {
60002 struct padata_instance *pinst;
60003 struct padata_parallel_queue __percpu *pqueue;
60004 struct padata_serial_queue __percpu *squeue;
60005- atomic_t seq_nr;
60006+ atomic_unchecked_t seq_nr;
60007 atomic_t reorder_objects;
60008 atomic_t refcnt;
60009 unsigned int max_seq_nr;
60010diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60011index b1f8912..c955bff 100644
60012--- a/include/linux/perf_event.h
60013+++ b/include/linux/perf_event.h
60014@@ -748,8 +748,8 @@ struct perf_event {
60015
60016 enum perf_event_active_state state;
60017 unsigned int attach_state;
60018- local64_t count;
60019- atomic64_t child_count;
60020+ local64_t count; /* PaX: fix it one day */
60021+ atomic64_unchecked_t child_count;
60022
60023 /*
60024 * These are the total time in nanoseconds that the event
60025@@ -800,8 +800,8 @@ struct perf_event {
60026 * These accumulate total time (in nanoseconds) that children
60027 * events have been enabled and running, respectively.
60028 */
60029- atomic64_t child_total_time_enabled;
60030- atomic64_t child_total_time_running;
60031+ atomic64_unchecked_t child_total_time_enabled;
60032+ atomic64_unchecked_t child_total_time_running;
60033
60034 /*
60035 * Protect attach/detach and child_list:
60036diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60037index 77257c9..51d473a 100644
60038--- a/include/linux/pipe_fs_i.h
60039+++ b/include/linux/pipe_fs_i.h
60040@@ -46,9 +46,9 @@ struct pipe_buffer {
60041 struct pipe_inode_info {
60042 wait_queue_head_t wait;
60043 unsigned int nrbufs, curbuf, buffers;
60044- unsigned int readers;
60045- unsigned int writers;
60046- unsigned int waiting_writers;
60047+ atomic_t readers;
60048+ atomic_t writers;
60049+ atomic_t waiting_writers;
60050 unsigned int r_counter;
60051 unsigned int w_counter;
60052 struct page *tmp_page;
60053diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60054index d3085e7..fd01052 100644
60055--- a/include/linux/pm_runtime.h
60056+++ b/include/linux/pm_runtime.h
60057@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60058
60059 static inline void pm_runtime_mark_last_busy(struct device *dev)
60060 {
60061- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60062+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60063 }
60064
60065 #else /* !CONFIG_PM_RUNTIME */
60066diff --git a/include/linux/poison.h b/include/linux/poison.h
60067index 79159de..f1233a9 100644
60068--- a/include/linux/poison.h
60069+++ b/include/linux/poison.h
60070@@ -19,8 +19,8 @@
60071 * under normal circumstances, used to verify that nobody uses
60072 * non-initialized list entries.
60073 */
60074-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60075-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60076+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60077+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60078
60079 /********** include/linux/timer.h **********/
60080 /*
60081diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60082index 58969b2..ead129b 100644
60083--- a/include/linux/preempt.h
60084+++ b/include/linux/preempt.h
60085@@ -123,7 +123,7 @@ struct preempt_ops {
60086 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60087 void (*sched_out)(struct preempt_notifier *notifier,
60088 struct task_struct *next);
60089-};
60090+} __no_const;
60091
60092 /**
60093 * preempt_notifier - key for installing preemption notifiers
60094diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60095index 643b96c..ef55a9c 100644
60096--- a/include/linux/proc_fs.h
60097+++ b/include/linux/proc_fs.h
60098@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60099 return proc_create_data(name, mode, parent, proc_fops, NULL);
60100 }
60101
60102+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60103+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60104+{
60105+#ifdef CONFIG_GRKERNSEC_PROC_USER
60106+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60107+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60108+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60109+#else
60110+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60111+#endif
60112+}
60113+
60114+
60115 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60116 mode_t mode, struct proc_dir_entry *base,
60117 read_proc_t *read_proc, void * data)
60118@@ -258,7 +271,7 @@ union proc_op {
60119 int (*proc_show)(struct seq_file *m,
60120 struct pid_namespace *ns, struct pid *pid,
60121 struct task_struct *task);
60122-};
60123+} __no_const;
60124
60125 struct ctl_table_header;
60126 struct ctl_table;
60127diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60128index 800f113..e9ee2e3 100644
60129--- a/include/linux/ptrace.h
60130+++ b/include/linux/ptrace.h
60131@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60132 extern void exit_ptrace(struct task_struct *tracer);
60133 #define PTRACE_MODE_READ 1
60134 #define PTRACE_MODE_ATTACH 2
60135-/* Returns 0 on success, -errno on denial. */
60136-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60137 /* Returns true on success, false on denial. */
60138 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60139+/* Returns true on success, false on denial. */
60140+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60141+/* Returns true on success, false on denial. */
60142+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60143
60144 static inline int ptrace_reparented(struct task_struct *child)
60145 {
60146diff --git a/include/linux/random.h b/include/linux/random.h
60147index 8f74538..02a1012 100644
60148--- a/include/linux/random.h
60149+++ b/include/linux/random.h
60150@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60151
60152 u32 prandom32(struct rnd_state *);
60153
60154+static inline unsigned long pax_get_random_long(void)
60155+{
60156+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60157+}
60158+
60159 /*
60160 * Handle minimum values for seeds
60161 */
60162 static inline u32 __seed(u32 x, u32 m)
60163 {
60164- return (x < m) ? x + m : x;
60165+ return (x <= m) ? x + m + 1 : x;
60166 }
60167
60168 /**
60169diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60170index e0879a7..a12f962 100644
60171--- a/include/linux/reboot.h
60172+++ b/include/linux/reboot.h
60173@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60174 * Architecture-specific implementations of sys_reboot commands.
60175 */
60176
60177-extern void machine_restart(char *cmd);
60178-extern void machine_halt(void);
60179-extern void machine_power_off(void);
60180+extern void machine_restart(char *cmd) __noreturn;
60181+extern void machine_halt(void) __noreturn;
60182+extern void machine_power_off(void) __noreturn;
60183
60184 extern void machine_shutdown(void);
60185 struct pt_regs;
60186@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60187 */
60188
60189 extern void kernel_restart_prepare(char *cmd);
60190-extern void kernel_restart(char *cmd);
60191-extern void kernel_halt(void);
60192-extern void kernel_power_off(void);
60193+extern void kernel_restart(char *cmd) __noreturn;
60194+extern void kernel_halt(void) __noreturn;
60195+extern void kernel_power_off(void) __noreturn;
60196
60197 extern int C_A_D; /* for sysctl */
60198 void ctrl_alt_del(void);
60199@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60200 * Emergency restart, callable from an interrupt handler.
60201 */
60202
60203-extern void emergency_restart(void);
60204+extern void emergency_restart(void) __noreturn;
60205 #include <asm/emergency-restart.h>
60206
60207 #endif
60208diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60209index 96d465f..b084e05 100644
60210--- a/include/linux/reiserfs_fs.h
60211+++ b/include/linux/reiserfs_fs.h
60212@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60213 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60214
60215 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60216-#define get_generation(s) atomic_read (&fs_generation(s))
60217+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60218 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60219 #define __fs_changed(gen,s) (gen != get_generation (s))
60220 #define fs_changed(gen,s) \
60221diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60222index 52c83b6..18ed7eb 100644
60223--- a/include/linux/reiserfs_fs_sb.h
60224+++ b/include/linux/reiserfs_fs_sb.h
60225@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60226 /* Comment? -Hans */
60227 wait_queue_head_t s_wait;
60228 /* To be obsoleted soon by per buffer seals.. -Hans */
60229- atomic_t s_generation_counter; // increased by one every time the
60230+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60231 // tree gets re-balanced
60232 unsigned long s_properties; /* File system properties. Currently holds
60233 on-disk FS format */
60234diff --git a/include/linux/relay.h b/include/linux/relay.h
60235index 14a86bc..17d0700 100644
60236--- a/include/linux/relay.h
60237+++ b/include/linux/relay.h
60238@@ -159,7 +159,7 @@ struct rchan_callbacks
60239 * The callback should return 0 if successful, negative if not.
60240 */
60241 int (*remove_buf_file)(struct dentry *dentry);
60242-};
60243+} __no_const;
60244
60245 /*
60246 * CONFIG_RELAY kernel API, kernel/relay.c
60247diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60248index c6c6084..5bf1212 100644
60249--- a/include/linux/rfkill.h
60250+++ b/include/linux/rfkill.h
60251@@ -147,6 +147,7 @@ struct rfkill_ops {
60252 void (*query)(struct rfkill *rfkill, void *data);
60253 int (*set_block)(void *data, bool blocked);
60254 };
60255+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60256
60257 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60258 /**
60259diff --git a/include/linux/rio.h b/include/linux/rio.h
60260index 4d50611..c6858a2 100644
60261--- a/include/linux/rio.h
60262+++ b/include/linux/rio.h
60263@@ -315,7 +315,7 @@ struct rio_ops {
60264 int mbox, void *buffer, size_t len);
60265 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60266 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60267-};
60268+} __no_const;
60269
60270 #define RIO_RESOURCE_MEM 0x00000100
60271 #define RIO_RESOURCE_DOORBELL 0x00000200
60272diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60273index 2148b12..519b820 100644
60274--- a/include/linux/rmap.h
60275+++ b/include/linux/rmap.h
60276@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60277 void anon_vma_init(void); /* create anon_vma_cachep */
60278 int anon_vma_prepare(struct vm_area_struct *);
60279 void unlink_anon_vmas(struct vm_area_struct *);
60280-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60281-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60282+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60283+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60284 void __anon_vma_link(struct vm_area_struct *);
60285
60286 static inline void anon_vma_merge(struct vm_area_struct *vma,
60287diff --git a/include/linux/sched.h b/include/linux/sched.h
60288index 1c4f3e9..f29cbeb 100644
60289--- a/include/linux/sched.h
60290+++ b/include/linux/sched.h
60291@@ -101,6 +101,7 @@ struct bio_list;
60292 struct fs_struct;
60293 struct perf_event_context;
60294 struct blk_plug;
60295+struct linux_binprm;
60296
60297 /*
60298 * List of flags we want to share for kernel threads,
60299@@ -380,10 +381,13 @@ struct user_namespace;
60300 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60301
60302 extern int sysctl_max_map_count;
60303+extern unsigned long sysctl_heap_stack_gap;
60304
60305 #include <linux/aio.h>
60306
60307 #ifdef CONFIG_MMU
60308+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60309+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60310 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60311 extern unsigned long
60312 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60313@@ -629,6 +633,17 @@ struct signal_struct {
60314 #ifdef CONFIG_TASKSTATS
60315 struct taskstats *stats;
60316 #endif
60317+
60318+#ifdef CONFIG_GRKERNSEC
60319+ u32 curr_ip;
60320+ u32 saved_ip;
60321+ u32 gr_saddr;
60322+ u32 gr_daddr;
60323+ u16 gr_sport;
60324+ u16 gr_dport;
60325+ u8 used_accept:1;
60326+#endif
60327+
60328 #ifdef CONFIG_AUDIT
60329 unsigned audit_tty;
60330 struct tty_audit_buf *tty_audit_buf;
60331@@ -710,6 +725,11 @@ struct user_struct {
60332 struct key *session_keyring; /* UID's default session keyring */
60333 #endif
60334
60335+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60336+ unsigned int banned;
60337+ unsigned long ban_expires;
60338+#endif
60339+
60340 /* Hash table maintenance information */
60341 struct hlist_node uidhash_node;
60342 uid_t uid;
60343@@ -1337,8 +1357,8 @@ struct task_struct {
60344 struct list_head thread_group;
60345
60346 struct completion *vfork_done; /* for vfork() */
60347- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60348- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60349+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60350+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60351
60352 cputime_t utime, stime, utimescaled, stimescaled;
60353 cputime_t gtime;
60354@@ -1354,13 +1374,6 @@ struct task_struct {
60355 struct task_cputime cputime_expires;
60356 struct list_head cpu_timers[3];
60357
60358-/* process credentials */
60359- const struct cred __rcu *real_cred; /* objective and real subjective task
60360- * credentials (COW) */
60361- const struct cred __rcu *cred; /* effective (overridable) subjective task
60362- * credentials (COW) */
60363- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60364-
60365 char comm[TASK_COMM_LEN]; /* executable name excluding path
60366 - access with [gs]et_task_comm (which lock
60367 it with task_lock())
60368@@ -1377,8 +1390,16 @@ struct task_struct {
60369 #endif
60370 /* CPU-specific state of this task */
60371 struct thread_struct thread;
60372+/* thread_info moved to task_struct */
60373+#ifdef CONFIG_X86
60374+ struct thread_info tinfo;
60375+#endif
60376 /* filesystem information */
60377 struct fs_struct *fs;
60378+
60379+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60380+ * credentials (COW) */
60381+
60382 /* open file information */
60383 struct files_struct *files;
60384 /* namespaces */
60385@@ -1425,6 +1446,11 @@ struct task_struct {
60386 struct rt_mutex_waiter *pi_blocked_on;
60387 #endif
60388
60389+/* process credentials */
60390+ const struct cred __rcu *real_cred; /* objective and real subjective task
60391+ * credentials (COW) */
60392+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60393+
60394 #ifdef CONFIG_DEBUG_MUTEXES
60395 /* mutex deadlock detection */
60396 struct mutex_waiter *blocked_on;
60397@@ -1540,6 +1566,24 @@ struct task_struct {
60398 unsigned long default_timer_slack_ns;
60399
60400 struct list_head *scm_work_list;
60401+
60402+#ifdef CONFIG_GRKERNSEC
60403+ /* grsecurity */
60404+#ifdef CONFIG_GRKERNSEC_SETXID
60405+ const struct cred *delayed_cred;
60406+#endif
60407+ struct dentry *gr_chroot_dentry;
60408+ struct acl_subject_label *acl;
60409+ struct acl_role_label *role;
60410+ struct file *exec_file;
60411+ u16 acl_role_id;
60412+ /* is this the task that authenticated to the special role */
60413+ u8 acl_sp_role;
60414+ u8 is_writable;
60415+ u8 brute;
60416+ u8 gr_is_chrooted;
60417+#endif
60418+
60419 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60420 /* Index of current stored address in ret_stack */
60421 int curr_ret_stack;
60422@@ -1574,6 +1618,51 @@ struct task_struct {
60423 #endif
60424 };
60425
60426+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60427+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60428+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60429+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60430+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60431+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60432+
60433+#ifdef CONFIG_PAX_SOFTMODE
60434+extern int pax_softmode;
60435+#endif
60436+
60437+extern int pax_check_flags(unsigned long *);
60438+
60439+/* if tsk != current then task_lock must be held on it */
60440+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60441+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60442+{
60443+ if (likely(tsk->mm))
60444+ return tsk->mm->pax_flags;
60445+ else
60446+ return 0UL;
60447+}
60448+
60449+/* if tsk != current then task_lock must be held on it */
60450+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60451+{
60452+ if (likely(tsk->mm)) {
60453+ tsk->mm->pax_flags = flags;
60454+ return 0;
60455+ }
60456+ return -EINVAL;
60457+}
60458+#endif
60459+
60460+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60461+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60462+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60463+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60464+#endif
60465+
60466+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60467+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60468+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60469+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60470+
60471 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60472 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60473
60474@@ -2081,7 +2170,9 @@ void yield(void);
60475 extern struct exec_domain default_exec_domain;
60476
60477 union thread_union {
60478+#ifndef CONFIG_X86
60479 struct thread_info thread_info;
60480+#endif
60481 unsigned long stack[THREAD_SIZE/sizeof(long)];
60482 };
60483
60484@@ -2114,6 +2205,7 @@ extern struct pid_namespace init_pid_ns;
60485 */
60486
60487 extern struct task_struct *find_task_by_vpid(pid_t nr);
60488+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60489 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60490 struct pid_namespace *ns);
60491
60492@@ -2235,6 +2327,12 @@ static inline void mmdrop(struct mm_struct * mm)
60493 extern void mmput(struct mm_struct *);
60494 /* Grab a reference to a task's mm, if it is not already going away */
60495 extern struct mm_struct *get_task_mm(struct task_struct *task);
60496+/*
60497+ * Grab a reference to a task's mm, if it is not already going away
60498+ * and ptrace_may_access with the mode parameter passed to it
60499+ * succeeds.
60500+ */
60501+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60502 /* Remove the current tasks stale references to the old mm_struct */
60503 extern void mm_release(struct task_struct *, struct mm_struct *);
60504 /* Allocate a new mm structure and copy contents from tsk->mm */
60505@@ -2251,7 +2349,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60506 extern void exit_itimers(struct signal_struct *);
60507 extern void flush_itimer_signals(void);
60508
60509-extern NORET_TYPE void do_group_exit(int);
60510+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60511
60512 extern void daemonize(const char *, ...);
60513 extern int allow_signal(int);
60514@@ -2416,13 +2514,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60515
60516 #endif
60517
60518-static inline int object_is_on_stack(void *obj)
60519+static inline int object_starts_on_stack(void *obj)
60520 {
60521- void *stack = task_stack_page(current);
60522+ const void *stack = task_stack_page(current);
60523
60524 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60525 }
60526
60527+#ifdef CONFIG_PAX_USERCOPY
60528+extern int object_is_on_stack(const void *obj, unsigned long len);
60529+#endif
60530+
60531 extern void thread_info_cache_init(void);
60532
60533 #ifdef CONFIG_DEBUG_STACK_USAGE
60534diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60535index 899fbb4..1cb4138 100644
60536--- a/include/linux/screen_info.h
60537+++ b/include/linux/screen_info.h
60538@@ -43,7 +43,8 @@ struct screen_info {
60539 __u16 pages; /* 0x32 */
60540 __u16 vesa_attributes; /* 0x34 */
60541 __u32 capabilities; /* 0x36 */
60542- __u8 _reserved[6]; /* 0x3a */
60543+ __u16 vesapm_size; /* 0x3a */
60544+ __u8 _reserved[4]; /* 0x3c */
60545 } __attribute__((packed));
60546
60547 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60548diff --git a/include/linux/security.h b/include/linux/security.h
60549index e8c619d..e0cbd1c 100644
60550--- a/include/linux/security.h
60551+++ b/include/linux/security.h
60552@@ -37,6 +37,7 @@
60553 #include <linux/xfrm.h>
60554 #include <linux/slab.h>
60555 #include <linux/xattr.h>
60556+#include <linux/grsecurity.h>
60557 #include <net/flow.h>
60558
60559 /* Maximum number of letters for an LSM name string */
60560diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60561index 0b69a46..e9e5538 100644
60562--- a/include/linux/seq_file.h
60563+++ b/include/linux/seq_file.h
60564@@ -33,6 +33,7 @@ struct seq_operations {
60565 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60566 int (*show) (struct seq_file *m, void *v);
60567 };
60568+typedef struct seq_operations __no_const seq_operations_no_const;
60569
60570 #define SEQ_SKIP 1
60571
60572diff --git a/include/linux/shm.h b/include/linux/shm.h
60573index 92808b8..c28cac4 100644
60574--- a/include/linux/shm.h
60575+++ b/include/linux/shm.h
60576@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60577
60578 /* The task created the shm object. NULL if the task is dead. */
60579 struct task_struct *shm_creator;
60580+#ifdef CONFIG_GRKERNSEC
60581+ time_t shm_createtime;
60582+ pid_t shm_lapid;
60583+#endif
60584 };
60585
60586 /* shm_mode upper byte flags */
60587diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60588index fe86488..1563c1c 100644
60589--- a/include/linux/skbuff.h
60590+++ b/include/linux/skbuff.h
60591@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60592 */
60593 static inline int skb_queue_empty(const struct sk_buff_head *list)
60594 {
60595- return list->next == (struct sk_buff *)list;
60596+ return list->next == (const struct sk_buff *)list;
60597 }
60598
60599 /**
60600@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60601 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60602 const struct sk_buff *skb)
60603 {
60604- return skb->next == (struct sk_buff *)list;
60605+ return skb->next == (const struct sk_buff *)list;
60606 }
60607
60608 /**
60609@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60610 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60611 const struct sk_buff *skb)
60612 {
60613- return skb->prev == (struct sk_buff *)list;
60614+ return skb->prev == (const struct sk_buff *)list;
60615 }
60616
60617 /**
60618@@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60619 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60620 */
60621 #ifndef NET_SKB_PAD
60622-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60623+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60624 #endif
60625
60626 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60627diff --git a/include/linux/slab.h b/include/linux/slab.h
60628index 573c809..e84c132 100644
60629--- a/include/linux/slab.h
60630+++ b/include/linux/slab.h
60631@@ -11,12 +11,20 @@
60632
60633 #include <linux/gfp.h>
60634 #include <linux/types.h>
60635+#include <linux/err.h>
60636
60637 /*
60638 * Flags to pass to kmem_cache_create().
60639 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60640 */
60641 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60642+
60643+#ifdef CONFIG_PAX_USERCOPY
60644+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60645+#else
60646+#define SLAB_USERCOPY 0x00000000UL
60647+#endif
60648+
60649 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60650 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60651 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60652@@ -87,10 +95,13 @@
60653 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60654 * Both make kfree a no-op.
60655 */
60656-#define ZERO_SIZE_PTR ((void *)16)
60657+#define ZERO_SIZE_PTR \
60658+({ \
60659+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60660+ (void *)(-MAX_ERRNO-1L); \
60661+})
60662
60663-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60664- (unsigned long)ZERO_SIZE_PTR)
60665+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60666
60667 /*
60668 * struct kmem_cache related prototypes
60669@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60670 void kfree(const void *);
60671 void kzfree(const void *);
60672 size_t ksize(const void *);
60673+void check_object_size(const void *ptr, unsigned long n, bool to);
60674
60675 /*
60676 * Allocator specific definitions. These are mainly used to establish optimized
60677@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60678
60679 void __init kmem_cache_init_late(void);
60680
60681+#define kmalloc(x, y) \
60682+({ \
60683+ void *___retval; \
60684+ intoverflow_t ___x = (intoverflow_t)x; \
60685+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60686+ ___retval = NULL; \
60687+ else \
60688+ ___retval = kmalloc((size_t)___x, (y)); \
60689+ ___retval; \
60690+})
60691+
60692+#define kmalloc_node(x, y, z) \
60693+({ \
60694+ void *___retval; \
60695+ intoverflow_t ___x = (intoverflow_t)x; \
60696+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60697+ ___retval = NULL; \
60698+ else \
60699+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60700+ ___retval; \
60701+})
60702+
60703+#define kzalloc(x, y) \
60704+({ \
60705+ void *___retval; \
60706+ intoverflow_t ___x = (intoverflow_t)x; \
60707+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60708+ ___retval = NULL; \
60709+ else \
60710+ ___retval = kzalloc((size_t)___x, (y)); \
60711+ ___retval; \
60712+})
60713+
60714+#define __krealloc(x, y, z) \
60715+({ \
60716+ void *___retval; \
60717+ intoverflow_t ___y = (intoverflow_t)y; \
60718+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60719+ ___retval = NULL; \
60720+ else \
60721+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60722+ ___retval; \
60723+})
60724+
60725+#define krealloc(x, y, z) \
60726+({ \
60727+ void *___retval; \
60728+ intoverflow_t ___y = (intoverflow_t)y; \
60729+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60730+ ___retval = NULL; \
60731+ else \
60732+ ___retval = krealloc((x), (size_t)___y, (z)); \
60733+ ___retval; \
60734+})
60735+
60736 #endif /* _LINUX_SLAB_H */
60737diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60738index d00e0ba..1b3bf7b 100644
60739--- a/include/linux/slab_def.h
60740+++ b/include/linux/slab_def.h
60741@@ -68,10 +68,10 @@ struct kmem_cache {
60742 unsigned long node_allocs;
60743 unsigned long node_frees;
60744 unsigned long node_overflow;
60745- atomic_t allochit;
60746- atomic_t allocmiss;
60747- atomic_t freehit;
60748- atomic_t freemiss;
60749+ atomic_unchecked_t allochit;
60750+ atomic_unchecked_t allocmiss;
60751+ atomic_unchecked_t freehit;
60752+ atomic_unchecked_t freemiss;
60753
60754 /*
60755 * If debugging is enabled, then the allocator can add additional
60756diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60757index a32bcfd..53b71f4 100644
60758--- a/include/linux/slub_def.h
60759+++ b/include/linux/slub_def.h
60760@@ -89,7 +89,7 @@ struct kmem_cache {
60761 struct kmem_cache_order_objects max;
60762 struct kmem_cache_order_objects min;
60763 gfp_t allocflags; /* gfp flags to use on each alloc */
60764- int refcount; /* Refcount for slab cache destroy */
60765+ atomic_t refcount; /* Refcount for slab cache destroy */
60766 void (*ctor)(void *);
60767 int inuse; /* Offset to metadata */
60768 int align; /* Alignment */
60769@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60770 }
60771
60772 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60773-void *__kmalloc(size_t size, gfp_t flags);
60774+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60775
60776 static __always_inline void *
60777 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60778diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60779index de8832d..0147b46 100644
60780--- a/include/linux/sonet.h
60781+++ b/include/linux/sonet.h
60782@@ -61,7 +61,7 @@ struct sonet_stats {
60783 #include <linux/atomic.h>
60784
60785 struct k_sonet_stats {
60786-#define __HANDLE_ITEM(i) atomic_t i
60787+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60788 __SONET_ITEMS
60789 #undef __HANDLE_ITEM
60790 };
60791diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60792index 3d8f9c4..69f1c0a 100644
60793--- a/include/linux/sunrpc/clnt.h
60794+++ b/include/linux/sunrpc/clnt.h
60795@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60796 {
60797 switch (sap->sa_family) {
60798 case AF_INET:
60799- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60800+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60801 case AF_INET6:
60802- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60803+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60804 }
60805 return 0;
60806 }
60807@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60808 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60809 const struct sockaddr *src)
60810 {
60811- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60812+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60813 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60814
60815 dsin->sin_family = ssin->sin_family;
60816@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60817 if (sa->sa_family != AF_INET6)
60818 return 0;
60819
60820- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60821+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60822 }
60823
60824 #endif /* __KERNEL__ */
60825diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60826index e775689..9e206d9 100644
60827--- a/include/linux/sunrpc/sched.h
60828+++ b/include/linux/sunrpc/sched.h
60829@@ -105,6 +105,7 @@ struct rpc_call_ops {
60830 void (*rpc_call_done)(struct rpc_task *, void *);
60831 void (*rpc_release)(void *);
60832 };
60833+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60834
60835 struct rpc_task_setup {
60836 struct rpc_task *task;
60837diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
60838index c14fe86..393245e 100644
60839--- a/include/linux/sunrpc/svc_rdma.h
60840+++ b/include/linux/sunrpc/svc_rdma.h
60841@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60842 extern unsigned int svcrdma_max_requests;
60843 extern unsigned int svcrdma_max_req_size;
60844
60845-extern atomic_t rdma_stat_recv;
60846-extern atomic_t rdma_stat_read;
60847-extern atomic_t rdma_stat_write;
60848-extern atomic_t rdma_stat_sq_starve;
60849-extern atomic_t rdma_stat_rq_starve;
60850-extern atomic_t rdma_stat_rq_poll;
60851-extern atomic_t rdma_stat_rq_prod;
60852-extern atomic_t rdma_stat_sq_poll;
60853-extern atomic_t rdma_stat_sq_prod;
60854+extern atomic_unchecked_t rdma_stat_recv;
60855+extern atomic_unchecked_t rdma_stat_read;
60856+extern atomic_unchecked_t rdma_stat_write;
60857+extern atomic_unchecked_t rdma_stat_sq_starve;
60858+extern atomic_unchecked_t rdma_stat_rq_starve;
60859+extern atomic_unchecked_t rdma_stat_rq_poll;
60860+extern atomic_unchecked_t rdma_stat_rq_prod;
60861+extern atomic_unchecked_t rdma_stat_sq_poll;
60862+extern atomic_unchecked_t rdma_stat_sq_prod;
60863
60864 #define RPCRDMA_VERSION 1
60865
60866diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
60867index 703cfa3..0b8ca72ac 100644
60868--- a/include/linux/sysctl.h
60869+++ b/include/linux/sysctl.h
60870@@ -155,7 +155,11 @@ enum
60871 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60872 };
60873
60874-
60875+#ifdef CONFIG_PAX_SOFTMODE
60876+enum {
60877+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60878+};
60879+#endif
60880
60881 /* CTL_VM names: */
60882 enum
60883@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
60884
60885 extern int proc_dostring(struct ctl_table *, int,
60886 void __user *, size_t *, loff_t *);
60887+extern int proc_dostring_modpriv(struct ctl_table *, int,
60888+ void __user *, size_t *, loff_t *);
60889 extern int proc_dointvec(struct ctl_table *, int,
60890 void __user *, size_t *, loff_t *);
60891 extern int proc_dointvec_minmax(struct ctl_table *, int,
60892diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
60893index a71a292..51bd91d 100644
60894--- a/include/linux/tracehook.h
60895+++ b/include/linux/tracehook.h
60896@@ -54,12 +54,12 @@ struct linux_binprm;
60897 /*
60898 * ptrace report for syscall entry and exit looks identical.
60899 */
60900-static inline void ptrace_report_syscall(struct pt_regs *regs)
60901+static inline int ptrace_report_syscall(struct pt_regs *regs)
60902 {
60903 int ptrace = current->ptrace;
60904
60905 if (!(ptrace & PT_PTRACED))
60906- return;
60907+ return 0;
60908
60909 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
60910
60911@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
60912 send_sig(current->exit_code, current, 1);
60913 current->exit_code = 0;
60914 }
60915+
60916+ return fatal_signal_pending(current);
60917 }
60918
60919 /**
60920@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
60921 static inline __must_check int tracehook_report_syscall_entry(
60922 struct pt_regs *regs)
60923 {
60924- ptrace_report_syscall(regs);
60925- return 0;
60926+ return ptrace_report_syscall(regs);
60927 }
60928
60929 /**
60930diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
60931index ff7dc08..893e1bd 100644
60932--- a/include/linux/tty_ldisc.h
60933+++ b/include/linux/tty_ldisc.h
60934@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60935
60936 struct module *owner;
60937
60938- int refcount;
60939+ atomic_t refcount;
60940 };
60941
60942 struct tty_ldisc {
60943diff --git a/include/linux/types.h b/include/linux/types.h
60944index 57a9723..dbe234a 100644
60945--- a/include/linux/types.h
60946+++ b/include/linux/types.h
60947@@ -213,10 +213,26 @@ typedef struct {
60948 int counter;
60949 } atomic_t;
60950
60951+#ifdef CONFIG_PAX_REFCOUNT
60952+typedef struct {
60953+ int counter;
60954+} atomic_unchecked_t;
60955+#else
60956+typedef atomic_t atomic_unchecked_t;
60957+#endif
60958+
60959 #ifdef CONFIG_64BIT
60960 typedef struct {
60961 long counter;
60962 } atomic64_t;
60963+
60964+#ifdef CONFIG_PAX_REFCOUNT
60965+typedef struct {
60966+ long counter;
60967+} atomic64_unchecked_t;
60968+#else
60969+typedef atomic64_t atomic64_unchecked_t;
60970+#endif
60971 #endif
60972
60973 struct list_head {
60974diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
60975index 5ca0951..ab496a5 100644
60976--- a/include/linux/uaccess.h
60977+++ b/include/linux/uaccess.h
60978@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
60979 long ret; \
60980 mm_segment_t old_fs = get_fs(); \
60981 \
60982- set_fs(KERNEL_DS); \
60983 pagefault_disable(); \
60984- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60985- pagefault_enable(); \
60986+ set_fs(KERNEL_DS); \
60987+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60988 set_fs(old_fs); \
60989+ pagefault_enable(); \
60990 ret; \
60991 })
60992
60993diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
60994index 99c1b4d..bb94261 100644
60995--- a/include/linux/unaligned/access_ok.h
60996+++ b/include/linux/unaligned/access_ok.h
60997@@ -6,32 +6,32 @@
60998
60999 static inline u16 get_unaligned_le16(const void *p)
61000 {
61001- return le16_to_cpup((__le16 *)p);
61002+ return le16_to_cpup((const __le16 *)p);
61003 }
61004
61005 static inline u32 get_unaligned_le32(const void *p)
61006 {
61007- return le32_to_cpup((__le32 *)p);
61008+ return le32_to_cpup((const __le32 *)p);
61009 }
61010
61011 static inline u64 get_unaligned_le64(const void *p)
61012 {
61013- return le64_to_cpup((__le64 *)p);
61014+ return le64_to_cpup((const __le64 *)p);
61015 }
61016
61017 static inline u16 get_unaligned_be16(const void *p)
61018 {
61019- return be16_to_cpup((__be16 *)p);
61020+ return be16_to_cpup((const __be16 *)p);
61021 }
61022
61023 static inline u32 get_unaligned_be32(const void *p)
61024 {
61025- return be32_to_cpup((__be32 *)p);
61026+ return be32_to_cpup((const __be32 *)p);
61027 }
61028
61029 static inline u64 get_unaligned_be64(const void *p)
61030 {
61031- return be64_to_cpup((__be64 *)p);
61032+ return be64_to_cpup((const __be64 *)p);
61033 }
61034
61035 static inline void put_unaligned_le16(u16 val, void *p)
61036diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61037index e5a40c3..20ab0f6 100644
61038--- a/include/linux/usb/renesas_usbhs.h
61039+++ b/include/linux/usb/renesas_usbhs.h
61040@@ -39,7 +39,7 @@ enum {
61041 */
61042 struct renesas_usbhs_driver_callback {
61043 int (*notify_hotplug)(struct platform_device *pdev);
61044-};
61045+} __no_const;
61046
61047 /*
61048 * callback functions for platform
61049@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61050 * VBUS control is needed for Host
61051 */
61052 int (*set_vbus)(struct platform_device *pdev, int enable);
61053-};
61054+} __no_const;
61055
61056 /*
61057 * parameters for renesas usbhs
61058diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61059index 6f8fbcf..8259001 100644
61060--- a/include/linux/vermagic.h
61061+++ b/include/linux/vermagic.h
61062@@ -25,9 +25,35 @@
61063 #define MODULE_ARCH_VERMAGIC ""
61064 #endif
61065
61066+#ifdef CONFIG_PAX_REFCOUNT
61067+#define MODULE_PAX_REFCOUNT "REFCOUNT "
61068+#else
61069+#define MODULE_PAX_REFCOUNT ""
61070+#endif
61071+
61072+#ifdef CONSTIFY_PLUGIN
61073+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61074+#else
61075+#define MODULE_CONSTIFY_PLUGIN ""
61076+#endif
61077+
61078+#ifdef STACKLEAK_PLUGIN
61079+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61080+#else
61081+#define MODULE_STACKLEAK_PLUGIN ""
61082+#endif
61083+
61084+#ifdef CONFIG_GRKERNSEC
61085+#define MODULE_GRSEC "GRSEC "
61086+#else
61087+#define MODULE_GRSEC ""
61088+#endif
61089+
61090 #define VERMAGIC_STRING \
61091 UTS_RELEASE " " \
61092 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61093 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61094- MODULE_ARCH_VERMAGIC
61095+ MODULE_ARCH_VERMAGIC \
61096+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61097+ MODULE_GRSEC
61098
61099diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61100index 4bde182..aec92c1 100644
61101--- a/include/linux/vmalloc.h
61102+++ b/include/linux/vmalloc.h
61103@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61104 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61105 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61106 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61107+
61108+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61109+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61110+#endif
61111+
61112 /* bits [20..32] reserved for arch specific ioremap internals */
61113
61114 /*
61115@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61116 # endif
61117 #endif
61118
61119+#define vmalloc(x) \
61120+({ \
61121+ void *___retval; \
61122+ intoverflow_t ___x = (intoverflow_t)x; \
61123+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61124+ ___retval = NULL; \
61125+ else \
61126+ ___retval = vmalloc((unsigned long)___x); \
61127+ ___retval; \
61128+})
61129+
61130+#define vzalloc(x) \
61131+({ \
61132+ void *___retval; \
61133+ intoverflow_t ___x = (intoverflow_t)x; \
61134+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61135+ ___retval = NULL; \
61136+ else \
61137+ ___retval = vzalloc((unsigned long)___x); \
61138+ ___retval; \
61139+})
61140+
61141+#define __vmalloc(x, y, z) \
61142+({ \
61143+ void *___retval; \
61144+ intoverflow_t ___x = (intoverflow_t)x; \
61145+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61146+ ___retval = NULL; \
61147+ else \
61148+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61149+ ___retval; \
61150+})
61151+
61152+#define vmalloc_user(x) \
61153+({ \
61154+ void *___retval; \
61155+ intoverflow_t ___x = (intoverflow_t)x; \
61156+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61157+ ___retval = NULL; \
61158+ else \
61159+ ___retval = vmalloc_user((unsigned long)___x); \
61160+ ___retval; \
61161+})
61162+
61163+#define vmalloc_exec(x) \
61164+({ \
61165+ void *___retval; \
61166+ intoverflow_t ___x = (intoverflow_t)x; \
61167+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61168+ ___retval = NULL; \
61169+ else \
61170+ ___retval = vmalloc_exec((unsigned long)___x); \
61171+ ___retval; \
61172+})
61173+
61174+#define vmalloc_node(x, y) \
61175+({ \
61176+ void *___retval; \
61177+ intoverflow_t ___x = (intoverflow_t)x; \
61178+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61179+ ___retval = NULL; \
61180+ else \
61181+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61182+ ___retval; \
61183+})
61184+
61185+#define vzalloc_node(x, y) \
61186+({ \
61187+ void *___retval; \
61188+ intoverflow_t ___x = (intoverflow_t)x; \
61189+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61190+ ___retval = NULL; \
61191+ else \
61192+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61193+ ___retval; \
61194+})
61195+
61196+#define vmalloc_32(x) \
61197+({ \
61198+ void *___retval; \
61199+ intoverflow_t ___x = (intoverflow_t)x; \
61200+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61201+ ___retval = NULL; \
61202+ else \
61203+ ___retval = vmalloc_32((unsigned long)___x); \
61204+ ___retval; \
61205+})
61206+
61207+#define vmalloc_32_user(x) \
61208+({ \
61209+void *___retval; \
61210+ intoverflow_t ___x = (intoverflow_t)x; \
61211+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61212+ ___retval = NULL; \
61213+ else \
61214+ ___retval = vmalloc_32_user((unsigned long)___x);\
61215+ ___retval; \
61216+})
61217+
61218 #endif /* _LINUX_VMALLOC_H */
61219diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61220index 65efb92..137adbb 100644
61221--- a/include/linux/vmstat.h
61222+++ b/include/linux/vmstat.h
61223@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61224 /*
61225 * Zone based page accounting with per cpu differentials.
61226 */
61227-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61228+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61229
61230 static inline void zone_page_state_add(long x, struct zone *zone,
61231 enum zone_stat_item item)
61232 {
61233- atomic_long_add(x, &zone->vm_stat[item]);
61234- atomic_long_add(x, &vm_stat[item]);
61235+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61236+ atomic_long_add_unchecked(x, &vm_stat[item]);
61237 }
61238
61239 static inline unsigned long global_page_state(enum zone_stat_item item)
61240 {
61241- long x = atomic_long_read(&vm_stat[item]);
61242+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61243 #ifdef CONFIG_SMP
61244 if (x < 0)
61245 x = 0;
61246@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61247 static inline unsigned long zone_page_state(struct zone *zone,
61248 enum zone_stat_item item)
61249 {
61250- long x = atomic_long_read(&zone->vm_stat[item]);
61251+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61252 #ifdef CONFIG_SMP
61253 if (x < 0)
61254 x = 0;
61255@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61256 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61257 enum zone_stat_item item)
61258 {
61259- long x = atomic_long_read(&zone->vm_stat[item]);
61260+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61261
61262 #ifdef CONFIG_SMP
61263 int cpu;
61264@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61265
61266 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61267 {
61268- atomic_long_inc(&zone->vm_stat[item]);
61269- atomic_long_inc(&vm_stat[item]);
61270+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61271+ atomic_long_inc_unchecked(&vm_stat[item]);
61272 }
61273
61274 static inline void __inc_zone_page_state(struct page *page,
61275@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61276
61277 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61278 {
61279- atomic_long_dec(&zone->vm_stat[item]);
61280- atomic_long_dec(&vm_stat[item]);
61281+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61282+ atomic_long_dec_unchecked(&vm_stat[item]);
61283 }
61284
61285 static inline void __dec_zone_page_state(struct page *page,
61286diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61287index e5d1220..ef6e406 100644
61288--- a/include/linux/xattr.h
61289+++ b/include/linux/xattr.h
61290@@ -57,6 +57,11 @@
61291 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61292 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61293
61294+/* User namespace */
61295+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61296+#define XATTR_PAX_FLAGS_SUFFIX "flags"
61297+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61298+
61299 #ifdef __KERNEL__
61300
61301 #include <linux/types.h>
61302diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61303index 4aeff96..b378cdc 100644
61304--- a/include/media/saa7146_vv.h
61305+++ b/include/media/saa7146_vv.h
61306@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61307 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61308
61309 /* the extension can override this */
61310- struct v4l2_ioctl_ops ops;
61311+ v4l2_ioctl_ops_no_const ops;
61312 /* pointer to the saa7146 core ops */
61313 const struct v4l2_ioctl_ops *core_ops;
61314
61315diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61316index c7c40f1..4f01585 100644
61317--- a/include/media/v4l2-dev.h
61318+++ b/include/media/v4l2-dev.h
61319@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61320
61321
61322 struct v4l2_file_operations {
61323- struct module *owner;
61324+ struct module * const owner;
61325 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61326 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61327 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61328@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61329 int (*open) (struct file *);
61330 int (*release) (struct file *);
61331 };
61332+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61333
61334 /*
61335 * Newer version of video_device, handled by videodev2.c
61336diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61337index 4d1c74a..65e1221 100644
61338--- a/include/media/v4l2-ioctl.h
61339+++ b/include/media/v4l2-ioctl.h
61340@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61341 long (*vidioc_default) (struct file *file, void *fh,
61342 bool valid_prio, int cmd, void *arg);
61343 };
61344-
61345+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61346
61347 /* v4l debugging and diagnostics */
61348
61349diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61350index 8d55251..dfe5b0a 100644
61351--- a/include/net/caif/caif_hsi.h
61352+++ b/include/net/caif/caif_hsi.h
61353@@ -98,7 +98,7 @@ struct cfhsi_drv {
61354 void (*rx_done_cb) (struct cfhsi_drv *drv);
61355 void (*wake_up_cb) (struct cfhsi_drv *drv);
61356 void (*wake_down_cb) (struct cfhsi_drv *drv);
61357-};
61358+} __no_const;
61359
61360 /* Structure implemented by HSI device. */
61361 struct cfhsi_dev {
61362diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61363index 9e5425b..8136ffc 100644
61364--- a/include/net/caif/cfctrl.h
61365+++ b/include/net/caif/cfctrl.h
61366@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61367 void (*radioset_rsp)(void);
61368 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61369 struct cflayer *client_layer);
61370-};
61371+} __no_const;
61372
61373 /* Link Setup Parameters for CAIF-Links. */
61374 struct cfctrl_link_param {
61375@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61376 struct cfctrl {
61377 struct cfsrvl serv;
61378 struct cfctrl_rsp res;
61379- atomic_t req_seq_no;
61380- atomic_t rsp_seq_no;
61381+ atomic_unchecked_t req_seq_no;
61382+ atomic_unchecked_t rsp_seq_no;
61383 struct list_head list;
61384 /* Protects from simultaneous access to first_req list */
61385 spinlock_t info_list_lock;
61386diff --git a/include/net/flow.h b/include/net/flow.h
61387index 57f15a7..0de26c6 100644
61388--- a/include/net/flow.h
61389+++ b/include/net/flow.h
61390@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61391
61392 extern void flow_cache_flush(void);
61393 extern void flow_cache_flush_deferred(void);
61394-extern atomic_t flow_cache_genid;
61395+extern atomic_unchecked_t flow_cache_genid;
61396
61397 #endif
61398diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61399index e9ff3fc..9d3e5c7 100644
61400--- a/include/net/inetpeer.h
61401+++ b/include/net/inetpeer.h
61402@@ -48,8 +48,8 @@ struct inet_peer {
61403 */
61404 union {
61405 struct {
61406- atomic_t rid; /* Frag reception counter */
61407- atomic_t ip_id_count; /* IP ID for the next packet */
61408+ atomic_unchecked_t rid; /* Frag reception counter */
61409+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61410 __u32 tcp_ts;
61411 __u32 tcp_ts_stamp;
61412 };
61413@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61414 more++;
61415 inet_peer_refcheck(p);
61416 do {
61417- old = atomic_read(&p->ip_id_count);
61418+ old = atomic_read_unchecked(&p->ip_id_count);
61419 new = old + more;
61420 if (!new)
61421 new = 1;
61422- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61423+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61424 return new;
61425 }
61426
61427diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61428index 10422ef..662570f 100644
61429--- a/include/net/ip_fib.h
61430+++ b/include/net/ip_fib.h
61431@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61432
61433 #define FIB_RES_SADDR(net, res) \
61434 ((FIB_RES_NH(res).nh_saddr_genid == \
61435- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61436+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61437 FIB_RES_NH(res).nh_saddr : \
61438 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61439 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61440diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61441index e5a7b9a..f4fc44b 100644
61442--- a/include/net/ip_vs.h
61443+++ b/include/net/ip_vs.h
61444@@ -509,7 +509,7 @@ struct ip_vs_conn {
61445 struct ip_vs_conn *control; /* Master control connection */
61446 atomic_t n_control; /* Number of controlled ones */
61447 struct ip_vs_dest *dest; /* real server */
61448- atomic_t in_pkts; /* incoming packet counter */
61449+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61450
61451 /* packet transmitter for different forwarding methods. If it
61452 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61453@@ -647,7 +647,7 @@ struct ip_vs_dest {
61454 __be16 port; /* port number of the server */
61455 union nf_inet_addr addr; /* IP address of the server */
61456 volatile unsigned flags; /* dest status flags */
61457- atomic_t conn_flags; /* flags to copy to conn */
61458+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61459 atomic_t weight; /* server weight */
61460
61461 atomic_t refcnt; /* reference counter */
61462diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61463index 69b610a..fe3962c 100644
61464--- a/include/net/irda/ircomm_core.h
61465+++ b/include/net/irda/ircomm_core.h
61466@@ -51,7 +51,7 @@ typedef struct {
61467 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61468 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61469 struct ircomm_info *);
61470-} call_t;
61471+} __no_const call_t;
61472
61473 struct ircomm_cb {
61474 irda_queue_t queue;
61475diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61476index 59ba38bc..d515662 100644
61477--- a/include/net/irda/ircomm_tty.h
61478+++ b/include/net/irda/ircomm_tty.h
61479@@ -35,6 +35,7 @@
61480 #include <linux/termios.h>
61481 #include <linux/timer.h>
61482 #include <linux/tty.h> /* struct tty_struct */
61483+#include <asm/local.h>
61484
61485 #include <net/irda/irias_object.h>
61486 #include <net/irda/ircomm_core.h>
61487@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61488 unsigned short close_delay;
61489 unsigned short closing_wait; /* time to wait before closing */
61490
61491- int open_count;
61492- int blocked_open; /* # of blocked opens */
61493+ local_t open_count;
61494+ local_t blocked_open; /* # of blocked opens */
61495
61496 /* Protect concurent access to :
61497 * o self->open_count
61498diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61499index f2419cf..473679f 100644
61500--- a/include/net/iucv/af_iucv.h
61501+++ b/include/net/iucv/af_iucv.h
61502@@ -139,7 +139,7 @@ struct iucv_sock {
61503 struct iucv_sock_list {
61504 struct hlist_head head;
61505 rwlock_t lock;
61506- atomic_t autobind_name;
61507+ atomic_unchecked_t autobind_name;
61508 };
61509
61510 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61511diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61512index 2720884..3aa5c25 100644
61513--- a/include/net/neighbour.h
61514+++ b/include/net/neighbour.h
61515@@ -122,7 +122,7 @@ struct neigh_ops {
61516 void (*error_report)(struct neighbour *, struct sk_buff *);
61517 int (*output)(struct neighbour *, struct sk_buff *);
61518 int (*connected_output)(struct neighbour *, struct sk_buff *);
61519-};
61520+} __do_const;
61521
61522 struct pneigh_entry {
61523 struct pneigh_entry *next;
61524diff --git a/include/net/netlink.h b/include/net/netlink.h
61525index cb1f350..3279d2c 100644
61526--- a/include/net/netlink.h
61527+++ b/include/net/netlink.h
61528@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61529 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61530 {
61531 if (mark)
61532- skb_trim(skb, (unsigned char *) mark - skb->data);
61533+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61534 }
61535
61536 /**
61537diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61538index d786b4f..4c3dd41 100644
61539--- a/include/net/netns/ipv4.h
61540+++ b/include/net/netns/ipv4.h
61541@@ -56,8 +56,8 @@ struct netns_ipv4 {
61542
61543 unsigned int sysctl_ping_group_range[2];
61544
61545- atomic_t rt_genid;
61546- atomic_t dev_addr_genid;
61547+ atomic_unchecked_t rt_genid;
61548+ atomic_unchecked_t dev_addr_genid;
61549
61550 #ifdef CONFIG_IP_MROUTE
61551 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61552diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61553index 6a72a58..e6a127d 100644
61554--- a/include/net/sctp/sctp.h
61555+++ b/include/net/sctp/sctp.h
61556@@ -318,9 +318,9 @@ do { \
61557
61558 #else /* SCTP_DEBUG */
61559
61560-#define SCTP_DEBUG_PRINTK(whatever...)
61561-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61562-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61563+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61564+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61565+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61566 #define SCTP_ENABLE_DEBUG
61567 #define SCTP_DISABLE_DEBUG
61568 #define SCTP_ASSERT(expr, str, func)
61569diff --git a/include/net/sock.h b/include/net/sock.h
61570index 32e3937..87a1dbc 100644
61571--- a/include/net/sock.h
61572+++ b/include/net/sock.h
61573@@ -277,7 +277,7 @@ struct sock {
61574 #ifdef CONFIG_RPS
61575 __u32 sk_rxhash;
61576 #endif
61577- atomic_t sk_drops;
61578+ atomic_unchecked_t sk_drops;
61579 int sk_rcvbuf;
61580
61581 struct sk_filter __rcu *sk_filter;
61582@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61583 }
61584
61585 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61586- char __user *from, char *to,
61587+ char __user *from, unsigned char *to,
61588 int copy, int offset)
61589 {
61590 if (skb->ip_summed == CHECKSUM_NONE) {
61591diff --git a/include/net/tcp.h b/include/net/tcp.h
61592index bb18c4d..bb87972 100644
61593--- a/include/net/tcp.h
61594+++ b/include/net/tcp.h
61595@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61596 char *name;
61597 sa_family_t family;
61598 const struct file_operations *seq_fops;
61599- struct seq_operations seq_ops;
61600+ seq_operations_no_const seq_ops;
61601 };
61602
61603 struct tcp_iter_state {
61604diff --git a/include/net/udp.h b/include/net/udp.h
61605index 3b285f4..0219639 100644
61606--- a/include/net/udp.h
61607+++ b/include/net/udp.h
61608@@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61609 sa_family_t family;
61610 struct udp_table *udp_table;
61611 const struct file_operations *seq_fops;
61612- struct seq_operations seq_ops;
61613+ seq_operations_no_const seq_ops;
61614 };
61615
61616 struct udp_iter_state {
61617diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61618index b203e14..1df3991 100644
61619--- a/include/net/xfrm.h
61620+++ b/include/net/xfrm.h
61621@@ -505,7 +505,7 @@ struct xfrm_policy {
61622 struct timer_list timer;
61623
61624 struct flow_cache_object flo;
61625- atomic_t genid;
61626+ atomic_unchecked_t genid;
61627 u32 priority;
61628 u32 index;
61629 struct xfrm_mark mark;
61630diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61631index 1a046b1..ee0bef0 100644
61632--- a/include/rdma/iw_cm.h
61633+++ b/include/rdma/iw_cm.h
61634@@ -122,7 +122,7 @@ struct iw_cm_verbs {
61635 int backlog);
61636
61637 int (*destroy_listen)(struct iw_cm_id *cm_id);
61638-};
61639+} __no_const;
61640
61641 /**
61642 * iw_create_cm_id - Create an IW CM identifier.
61643diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61644index 5d1a758..1dbf795 100644
61645--- a/include/scsi/libfc.h
61646+++ b/include/scsi/libfc.h
61647@@ -748,6 +748,7 @@ struct libfc_function_template {
61648 */
61649 void (*disc_stop_final) (struct fc_lport *);
61650 };
61651+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61652
61653 /**
61654 * struct fc_disc - Discovery context
61655@@ -851,7 +852,7 @@ struct fc_lport {
61656 struct fc_vport *vport;
61657
61658 /* Operational Information */
61659- struct libfc_function_template tt;
61660+ libfc_function_template_no_const tt;
61661 u8 link_up;
61662 u8 qfull;
61663 enum fc_lport_state state;
61664diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61665index 5591ed5..13eb457 100644
61666--- a/include/scsi/scsi_device.h
61667+++ b/include/scsi/scsi_device.h
61668@@ -161,9 +161,9 @@ struct scsi_device {
61669 unsigned int max_device_blocked; /* what device_blocked counts down from */
61670 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61671
61672- atomic_t iorequest_cnt;
61673- atomic_t iodone_cnt;
61674- atomic_t ioerr_cnt;
61675+ atomic_unchecked_t iorequest_cnt;
61676+ atomic_unchecked_t iodone_cnt;
61677+ atomic_unchecked_t ioerr_cnt;
61678
61679 struct device sdev_gendev,
61680 sdev_dev;
61681diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61682index 2a65167..91e01f8 100644
61683--- a/include/scsi/scsi_transport_fc.h
61684+++ b/include/scsi/scsi_transport_fc.h
61685@@ -711,7 +711,7 @@ struct fc_function_template {
61686 unsigned long show_host_system_hostname:1;
61687
61688 unsigned long disable_target_scan:1;
61689-};
61690+} __do_const;
61691
61692
61693 /**
61694diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61695index 030b87c..98a6954 100644
61696--- a/include/sound/ak4xxx-adda.h
61697+++ b/include/sound/ak4xxx-adda.h
61698@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61699 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61700 unsigned char val);
61701 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61702-};
61703+} __no_const;
61704
61705 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61706
61707diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61708index 8c05e47..2b5df97 100644
61709--- a/include/sound/hwdep.h
61710+++ b/include/sound/hwdep.h
61711@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61712 struct snd_hwdep_dsp_status *status);
61713 int (*dsp_load)(struct snd_hwdep *hw,
61714 struct snd_hwdep_dsp_image *image);
61715-};
61716+} __no_const;
61717
61718 struct snd_hwdep {
61719 struct snd_card *card;
61720diff --git a/include/sound/info.h b/include/sound/info.h
61721index 5492cc4..1a65278 100644
61722--- a/include/sound/info.h
61723+++ b/include/sound/info.h
61724@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61725 struct snd_info_buffer *buffer);
61726 void (*write)(struct snd_info_entry *entry,
61727 struct snd_info_buffer *buffer);
61728-};
61729+} __no_const;
61730
61731 struct snd_info_entry_ops {
61732 int (*open)(struct snd_info_entry *entry,
61733diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61734index 0cf91b2..b70cae4 100644
61735--- a/include/sound/pcm.h
61736+++ b/include/sound/pcm.h
61737@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61738 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61739 int (*ack)(struct snd_pcm_substream *substream);
61740 };
61741+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61742
61743 /*
61744 *
61745diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61746index af1b49e..a5d55a5 100644
61747--- a/include/sound/sb16_csp.h
61748+++ b/include/sound/sb16_csp.h
61749@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61750 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61751 int (*csp_stop) (struct snd_sb_csp * p);
61752 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61753-};
61754+} __no_const;
61755
61756 /*
61757 * CSP private data
61758diff --git a/include/sound/soc.h b/include/sound/soc.h
61759index 11cfb59..e3f93f4 100644
61760--- a/include/sound/soc.h
61761+++ b/include/sound/soc.h
61762@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61763 /* platform IO - used for platform DAPM */
61764 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61765 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61766-};
61767+} __do_const;
61768
61769 struct snd_soc_platform {
61770 const char *name;
61771diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61772index 444cd6b..3327cc5 100644
61773--- a/include/sound/ymfpci.h
61774+++ b/include/sound/ymfpci.h
61775@@ -358,7 +358,7 @@ struct snd_ymfpci {
61776 spinlock_t reg_lock;
61777 spinlock_t voice_lock;
61778 wait_queue_head_t interrupt_sleep;
61779- atomic_t interrupt_sleep_count;
61780+ atomic_unchecked_t interrupt_sleep_count;
61781 struct snd_info_entry *proc_entry;
61782 const struct firmware *dsp_microcode;
61783 const struct firmware *controller_microcode;
61784diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61785index a79886c..b483af6 100644
61786--- a/include/target/target_core_base.h
61787+++ b/include/target/target_core_base.h
61788@@ -346,7 +346,7 @@ struct t10_reservation_ops {
61789 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61790 int (*t10_pr_register)(struct se_cmd *);
61791 int (*t10_pr_clear)(struct se_cmd *);
61792-};
61793+} __no_const;
61794
61795 struct t10_reservation {
61796 /* Reservation effects all target ports */
61797@@ -465,8 +465,8 @@ struct se_cmd {
61798 atomic_t t_se_count;
61799 atomic_t t_task_cdbs_left;
61800 atomic_t t_task_cdbs_ex_left;
61801- atomic_t t_task_cdbs_sent;
61802- atomic_t t_transport_aborted;
61803+ atomic_unchecked_t t_task_cdbs_sent;
61804+ atomic_unchecked_t t_transport_aborted;
61805 atomic_t t_transport_active;
61806 atomic_t t_transport_complete;
61807 atomic_t t_transport_queue_active;
61808@@ -704,7 +704,7 @@ struct se_device {
61809 /* Active commands on this virtual SE device */
61810 atomic_t simple_cmds;
61811 atomic_t depth_left;
61812- atomic_t dev_ordered_id;
61813+ atomic_unchecked_t dev_ordered_id;
61814 atomic_t execute_tasks;
61815 atomic_t dev_ordered_sync;
61816 atomic_t dev_qf_count;
61817diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61818index 1c09820..7f5ec79 100644
61819--- a/include/trace/events/irq.h
61820+++ b/include/trace/events/irq.h
61821@@ -36,7 +36,7 @@ struct softirq_action;
61822 */
61823 TRACE_EVENT(irq_handler_entry,
61824
61825- TP_PROTO(int irq, struct irqaction *action),
61826+ TP_PROTO(int irq, const struct irqaction *action),
61827
61828 TP_ARGS(irq, action),
61829
61830@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61831 */
61832 TRACE_EVENT(irq_handler_exit,
61833
61834- TP_PROTO(int irq, struct irqaction *action, int ret),
61835+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61836
61837 TP_ARGS(irq, action, ret),
61838
61839diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61840index c41f308..6918de3 100644
61841--- a/include/video/udlfb.h
61842+++ b/include/video/udlfb.h
61843@@ -52,10 +52,10 @@ struct dlfb_data {
61844 u32 pseudo_palette[256];
61845 int blank_mode; /*one of FB_BLANK_ */
61846 /* blit-only rendering path metrics, exposed through sysfs */
61847- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61848- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61849- atomic_t bytes_sent; /* to usb, after compression including overhead */
61850- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61851+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61852+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61853+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61854+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61855 };
61856
61857 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61858diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
61859index 0993a22..32ba2fe 100644
61860--- a/include/video/uvesafb.h
61861+++ b/include/video/uvesafb.h
61862@@ -177,6 +177,7 @@ struct uvesafb_par {
61863 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61864 u8 pmi_setpal; /* PMI for palette changes */
61865 u16 *pmi_base; /* protected mode interface location */
61866+ u8 *pmi_code; /* protected mode code location */
61867 void *pmi_start;
61868 void *pmi_pal;
61869 u8 *vbe_state_orig; /*
61870diff --git a/init/Kconfig b/init/Kconfig
61871index 43298f9..2f56c12 100644
61872--- a/init/Kconfig
61873+++ b/init/Kconfig
61874@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
61875
61876 config COMPAT_BRK
61877 bool "Disable heap randomization"
61878- default y
61879+ default n
61880 help
61881 Randomizing heap placement makes heap exploits harder, but it
61882 also breaks ancient binaries (including anything libc5 based).
61883diff --git a/init/do_mounts.c b/init/do_mounts.c
61884index db6e5ee..7677ff7 100644
61885--- a/init/do_mounts.c
61886+++ b/init/do_mounts.c
61887@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
61888
61889 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61890 {
61891- int err = sys_mount(name, "/root", fs, flags, data);
61892+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61893 if (err)
61894 return err;
61895
61896- sys_chdir((const char __user __force *)"/root");
61897+ sys_chdir((const char __force_user*)"/root");
61898 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61899 printk(KERN_INFO
61900 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61901@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
61902 va_start(args, fmt);
61903 vsprintf(buf, fmt, args);
61904 va_end(args);
61905- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61906+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61907 if (fd >= 0) {
61908 sys_ioctl(fd, FDEJECT, 0);
61909 sys_close(fd);
61910 }
61911 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61912- fd = sys_open("/dev/console", O_RDWR, 0);
61913+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61914 if (fd >= 0) {
61915 sys_ioctl(fd, TCGETS, (long)&termios);
61916 termios.c_lflag &= ~ICANON;
61917 sys_ioctl(fd, TCSETSF, (long)&termios);
61918- sys_read(fd, &c, 1);
61919+ sys_read(fd, (char __user *)&c, 1);
61920 termios.c_lflag |= ICANON;
61921 sys_ioctl(fd, TCSETSF, (long)&termios);
61922 sys_close(fd);
61923@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
61924 mount_root();
61925 out:
61926 devtmpfs_mount("dev");
61927- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61928- sys_chroot((const char __user __force *)".");
61929+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61930+ sys_chroot((const char __force_user *)".");
61931 }
61932diff --git a/init/do_mounts.h b/init/do_mounts.h
61933index f5b978a..69dbfe8 100644
61934--- a/init/do_mounts.h
61935+++ b/init/do_mounts.h
61936@@ -15,15 +15,15 @@ extern int root_mountflags;
61937
61938 static inline int create_dev(char *name, dev_t dev)
61939 {
61940- sys_unlink(name);
61941- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61942+ sys_unlink((char __force_user *)name);
61943+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61944 }
61945
61946 #if BITS_PER_LONG == 32
61947 static inline u32 bstat(char *name)
61948 {
61949 struct stat64 stat;
61950- if (sys_stat64(name, &stat) != 0)
61951+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61952 return 0;
61953 if (!S_ISBLK(stat.st_mode))
61954 return 0;
61955@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61956 static inline u32 bstat(char *name)
61957 {
61958 struct stat stat;
61959- if (sys_newstat(name, &stat) != 0)
61960+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61961 return 0;
61962 if (!S_ISBLK(stat.st_mode))
61963 return 0;
61964diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
61965index 3098a38..253064e 100644
61966--- a/init/do_mounts_initrd.c
61967+++ b/init/do_mounts_initrd.c
61968@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61969 create_dev("/dev/root.old", Root_RAM0);
61970 /* mount initrd on rootfs' /root */
61971 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61972- sys_mkdir("/old", 0700);
61973- root_fd = sys_open("/", 0, 0);
61974- old_fd = sys_open("/old", 0, 0);
61975+ sys_mkdir((const char __force_user *)"/old", 0700);
61976+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61977+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61978 /* move initrd over / and chdir/chroot in initrd root */
61979- sys_chdir("/root");
61980- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61981- sys_chroot(".");
61982+ sys_chdir((const char __force_user *)"/root");
61983+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61984+ sys_chroot((const char __force_user *)".");
61985
61986 /*
61987 * In case that a resume from disk is carried out by linuxrc or one of
61988@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61989
61990 /* move initrd to rootfs' /old */
61991 sys_fchdir(old_fd);
61992- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61993+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61994 /* switch root and cwd back to / of rootfs */
61995 sys_fchdir(root_fd);
61996- sys_chroot(".");
61997+ sys_chroot((const char __force_user *)".");
61998 sys_close(old_fd);
61999 sys_close(root_fd);
62000
62001 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62002- sys_chdir("/old");
62003+ sys_chdir((const char __force_user *)"/old");
62004 return;
62005 }
62006
62007@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62008 mount_root();
62009
62010 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62011- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62012+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62013 if (!error)
62014 printk("okay\n");
62015 else {
62016- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62017+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62018 if (error == -ENOENT)
62019 printk("/initrd does not exist. Ignored.\n");
62020 else
62021 printk("failed\n");
62022 printk(KERN_NOTICE "Unmounting old root\n");
62023- sys_umount("/old", MNT_DETACH);
62024+ sys_umount((char __force_user *)"/old", MNT_DETACH);
62025 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62026 if (fd < 0) {
62027 error = fd;
62028@@ -116,11 +116,11 @@ int __init initrd_load(void)
62029 * mounted in the normal path.
62030 */
62031 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62032- sys_unlink("/initrd.image");
62033+ sys_unlink((const char __force_user *)"/initrd.image");
62034 handle_initrd();
62035 return 1;
62036 }
62037 }
62038- sys_unlink("/initrd.image");
62039+ sys_unlink((const char __force_user *)"/initrd.image");
62040 return 0;
62041 }
62042diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62043index 32c4799..c27ee74 100644
62044--- a/init/do_mounts_md.c
62045+++ b/init/do_mounts_md.c
62046@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62047 partitioned ? "_d" : "", minor,
62048 md_setup_args[ent].device_names);
62049
62050- fd = sys_open(name, 0, 0);
62051+ fd = sys_open((char __force_user *)name, 0, 0);
62052 if (fd < 0) {
62053 printk(KERN_ERR "md: open failed - cannot start "
62054 "array %s\n", name);
62055@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62056 * array without it
62057 */
62058 sys_close(fd);
62059- fd = sys_open(name, 0, 0);
62060+ fd = sys_open((char __force_user *)name, 0, 0);
62061 sys_ioctl(fd, BLKRRPART, 0);
62062 }
62063 sys_close(fd);
62064@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62065
62066 wait_for_device_probe();
62067
62068- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62069+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62070 if (fd >= 0) {
62071 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62072 sys_close(fd);
62073diff --git a/init/initramfs.c b/init/initramfs.c
62074index 2531811..040d4d4 100644
62075--- a/init/initramfs.c
62076+++ b/init/initramfs.c
62077@@ -74,7 +74,7 @@ static void __init free_hash(void)
62078 }
62079 }
62080
62081-static long __init do_utime(char __user *filename, time_t mtime)
62082+static long __init do_utime(__force char __user *filename, time_t mtime)
62083 {
62084 struct timespec t[2];
62085
62086@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62087 struct dir_entry *de, *tmp;
62088 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62089 list_del(&de->list);
62090- do_utime(de->name, de->mtime);
62091+ do_utime((char __force_user *)de->name, de->mtime);
62092 kfree(de->name);
62093 kfree(de);
62094 }
62095@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62096 if (nlink >= 2) {
62097 char *old = find_link(major, minor, ino, mode, collected);
62098 if (old)
62099- return (sys_link(old, collected) < 0) ? -1 : 1;
62100+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62101 }
62102 return 0;
62103 }
62104@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62105 {
62106 struct stat st;
62107
62108- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62109+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62110 if (S_ISDIR(st.st_mode))
62111- sys_rmdir(path);
62112+ sys_rmdir((char __force_user *)path);
62113 else
62114- sys_unlink(path);
62115+ sys_unlink((char __force_user *)path);
62116 }
62117 }
62118
62119@@ -305,7 +305,7 @@ static int __init do_name(void)
62120 int openflags = O_WRONLY|O_CREAT;
62121 if (ml != 1)
62122 openflags |= O_TRUNC;
62123- wfd = sys_open(collected, openflags, mode);
62124+ wfd = sys_open((char __force_user *)collected, openflags, mode);
62125
62126 if (wfd >= 0) {
62127 sys_fchown(wfd, uid, gid);
62128@@ -317,17 +317,17 @@ static int __init do_name(void)
62129 }
62130 }
62131 } else if (S_ISDIR(mode)) {
62132- sys_mkdir(collected, mode);
62133- sys_chown(collected, uid, gid);
62134- sys_chmod(collected, mode);
62135+ sys_mkdir((char __force_user *)collected, mode);
62136+ sys_chown((char __force_user *)collected, uid, gid);
62137+ sys_chmod((char __force_user *)collected, mode);
62138 dir_add(collected, mtime);
62139 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62140 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62141 if (maybe_link() == 0) {
62142- sys_mknod(collected, mode, rdev);
62143- sys_chown(collected, uid, gid);
62144- sys_chmod(collected, mode);
62145- do_utime(collected, mtime);
62146+ sys_mknod((char __force_user *)collected, mode, rdev);
62147+ sys_chown((char __force_user *)collected, uid, gid);
62148+ sys_chmod((char __force_user *)collected, mode);
62149+ do_utime((char __force_user *)collected, mtime);
62150 }
62151 }
62152 return 0;
62153@@ -336,15 +336,15 @@ static int __init do_name(void)
62154 static int __init do_copy(void)
62155 {
62156 if (count >= body_len) {
62157- sys_write(wfd, victim, body_len);
62158+ sys_write(wfd, (char __force_user *)victim, body_len);
62159 sys_close(wfd);
62160- do_utime(vcollected, mtime);
62161+ do_utime((char __force_user *)vcollected, mtime);
62162 kfree(vcollected);
62163 eat(body_len);
62164 state = SkipIt;
62165 return 0;
62166 } else {
62167- sys_write(wfd, victim, count);
62168+ sys_write(wfd, (char __force_user *)victim, count);
62169 body_len -= count;
62170 eat(count);
62171 return 1;
62172@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62173 {
62174 collected[N_ALIGN(name_len) + body_len] = '\0';
62175 clean_path(collected, 0);
62176- sys_symlink(collected + N_ALIGN(name_len), collected);
62177- sys_lchown(collected, uid, gid);
62178- do_utime(collected, mtime);
62179+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62180+ sys_lchown((char __force_user *)collected, uid, gid);
62181+ do_utime((char __force_user *)collected, mtime);
62182 state = SkipIt;
62183 next_state = Reset;
62184 return 0;
62185diff --git a/init/main.c b/init/main.c
62186index 217ed23..32e5731 100644
62187--- a/init/main.c
62188+++ b/init/main.c
62189@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62190 extern void tc_init(void);
62191 #endif
62192
62193+extern void grsecurity_init(void);
62194+
62195 /*
62196 * Debug helper: via this flag we know that we are in 'early bootup code'
62197 * where only the boot processor is running with IRQ disabled. This means
62198@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62199
62200 __setup("reset_devices", set_reset_devices);
62201
62202+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62203+extern char pax_enter_kernel_user[];
62204+extern char pax_exit_kernel_user[];
62205+extern pgdval_t clone_pgd_mask;
62206+#endif
62207+
62208+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62209+static int __init setup_pax_nouderef(char *str)
62210+{
62211+#ifdef CONFIG_X86_32
62212+ unsigned int cpu;
62213+ struct desc_struct *gdt;
62214+
62215+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
62216+ gdt = get_cpu_gdt_table(cpu);
62217+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62218+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62219+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62220+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62221+ }
62222+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62223+#else
62224+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62225+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62226+ clone_pgd_mask = ~(pgdval_t)0UL;
62227+#endif
62228+
62229+ return 0;
62230+}
62231+early_param("pax_nouderef", setup_pax_nouderef);
62232+#endif
62233+
62234+#ifdef CONFIG_PAX_SOFTMODE
62235+int pax_softmode;
62236+
62237+static int __init setup_pax_softmode(char *str)
62238+{
62239+ get_option(&str, &pax_softmode);
62240+ return 1;
62241+}
62242+__setup("pax_softmode=", setup_pax_softmode);
62243+#endif
62244+
62245 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62246 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62247 static const char *panic_later, *panic_param;
62248@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62249 {
62250 int count = preempt_count();
62251 int ret;
62252+ const char *msg1 = "", *msg2 = "";
62253
62254 if (initcall_debug)
62255 ret = do_one_initcall_debug(fn);
62256@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62257 sprintf(msgbuf, "error code %d ", ret);
62258
62259 if (preempt_count() != count) {
62260- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62261+ msg1 = " preemption imbalance";
62262 preempt_count() = count;
62263 }
62264 if (irqs_disabled()) {
62265- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62266+ msg2 = " disabled interrupts";
62267 local_irq_enable();
62268 }
62269- if (msgbuf[0]) {
62270- printk("initcall %pF returned with %s\n", fn, msgbuf);
62271+ if (msgbuf[0] || *msg1 || *msg2) {
62272+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62273 }
62274
62275 return ret;
62276@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62277 do_basic_setup();
62278
62279 /* Open the /dev/console on the rootfs, this should never fail */
62280- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62281+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62282 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62283
62284 (void) sys_dup(0);
62285@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62286 if (!ramdisk_execute_command)
62287 ramdisk_execute_command = "/init";
62288
62289- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62290+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62291 ramdisk_execute_command = NULL;
62292 prepare_namespace();
62293 }
62294
62295+ grsecurity_init();
62296+
62297 /*
62298 * Ok, we have completed the initial bootup, and
62299 * we're essentially up and running. Get rid of the
62300diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62301index 5b4293d..f179875 100644
62302--- a/ipc/mqueue.c
62303+++ b/ipc/mqueue.c
62304@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62305 mq_bytes = (mq_msg_tblsz +
62306 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62307
62308+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62309 spin_lock(&mq_lock);
62310 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62311 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62312diff --git a/ipc/msg.c b/ipc/msg.c
62313index 7385de2..a8180e0 100644
62314--- a/ipc/msg.c
62315+++ b/ipc/msg.c
62316@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62317 return security_msg_queue_associate(msq, msgflg);
62318 }
62319
62320+static struct ipc_ops msg_ops = {
62321+ .getnew = newque,
62322+ .associate = msg_security,
62323+ .more_checks = NULL
62324+};
62325+
62326 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62327 {
62328 struct ipc_namespace *ns;
62329- struct ipc_ops msg_ops;
62330 struct ipc_params msg_params;
62331
62332 ns = current->nsproxy->ipc_ns;
62333
62334- msg_ops.getnew = newque;
62335- msg_ops.associate = msg_security;
62336- msg_ops.more_checks = NULL;
62337-
62338 msg_params.key = key;
62339 msg_params.flg = msgflg;
62340
62341diff --git a/ipc/sem.c b/ipc/sem.c
62342index 5215a81..cfc0cac 100644
62343--- a/ipc/sem.c
62344+++ b/ipc/sem.c
62345@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62346 return 0;
62347 }
62348
62349+static struct ipc_ops sem_ops = {
62350+ .getnew = newary,
62351+ .associate = sem_security,
62352+ .more_checks = sem_more_checks
62353+};
62354+
62355 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62356 {
62357 struct ipc_namespace *ns;
62358- struct ipc_ops sem_ops;
62359 struct ipc_params sem_params;
62360
62361 ns = current->nsproxy->ipc_ns;
62362@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62363 if (nsems < 0 || nsems > ns->sc_semmsl)
62364 return -EINVAL;
62365
62366- sem_ops.getnew = newary;
62367- sem_ops.associate = sem_security;
62368- sem_ops.more_checks = sem_more_checks;
62369-
62370 sem_params.key = key;
62371 sem_params.flg = semflg;
62372 sem_params.u.nsems = nsems;
62373diff --git a/ipc/shm.c b/ipc/shm.c
62374index b76be5b..859e750 100644
62375--- a/ipc/shm.c
62376+++ b/ipc/shm.c
62377@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62378 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62379 #endif
62380
62381+#ifdef CONFIG_GRKERNSEC
62382+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62383+ const time_t shm_createtime, const uid_t cuid,
62384+ const int shmid);
62385+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62386+ const time_t shm_createtime);
62387+#endif
62388+
62389 void shm_init_ns(struct ipc_namespace *ns)
62390 {
62391 ns->shm_ctlmax = SHMMAX;
62392@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62393 shp->shm_lprid = 0;
62394 shp->shm_atim = shp->shm_dtim = 0;
62395 shp->shm_ctim = get_seconds();
62396+#ifdef CONFIG_GRKERNSEC
62397+ {
62398+ struct timespec timeval;
62399+ do_posix_clock_monotonic_gettime(&timeval);
62400+
62401+ shp->shm_createtime = timeval.tv_sec;
62402+ }
62403+#endif
62404 shp->shm_segsz = size;
62405 shp->shm_nattch = 0;
62406 shp->shm_file = file;
62407@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62408 return 0;
62409 }
62410
62411+static struct ipc_ops shm_ops = {
62412+ .getnew = newseg,
62413+ .associate = shm_security,
62414+ .more_checks = shm_more_checks
62415+};
62416+
62417 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62418 {
62419 struct ipc_namespace *ns;
62420- struct ipc_ops shm_ops;
62421 struct ipc_params shm_params;
62422
62423 ns = current->nsproxy->ipc_ns;
62424
62425- shm_ops.getnew = newseg;
62426- shm_ops.associate = shm_security;
62427- shm_ops.more_checks = shm_more_checks;
62428-
62429 shm_params.key = key;
62430 shm_params.flg = shmflg;
62431 shm_params.u.size = size;
62432@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62433 f_mode = FMODE_READ | FMODE_WRITE;
62434 }
62435 if (shmflg & SHM_EXEC) {
62436+
62437+#ifdef CONFIG_PAX_MPROTECT
62438+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
62439+ goto out;
62440+#endif
62441+
62442 prot |= PROT_EXEC;
62443 acc_mode |= S_IXUGO;
62444 }
62445@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62446 if (err)
62447 goto out_unlock;
62448
62449+#ifdef CONFIG_GRKERNSEC
62450+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62451+ shp->shm_perm.cuid, shmid) ||
62452+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62453+ err = -EACCES;
62454+ goto out_unlock;
62455+ }
62456+#endif
62457+
62458 path = shp->shm_file->f_path;
62459 path_get(&path);
62460 shp->shm_nattch++;
62461+#ifdef CONFIG_GRKERNSEC
62462+ shp->shm_lapid = current->pid;
62463+#endif
62464 size = i_size_read(path.dentry->d_inode);
62465 shm_unlock(shp);
62466
62467diff --git a/kernel/acct.c b/kernel/acct.c
62468index fa7eb3d..7faf116 100644
62469--- a/kernel/acct.c
62470+++ b/kernel/acct.c
62471@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62472 */
62473 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62474 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62475- file->f_op->write(file, (char *)&ac,
62476+ file->f_op->write(file, (char __force_user *)&ac,
62477 sizeof(acct_t), &file->f_pos);
62478 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62479 set_fs(fs);
62480diff --git a/kernel/audit.c b/kernel/audit.c
62481index 09fae26..ed71d5b 100644
62482--- a/kernel/audit.c
62483+++ b/kernel/audit.c
62484@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62485 3) suppressed due to audit_rate_limit
62486 4) suppressed due to audit_backlog_limit
62487 */
62488-static atomic_t audit_lost = ATOMIC_INIT(0);
62489+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62490
62491 /* The netlink socket. */
62492 static struct sock *audit_sock;
62493@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62494 unsigned long now;
62495 int print;
62496
62497- atomic_inc(&audit_lost);
62498+ atomic_inc_unchecked(&audit_lost);
62499
62500 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62501
62502@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62503 printk(KERN_WARNING
62504 "audit: audit_lost=%d audit_rate_limit=%d "
62505 "audit_backlog_limit=%d\n",
62506- atomic_read(&audit_lost),
62507+ atomic_read_unchecked(&audit_lost),
62508 audit_rate_limit,
62509 audit_backlog_limit);
62510 audit_panic(message);
62511@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62512 status_set.pid = audit_pid;
62513 status_set.rate_limit = audit_rate_limit;
62514 status_set.backlog_limit = audit_backlog_limit;
62515- status_set.lost = atomic_read(&audit_lost);
62516+ status_set.lost = atomic_read_unchecked(&audit_lost);
62517 status_set.backlog = skb_queue_len(&audit_skb_queue);
62518 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62519 &status_set, sizeof(status_set));
62520@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62521 avail = audit_expand(ab,
62522 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62523 if (!avail)
62524- goto out;
62525+ goto out_va_end;
62526 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62527 }
62528- va_end(args2);
62529 if (len > 0)
62530 skb_put(skb, len);
62531+out_va_end:
62532+ va_end(args2);
62533 out:
62534 return;
62535 }
62536diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62537index 47b7fc1..c003c33 100644
62538--- a/kernel/auditsc.c
62539+++ b/kernel/auditsc.c
62540@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62541 struct audit_buffer **ab,
62542 struct audit_aux_data_execve *axi)
62543 {
62544- int i;
62545- size_t len, len_sent = 0;
62546+ int i, len;
62547+ size_t len_sent = 0;
62548 const char __user *p;
62549 char *buf;
62550
62551@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62552 }
62553
62554 /* global counter which is incremented every time something logs in */
62555-static atomic_t session_id = ATOMIC_INIT(0);
62556+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62557
62558 /**
62559 * audit_set_loginuid - set a task's audit_context loginuid
62560@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62561 */
62562 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62563 {
62564- unsigned int sessionid = atomic_inc_return(&session_id);
62565+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62566 struct audit_context *context = task->audit_context;
62567
62568 if (context && context->in_syscall) {
62569diff --git a/kernel/capability.c b/kernel/capability.c
62570index b463871..fa3ea1f 100644
62571--- a/kernel/capability.c
62572+++ b/kernel/capability.c
62573@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62574 * before modification is attempted and the application
62575 * fails.
62576 */
62577+ if (tocopy > ARRAY_SIZE(kdata))
62578+ return -EFAULT;
62579+
62580 if (copy_to_user(dataptr, kdata, tocopy
62581 * sizeof(struct __user_cap_data_struct))) {
62582 return -EFAULT;
62583@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62584 BUG();
62585 }
62586
62587- if (security_capable(ns, current_cred(), cap) == 0) {
62588+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62589 current->flags |= PF_SUPERPRIV;
62590 return true;
62591 }
62592@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62593 }
62594 EXPORT_SYMBOL(ns_capable);
62595
62596+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62597+{
62598+ if (unlikely(!cap_valid(cap))) {
62599+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62600+ BUG();
62601+ }
62602+
62603+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62604+ current->flags |= PF_SUPERPRIV;
62605+ return true;
62606+ }
62607+ return false;
62608+}
62609+EXPORT_SYMBOL(ns_capable_nolog);
62610+
62611+bool capable_nolog(int cap)
62612+{
62613+ return ns_capable_nolog(&init_user_ns, cap);
62614+}
62615+EXPORT_SYMBOL(capable_nolog);
62616+
62617 /**
62618 * task_ns_capable - Determine whether current task has a superior
62619 * capability targeted at a specific task's user namespace.
62620@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62621 }
62622 EXPORT_SYMBOL(task_ns_capable);
62623
62624+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62625+{
62626+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62627+}
62628+EXPORT_SYMBOL(task_ns_capable_nolog);
62629+
62630 /**
62631 * nsown_capable - Check superior capability to one's own user_ns
62632 * @cap: The capability in question
62633diff --git a/kernel/compat.c b/kernel/compat.c
62634index f346ced..aa2b1f4 100644
62635--- a/kernel/compat.c
62636+++ b/kernel/compat.c
62637@@ -13,6 +13,7 @@
62638
62639 #include <linux/linkage.h>
62640 #include <linux/compat.h>
62641+#include <linux/module.h>
62642 #include <linux/errno.h>
62643 #include <linux/time.h>
62644 #include <linux/signal.h>
62645@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62646 mm_segment_t oldfs;
62647 long ret;
62648
62649- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62650+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62651 oldfs = get_fs();
62652 set_fs(KERNEL_DS);
62653 ret = hrtimer_nanosleep_restart(restart);
62654@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62655 oldfs = get_fs();
62656 set_fs(KERNEL_DS);
62657 ret = hrtimer_nanosleep(&tu,
62658- rmtp ? (struct timespec __user *)&rmt : NULL,
62659+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62660 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62661 set_fs(oldfs);
62662
62663@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62664 mm_segment_t old_fs = get_fs();
62665
62666 set_fs(KERNEL_DS);
62667- ret = sys_sigpending((old_sigset_t __user *) &s);
62668+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62669 set_fs(old_fs);
62670 if (ret == 0)
62671 ret = put_user(s, set);
62672@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62673 old_fs = get_fs();
62674 set_fs(KERNEL_DS);
62675 ret = sys_sigprocmask(how,
62676- set ? (old_sigset_t __user *) &s : NULL,
62677- oset ? (old_sigset_t __user *) &s : NULL);
62678+ set ? (old_sigset_t __force_user *) &s : NULL,
62679+ oset ? (old_sigset_t __force_user *) &s : NULL);
62680 set_fs(old_fs);
62681 if (ret == 0)
62682 if (oset)
62683@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62684 mm_segment_t old_fs = get_fs();
62685
62686 set_fs(KERNEL_DS);
62687- ret = sys_old_getrlimit(resource, &r);
62688+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62689 set_fs(old_fs);
62690
62691 if (!ret) {
62692@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62693 mm_segment_t old_fs = get_fs();
62694
62695 set_fs(KERNEL_DS);
62696- ret = sys_getrusage(who, (struct rusage __user *) &r);
62697+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62698 set_fs(old_fs);
62699
62700 if (ret)
62701@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62702 set_fs (KERNEL_DS);
62703 ret = sys_wait4(pid,
62704 (stat_addr ?
62705- (unsigned int __user *) &status : NULL),
62706- options, (struct rusage __user *) &r);
62707+ (unsigned int __force_user *) &status : NULL),
62708+ options, (struct rusage __force_user *) &r);
62709 set_fs (old_fs);
62710
62711 if (ret > 0) {
62712@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62713 memset(&info, 0, sizeof(info));
62714
62715 set_fs(KERNEL_DS);
62716- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62717- uru ? (struct rusage __user *)&ru : NULL);
62718+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62719+ uru ? (struct rusage __force_user *)&ru : NULL);
62720 set_fs(old_fs);
62721
62722 if ((ret < 0) || (info.si_signo == 0))
62723@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62724 oldfs = get_fs();
62725 set_fs(KERNEL_DS);
62726 err = sys_timer_settime(timer_id, flags,
62727- (struct itimerspec __user *) &newts,
62728- (struct itimerspec __user *) &oldts);
62729+ (struct itimerspec __force_user *) &newts,
62730+ (struct itimerspec __force_user *) &oldts);
62731 set_fs(oldfs);
62732 if (!err && old && put_compat_itimerspec(old, &oldts))
62733 return -EFAULT;
62734@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62735 oldfs = get_fs();
62736 set_fs(KERNEL_DS);
62737 err = sys_timer_gettime(timer_id,
62738- (struct itimerspec __user *) &ts);
62739+ (struct itimerspec __force_user *) &ts);
62740 set_fs(oldfs);
62741 if (!err && put_compat_itimerspec(setting, &ts))
62742 return -EFAULT;
62743@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62744 oldfs = get_fs();
62745 set_fs(KERNEL_DS);
62746 err = sys_clock_settime(which_clock,
62747- (struct timespec __user *) &ts);
62748+ (struct timespec __force_user *) &ts);
62749 set_fs(oldfs);
62750 return err;
62751 }
62752@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62753 oldfs = get_fs();
62754 set_fs(KERNEL_DS);
62755 err = sys_clock_gettime(which_clock,
62756- (struct timespec __user *) &ts);
62757+ (struct timespec __force_user *) &ts);
62758 set_fs(oldfs);
62759 if (!err && put_compat_timespec(&ts, tp))
62760 return -EFAULT;
62761@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62762
62763 oldfs = get_fs();
62764 set_fs(KERNEL_DS);
62765- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62766+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62767 set_fs(oldfs);
62768
62769 err = compat_put_timex(utp, &txc);
62770@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62771 oldfs = get_fs();
62772 set_fs(KERNEL_DS);
62773 err = sys_clock_getres(which_clock,
62774- (struct timespec __user *) &ts);
62775+ (struct timespec __force_user *) &ts);
62776 set_fs(oldfs);
62777 if (!err && tp && put_compat_timespec(&ts, tp))
62778 return -EFAULT;
62779@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62780 long err;
62781 mm_segment_t oldfs;
62782 struct timespec tu;
62783- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62784+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62785
62786- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62787+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62788 oldfs = get_fs();
62789 set_fs(KERNEL_DS);
62790 err = clock_nanosleep_restart(restart);
62791@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62792 oldfs = get_fs();
62793 set_fs(KERNEL_DS);
62794 err = sys_clock_nanosleep(which_clock, flags,
62795- (struct timespec __user *) &in,
62796- (struct timespec __user *) &out);
62797+ (struct timespec __force_user *) &in,
62798+ (struct timespec __force_user *) &out);
62799 set_fs(oldfs);
62800
62801 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62802diff --git a/kernel/configs.c b/kernel/configs.c
62803index 42e8fa0..9e7406b 100644
62804--- a/kernel/configs.c
62805+++ b/kernel/configs.c
62806@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62807 struct proc_dir_entry *entry;
62808
62809 /* create the current config file */
62810+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62811+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62812+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62813+ &ikconfig_file_ops);
62814+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62815+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62816+ &ikconfig_file_ops);
62817+#endif
62818+#else
62819 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62820 &ikconfig_file_ops);
62821+#endif
62822+
62823 if (!entry)
62824 return -ENOMEM;
62825
62826diff --git a/kernel/cred.c b/kernel/cred.c
62827index 5791612..a3c04dc 100644
62828--- a/kernel/cred.c
62829+++ b/kernel/cred.c
62830@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62831 validate_creds(cred);
62832 put_cred(cred);
62833 }
62834+
62835+#ifdef CONFIG_GRKERNSEC_SETXID
62836+ cred = (struct cred *) tsk->delayed_cred;
62837+ if (cred) {
62838+ tsk->delayed_cred = NULL;
62839+ validate_creds(cred);
62840+ put_cred(cred);
62841+ }
62842+#endif
62843 }
62844
62845 /**
62846@@ -470,7 +479,7 @@ error_put:
62847 * Always returns 0 thus allowing this function to be tail-called at the end
62848 * of, say, sys_setgid().
62849 */
62850-int commit_creds(struct cred *new)
62851+static int __commit_creds(struct cred *new)
62852 {
62853 struct task_struct *task = current;
62854 const struct cred *old = task->real_cred;
62855@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
62856
62857 get_cred(new); /* we will require a ref for the subj creds too */
62858
62859+ gr_set_role_label(task, new->uid, new->gid);
62860+
62861 /* dumpability changes */
62862 if (old->euid != new->euid ||
62863 old->egid != new->egid ||
62864@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
62865 put_cred(old);
62866 return 0;
62867 }
62868+#ifdef CONFIG_GRKERNSEC_SETXID
62869+extern int set_user(struct cred *new);
62870+
62871+void gr_delayed_cred_worker(void)
62872+{
62873+ const struct cred *new = current->delayed_cred;
62874+ struct cred *ncred;
62875+
62876+ current->delayed_cred = NULL;
62877+
62878+ if (current_uid() && new != NULL) {
62879+ // from doing get_cred on it when queueing this
62880+ put_cred(new);
62881+ return;
62882+ } else if (new == NULL)
62883+ return;
62884+
62885+ ncred = prepare_creds();
62886+ if (!ncred)
62887+ goto die;
62888+ // uids
62889+ ncred->uid = new->uid;
62890+ ncred->euid = new->euid;
62891+ ncred->suid = new->suid;
62892+ ncred->fsuid = new->fsuid;
62893+ // gids
62894+ ncred->gid = new->gid;
62895+ ncred->egid = new->egid;
62896+ ncred->sgid = new->sgid;
62897+ ncred->fsgid = new->fsgid;
62898+ // groups
62899+ if (set_groups(ncred, new->group_info) < 0) {
62900+ abort_creds(ncred);
62901+ goto die;
62902+ }
62903+ // caps
62904+ ncred->securebits = new->securebits;
62905+ ncred->cap_inheritable = new->cap_inheritable;
62906+ ncred->cap_permitted = new->cap_permitted;
62907+ ncred->cap_effective = new->cap_effective;
62908+ ncred->cap_bset = new->cap_bset;
62909+
62910+ if (set_user(ncred)) {
62911+ abort_creds(ncred);
62912+ goto die;
62913+ }
62914+
62915+ // from doing get_cred on it when queueing this
62916+ put_cred(new);
62917+
62918+ __commit_creds(ncred);
62919+ return;
62920+die:
62921+ // from doing get_cred on it when queueing this
62922+ put_cred(new);
62923+ do_group_exit(SIGKILL);
62924+}
62925+#endif
62926+
62927+int commit_creds(struct cred *new)
62928+{
62929+#ifdef CONFIG_GRKERNSEC_SETXID
62930+ struct task_struct *t;
62931+
62932+ /* we won't get called with tasklist_lock held for writing
62933+ and interrupts disabled as the cred struct in that case is
62934+ init_cred
62935+ */
62936+ if (grsec_enable_setxid && !current_is_single_threaded() &&
62937+ !current_uid() && new->uid) {
62938+ rcu_read_lock();
62939+ read_lock(&tasklist_lock);
62940+ for (t = next_thread(current); t != current;
62941+ t = next_thread(t)) {
62942+ if (t->delayed_cred == NULL) {
62943+ t->delayed_cred = get_cred(new);
62944+ set_tsk_need_resched(t);
62945+ }
62946+ }
62947+ read_unlock(&tasklist_lock);
62948+ rcu_read_unlock();
62949+ }
62950+#endif
62951+ return __commit_creds(new);
62952+}
62953+
62954 EXPORT_SYMBOL(commit_creds);
62955
62956 /**
62957diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
62958index 0d7c087..01b8cef 100644
62959--- a/kernel/debug/debug_core.c
62960+++ b/kernel/debug/debug_core.c
62961@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
62962 */
62963 static atomic_t masters_in_kgdb;
62964 static atomic_t slaves_in_kgdb;
62965-static atomic_t kgdb_break_tasklet_var;
62966+static atomic_unchecked_t kgdb_break_tasklet_var;
62967 atomic_t kgdb_setting_breakpoint;
62968
62969 struct task_struct *kgdb_usethread;
62970@@ -129,7 +129,7 @@ int kgdb_single_step;
62971 static pid_t kgdb_sstep_pid;
62972
62973 /* to keep track of the CPU which is doing the single stepping*/
62974-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62975+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62976
62977 /*
62978 * If you are debugging a problem where roundup (the collection of
62979@@ -542,7 +542,7 @@ return_normal:
62980 * kernel will only try for the value of sstep_tries before
62981 * giving up and continuing on.
62982 */
62983- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62984+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62985 (kgdb_info[cpu].task &&
62986 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62987 atomic_set(&kgdb_active, -1);
62988@@ -636,8 +636,8 @@ cpu_master_loop:
62989 }
62990
62991 kgdb_restore:
62992- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62993- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62994+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62995+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62996 if (kgdb_info[sstep_cpu].task)
62997 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62998 else
62999@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63000 static void kgdb_tasklet_bpt(unsigned long ing)
63001 {
63002 kgdb_breakpoint();
63003- atomic_set(&kgdb_break_tasklet_var, 0);
63004+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63005 }
63006
63007 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63008
63009 void kgdb_schedule_breakpoint(void)
63010 {
63011- if (atomic_read(&kgdb_break_tasklet_var) ||
63012+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63013 atomic_read(&kgdb_active) != -1 ||
63014 atomic_read(&kgdb_setting_breakpoint))
63015 return;
63016- atomic_inc(&kgdb_break_tasklet_var);
63017+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
63018 tasklet_schedule(&kgdb_tasklet_breakpoint);
63019 }
63020 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63021diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63022index 63786e7..0780cac 100644
63023--- a/kernel/debug/kdb/kdb_main.c
63024+++ b/kernel/debug/kdb/kdb_main.c
63025@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63026 list_for_each_entry(mod, kdb_modules, list) {
63027
63028 kdb_printf("%-20s%8u 0x%p ", mod->name,
63029- mod->core_size, (void *)mod);
63030+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
63031 #ifdef CONFIG_MODULE_UNLOAD
63032 kdb_printf("%4d ", module_refcount(mod));
63033 #endif
63034@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63035 kdb_printf(" (Loading)");
63036 else
63037 kdb_printf(" (Live)");
63038- kdb_printf(" 0x%p", mod->module_core);
63039+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63040
63041 #ifdef CONFIG_MODULE_UNLOAD
63042 {
63043diff --git a/kernel/events/core.c b/kernel/events/core.c
63044index 58690af..d903d75 100644
63045--- a/kernel/events/core.c
63046+++ b/kernel/events/core.c
63047@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63048 return 0;
63049 }
63050
63051-static atomic64_t perf_event_id;
63052+static atomic64_unchecked_t perf_event_id;
63053
63054 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63055 enum event_type_t event_type);
63056@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63057
63058 static inline u64 perf_event_count(struct perf_event *event)
63059 {
63060- return local64_read(&event->count) + atomic64_read(&event->child_count);
63061+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63062 }
63063
63064 static u64 perf_event_read(struct perf_event *event)
63065@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63066 mutex_lock(&event->child_mutex);
63067 total += perf_event_read(event);
63068 *enabled += event->total_time_enabled +
63069- atomic64_read(&event->child_total_time_enabled);
63070+ atomic64_read_unchecked(&event->child_total_time_enabled);
63071 *running += event->total_time_running +
63072- atomic64_read(&event->child_total_time_running);
63073+ atomic64_read_unchecked(&event->child_total_time_running);
63074
63075 list_for_each_entry(child, &event->child_list, child_list) {
63076 total += perf_event_read(child);
63077@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63078 userpg->offset -= local64_read(&event->hw.prev_count);
63079
63080 userpg->time_enabled = enabled +
63081- atomic64_read(&event->child_total_time_enabled);
63082+ atomic64_read_unchecked(&event->child_total_time_enabled);
63083
63084 userpg->time_running = running +
63085- atomic64_read(&event->child_total_time_running);
63086+ atomic64_read_unchecked(&event->child_total_time_running);
63087
63088 barrier();
63089 ++userpg->lock;
63090@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63091 values[n++] = perf_event_count(event);
63092 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63093 values[n++] = enabled +
63094- atomic64_read(&event->child_total_time_enabled);
63095+ atomic64_read_unchecked(&event->child_total_time_enabled);
63096 }
63097 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63098 values[n++] = running +
63099- atomic64_read(&event->child_total_time_running);
63100+ atomic64_read_unchecked(&event->child_total_time_running);
63101 }
63102 if (read_format & PERF_FORMAT_ID)
63103 values[n++] = primary_event_id(event);
63104@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63105 * need to add enough zero bytes after the string to handle
63106 * the 64bit alignment we do later.
63107 */
63108- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63109+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
63110 if (!buf) {
63111 name = strncpy(tmp, "//enomem", sizeof(tmp));
63112 goto got_name;
63113 }
63114- name = d_path(&file->f_path, buf, PATH_MAX);
63115+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63116 if (IS_ERR(name)) {
63117 name = strncpy(tmp, "//toolong", sizeof(tmp));
63118 goto got_name;
63119@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63120 event->parent = parent_event;
63121
63122 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63123- event->id = atomic64_inc_return(&perf_event_id);
63124+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
63125
63126 event->state = PERF_EVENT_STATE_INACTIVE;
63127
63128@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63129 /*
63130 * Add back the child's count to the parent's count:
63131 */
63132- atomic64_add(child_val, &parent_event->child_count);
63133- atomic64_add(child_event->total_time_enabled,
63134+ atomic64_add_unchecked(child_val, &parent_event->child_count);
63135+ atomic64_add_unchecked(child_event->total_time_enabled,
63136 &parent_event->child_total_time_enabled);
63137- atomic64_add(child_event->total_time_running,
63138+ atomic64_add_unchecked(child_event->total_time_running,
63139 &parent_event->child_total_time_running);
63140
63141 /*
63142diff --git a/kernel/exit.c b/kernel/exit.c
63143index e6e01b9..619f837 100644
63144--- a/kernel/exit.c
63145+++ b/kernel/exit.c
63146@@ -57,6 +57,10 @@
63147 #include <asm/pgtable.h>
63148 #include <asm/mmu_context.h>
63149
63150+#ifdef CONFIG_GRKERNSEC
63151+extern rwlock_t grsec_exec_file_lock;
63152+#endif
63153+
63154 static void exit_mm(struct task_struct * tsk);
63155
63156 static void __unhash_process(struct task_struct *p, bool group_dead)
63157@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63158 struct task_struct *leader;
63159 int zap_leader;
63160 repeat:
63161+#ifdef CONFIG_NET
63162+ gr_del_task_from_ip_table(p);
63163+#endif
63164+
63165 /* don't need to get the RCU readlock here - the process is dead and
63166 * can't be modifying its own credentials. But shut RCU-lockdep up */
63167 rcu_read_lock();
63168@@ -380,7 +388,7 @@ int allow_signal(int sig)
63169 * know it'll be handled, so that they don't get converted to
63170 * SIGKILL or just silently dropped.
63171 */
63172- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63173+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63174 recalc_sigpending();
63175 spin_unlock_irq(&current->sighand->siglock);
63176 return 0;
63177@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63178 vsnprintf(current->comm, sizeof(current->comm), name, args);
63179 va_end(args);
63180
63181+#ifdef CONFIG_GRKERNSEC
63182+ write_lock(&grsec_exec_file_lock);
63183+ if (current->exec_file) {
63184+ fput(current->exec_file);
63185+ current->exec_file = NULL;
63186+ }
63187+ write_unlock(&grsec_exec_file_lock);
63188+#endif
63189+
63190+ gr_set_kernel_label(current);
63191+
63192 /*
63193 * If we were started as result of loading a module, close all of the
63194 * user space pages. We don't need them, and if we didn't close them
63195@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63196 struct task_struct *tsk = current;
63197 int group_dead;
63198
63199+ set_fs(USER_DS);
63200+
63201 profile_task_exit(tsk);
63202
63203 WARN_ON(blk_needs_flush_plug(tsk));
63204@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63205 * mm_release()->clear_child_tid() from writing to a user-controlled
63206 * kernel address.
63207 */
63208- set_fs(USER_DS);
63209
63210 ptrace_event(PTRACE_EVENT_EXIT, code);
63211
63212@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63213 tsk->exit_code = code;
63214 taskstats_exit(tsk, group_dead);
63215
63216+ gr_acl_handle_psacct(tsk, code);
63217+ gr_acl_handle_exit();
63218+
63219 exit_mm(tsk);
63220
63221 if (group_dead)
63222diff --git a/kernel/fork.c b/kernel/fork.c
63223index da4a6a1..0973380 100644
63224--- a/kernel/fork.c
63225+++ b/kernel/fork.c
63226@@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63227 *stackend = STACK_END_MAGIC; /* for overflow detection */
63228
63229 #ifdef CONFIG_CC_STACKPROTECTOR
63230- tsk->stack_canary = get_random_int();
63231+ tsk->stack_canary = pax_get_random_long();
63232 #endif
63233
63234 /*
63235@@ -304,13 +304,77 @@ out:
63236 }
63237
63238 #ifdef CONFIG_MMU
63239+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63240+{
63241+ struct vm_area_struct *tmp;
63242+ unsigned long charge;
63243+ struct mempolicy *pol;
63244+ struct file *file;
63245+
63246+ charge = 0;
63247+ if (mpnt->vm_flags & VM_ACCOUNT) {
63248+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63249+ if (security_vm_enough_memory(len))
63250+ goto fail_nomem;
63251+ charge = len;
63252+ }
63253+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63254+ if (!tmp)
63255+ goto fail_nomem;
63256+ *tmp = *mpnt;
63257+ tmp->vm_mm = mm;
63258+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63259+ pol = mpol_dup(vma_policy(mpnt));
63260+ if (IS_ERR(pol))
63261+ goto fail_nomem_policy;
63262+ vma_set_policy(tmp, pol);
63263+ if (anon_vma_fork(tmp, mpnt))
63264+ goto fail_nomem_anon_vma_fork;
63265+ tmp->vm_flags &= ~VM_LOCKED;
63266+ tmp->vm_next = tmp->vm_prev = NULL;
63267+ tmp->vm_mirror = NULL;
63268+ file = tmp->vm_file;
63269+ if (file) {
63270+ struct inode *inode = file->f_path.dentry->d_inode;
63271+ struct address_space *mapping = file->f_mapping;
63272+
63273+ get_file(file);
63274+ if (tmp->vm_flags & VM_DENYWRITE)
63275+ atomic_dec(&inode->i_writecount);
63276+ mutex_lock(&mapping->i_mmap_mutex);
63277+ if (tmp->vm_flags & VM_SHARED)
63278+ mapping->i_mmap_writable++;
63279+ flush_dcache_mmap_lock(mapping);
63280+ /* insert tmp into the share list, just after mpnt */
63281+ vma_prio_tree_add(tmp, mpnt);
63282+ flush_dcache_mmap_unlock(mapping);
63283+ mutex_unlock(&mapping->i_mmap_mutex);
63284+ }
63285+
63286+ /*
63287+ * Clear hugetlb-related page reserves for children. This only
63288+ * affects MAP_PRIVATE mappings. Faults generated by the child
63289+ * are not guaranteed to succeed, even if read-only
63290+ */
63291+ if (is_vm_hugetlb_page(tmp))
63292+ reset_vma_resv_huge_pages(tmp);
63293+
63294+ return tmp;
63295+
63296+fail_nomem_anon_vma_fork:
63297+ mpol_put(pol);
63298+fail_nomem_policy:
63299+ kmem_cache_free(vm_area_cachep, tmp);
63300+fail_nomem:
63301+ vm_unacct_memory(charge);
63302+ return NULL;
63303+}
63304+
63305 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63306 {
63307 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63308 struct rb_node **rb_link, *rb_parent;
63309 int retval;
63310- unsigned long charge;
63311- struct mempolicy *pol;
63312
63313 down_write(&oldmm->mmap_sem);
63314 flush_cache_dup_mm(oldmm);
63315@@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63316 mm->locked_vm = 0;
63317 mm->mmap = NULL;
63318 mm->mmap_cache = NULL;
63319- mm->free_area_cache = oldmm->mmap_base;
63320- mm->cached_hole_size = ~0UL;
63321+ mm->free_area_cache = oldmm->free_area_cache;
63322+ mm->cached_hole_size = oldmm->cached_hole_size;
63323 mm->map_count = 0;
63324 cpumask_clear(mm_cpumask(mm));
63325 mm->mm_rb = RB_ROOT;
63326@@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63327
63328 prev = NULL;
63329 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63330- struct file *file;
63331-
63332 if (mpnt->vm_flags & VM_DONTCOPY) {
63333 long pages = vma_pages(mpnt);
63334 mm->total_vm -= pages;
63335@@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63336 -pages);
63337 continue;
63338 }
63339- charge = 0;
63340- if (mpnt->vm_flags & VM_ACCOUNT) {
63341- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63342- if (security_vm_enough_memory(len))
63343- goto fail_nomem;
63344- charge = len;
63345+ tmp = dup_vma(mm, mpnt);
63346+ if (!tmp) {
63347+ retval = -ENOMEM;
63348+ goto out;
63349 }
63350- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63351- if (!tmp)
63352- goto fail_nomem;
63353- *tmp = *mpnt;
63354- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63355- pol = mpol_dup(vma_policy(mpnt));
63356- retval = PTR_ERR(pol);
63357- if (IS_ERR(pol))
63358- goto fail_nomem_policy;
63359- vma_set_policy(tmp, pol);
63360- tmp->vm_mm = mm;
63361- if (anon_vma_fork(tmp, mpnt))
63362- goto fail_nomem_anon_vma_fork;
63363- tmp->vm_flags &= ~VM_LOCKED;
63364- tmp->vm_next = tmp->vm_prev = NULL;
63365- file = tmp->vm_file;
63366- if (file) {
63367- struct inode *inode = file->f_path.dentry->d_inode;
63368- struct address_space *mapping = file->f_mapping;
63369-
63370- get_file(file);
63371- if (tmp->vm_flags & VM_DENYWRITE)
63372- atomic_dec(&inode->i_writecount);
63373- mutex_lock(&mapping->i_mmap_mutex);
63374- if (tmp->vm_flags & VM_SHARED)
63375- mapping->i_mmap_writable++;
63376- flush_dcache_mmap_lock(mapping);
63377- /* insert tmp into the share list, just after mpnt */
63378- vma_prio_tree_add(tmp, mpnt);
63379- flush_dcache_mmap_unlock(mapping);
63380- mutex_unlock(&mapping->i_mmap_mutex);
63381- }
63382-
63383- /*
63384- * Clear hugetlb-related page reserves for children. This only
63385- * affects MAP_PRIVATE mappings. Faults generated by the child
63386- * are not guaranteed to succeed, even if read-only
63387- */
63388- if (is_vm_hugetlb_page(tmp))
63389- reset_vma_resv_huge_pages(tmp);
63390
63391 /*
63392 * Link in the new vma and copy the page table entries.
63393@@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63394 if (retval)
63395 goto out;
63396 }
63397+
63398+#ifdef CONFIG_PAX_SEGMEXEC
63399+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63400+ struct vm_area_struct *mpnt_m;
63401+
63402+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63403+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63404+
63405+ if (!mpnt->vm_mirror)
63406+ continue;
63407+
63408+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63409+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63410+ mpnt->vm_mirror = mpnt_m;
63411+ } else {
63412+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63413+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63414+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63415+ mpnt->vm_mirror->vm_mirror = mpnt;
63416+ }
63417+ }
63418+ BUG_ON(mpnt_m);
63419+ }
63420+#endif
63421+
63422 /* a new mm has just been created */
63423 arch_dup_mmap(oldmm, mm);
63424 retval = 0;
63425@@ -425,14 +470,6 @@ out:
63426 flush_tlb_mm(oldmm);
63427 up_write(&oldmm->mmap_sem);
63428 return retval;
63429-fail_nomem_anon_vma_fork:
63430- mpol_put(pol);
63431-fail_nomem_policy:
63432- kmem_cache_free(vm_area_cachep, tmp);
63433-fail_nomem:
63434- retval = -ENOMEM;
63435- vm_unacct_memory(charge);
63436- goto out;
63437 }
63438
63439 static inline int mm_alloc_pgd(struct mm_struct *mm)
63440@@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63441 }
63442 EXPORT_SYMBOL_GPL(get_task_mm);
63443
63444+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63445+{
63446+ struct mm_struct *mm;
63447+ int err;
63448+
63449+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63450+ if (err)
63451+ return ERR_PTR(err);
63452+
63453+ mm = get_task_mm(task);
63454+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63455+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63456+ mmput(mm);
63457+ mm = ERR_PTR(-EACCES);
63458+ }
63459+ mutex_unlock(&task->signal->cred_guard_mutex);
63460+
63461+ return mm;
63462+}
63463+
63464 /* Please note the differences between mmput and mm_release.
63465 * mmput is called whenever we stop holding onto a mm_struct,
63466 * error success whatever.
63467@@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63468 spin_unlock(&fs->lock);
63469 return -EAGAIN;
63470 }
63471- fs->users++;
63472+ atomic_inc(&fs->users);
63473 spin_unlock(&fs->lock);
63474 return 0;
63475 }
63476 tsk->fs = copy_fs_struct(fs);
63477 if (!tsk->fs)
63478 return -ENOMEM;
63479+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63480 return 0;
63481 }
63482
63483@@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63484 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63485 #endif
63486 retval = -EAGAIN;
63487+
63488+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63489+
63490 if (atomic_read(&p->real_cred->user->processes) >=
63491 task_rlimit(p, RLIMIT_NPROC)) {
63492 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63493@@ -1256,6 +1317,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63494 if (clone_flags & CLONE_THREAD)
63495 p->tgid = current->tgid;
63496
63497+ gr_copy_label(p);
63498+
63499 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63500 /*
63501 * Clear TID on mm_release()?
63502@@ -1418,6 +1481,8 @@ bad_fork_cleanup_count:
63503 bad_fork_free:
63504 free_task(p);
63505 fork_out:
63506+ gr_log_forkfail(retval);
63507+
63508 return ERR_PTR(retval);
63509 }
63510
63511@@ -1518,6 +1583,8 @@ long do_fork(unsigned long clone_flags,
63512 if (clone_flags & CLONE_PARENT_SETTID)
63513 put_user(nr, parent_tidptr);
63514
63515+ gr_handle_brute_check();
63516+
63517 if (clone_flags & CLONE_VFORK) {
63518 p->vfork_done = &vfork;
63519 init_completion(&vfork);
63520@@ -1627,7 +1694,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63521 return 0;
63522
63523 /* don't need lock here; in the worst case we'll do useless copy */
63524- if (fs->users == 1)
63525+ if (atomic_read(&fs->users) == 1)
63526 return 0;
63527
63528 *new_fsp = copy_fs_struct(fs);
63529@@ -1716,7 +1783,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63530 fs = current->fs;
63531 spin_lock(&fs->lock);
63532 current->fs = new_fs;
63533- if (--fs->users)
63534+ gr_set_chroot_entries(current, &current->fs->root);
63535+ if (atomic_dec_return(&fs->users))
63536 new_fs = NULL;
63537 else
63538 new_fs = fs;
63539diff --git a/kernel/futex.c b/kernel/futex.c
63540index 1614be2..37abc7e 100644
63541--- a/kernel/futex.c
63542+++ b/kernel/futex.c
63543@@ -54,6 +54,7 @@
63544 #include <linux/mount.h>
63545 #include <linux/pagemap.h>
63546 #include <linux/syscalls.h>
63547+#include <linux/ptrace.h>
63548 #include <linux/signal.h>
63549 #include <linux/export.h>
63550 #include <linux/magic.h>
63551@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63552 struct page *page, *page_head;
63553 int err, ro = 0;
63554
63555+#ifdef CONFIG_PAX_SEGMEXEC
63556+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63557+ return -EFAULT;
63558+#endif
63559+
63560 /*
63561 * The futex address must be "naturally" aligned.
63562 */
63563@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63564 if (!p)
63565 goto err_unlock;
63566 ret = -EPERM;
63567+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63568+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63569+ goto err_unlock;
63570+#endif
63571 pcred = __task_cred(p);
63572 /* If victim is in different user_ns, then uids are not
63573 comparable, so we must have CAP_SYS_PTRACE */
63574@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63575 {
63576 u32 curval;
63577 int i;
63578+ mm_segment_t oldfs;
63579
63580 /*
63581 * This will fail and we want it. Some arch implementations do
63582@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63583 * implementation, the non-functional ones will return
63584 * -ENOSYS.
63585 */
63586+ oldfs = get_fs();
63587+ set_fs(USER_DS);
63588 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63589 futex_cmpxchg_enabled = 1;
63590+ set_fs(oldfs);
63591
63592 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63593 plist_head_init(&futex_queues[i].chain);
63594diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63595index 5f9e689..582d46d 100644
63596--- a/kernel/futex_compat.c
63597+++ b/kernel/futex_compat.c
63598@@ -10,6 +10,7 @@
63599 #include <linux/compat.h>
63600 #include <linux/nsproxy.h>
63601 #include <linux/futex.h>
63602+#include <linux/ptrace.h>
63603
63604 #include <asm/uaccess.h>
63605
63606@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63607 {
63608 struct compat_robust_list_head __user *head;
63609 unsigned long ret;
63610- const struct cred *cred = current_cred(), *pcred;
63611+ const struct cred *cred = current_cred();
63612+ const struct cred *pcred;
63613
63614 if (!futex_cmpxchg_enabled)
63615 return -ENOSYS;
63616@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63617 if (!p)
63618 goto err_unlock;
63619 ret = -EPERM;
63620+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63621+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63622+ goto err_unlock;
63623+#endif
63624 pcred = __task_cred(p);
63625 /* If victim is in different user_ns, then uids are not
63626 comparable, so we must have CAP_SYS_PTRACE */
63627diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63628index 9b22d03..6295b62 100644
63629--- a/kernel/gcov/base.c
63630+++ b/kernel/gcov/base.c
63631@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63632 }
63633
63634 #ifdef CONFIG_MODULES
63635-static inline int within(void *addr, void *start, unsigned long size)
63636-{
63637- return ((addr >= start) && (addr < start + size));
63638-}
63639-
63640 /* Update list and generate events when modules are unloaded. */
63641 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63642 void *data)
63643@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63644 prev = NULL;
63645 /* Remove entries located in module from linked list. */
63646 for (info = gcov_info_head; info; info = info->next) {
63647- if (within(info, mod->module_core, mod->core_size)) {
63648+ if (within_module_core_rw((unsigned long)info, mod)) {
63649 if (prev)
63650 prev->next = info->next;
63651 else
63652diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63653index ae34bf5..4e2f3d0 100644
63654--- a/kernel/hrtimer.c
63655+++ b/kernel/hrtimer.c
63656@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63657 local_irq_restore(flags);
63658 }
63659
63660-static void run_hrtimer_softirq(struct softirq_action *h)
63661+static void run_hrtimer_softirq(void)
63662 {
63663 hrtimer_peek_ahead_timers();
63664 }
63665diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63666index 66ff710..05a5128 100644
63667--- a/kernel/jump_label.c
63668+++ b/kernel/jump_label.c
63669@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63670
63671 size = (((unsigned long)stop - (unsigned long)start)
63672 / sizeof(struct jump_entry));
63673+ pax_open_kernel();
63674 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63675+ pax_close_kernel();
63676 }
63677
63678 static void jump_label_update(struct jump_label_key *key, int enable);
63679@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63680 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63681 struct jump_entry *iter;
63682
63683+ pax_open_kernel();
63684 for (iter = iter_start; iter < iter_stop; iter++) {
63685 if (within_module_init(iter->code, mod))
63686 iter->code = 0;
63687 }
63688+ pax_close_kernel();
63689 }
63690
63691 static int
63692diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63693index 079f1d3..a407562 100644
63694--- a/kernel/kallsyms.c
63695+++ b/kernel/kallsyms.c
63696@@ -11,6 +11,9 @@
63697 * Changed the compression method from stem compression to "table lookup"
63698 * compression (see scripts/kallsyms.c for a more complete description)
63699 */
63700+#ifdef CONFIG_GRKERNSEC_HIDESYM
63701+#define __INCLUDED_BY_HIDESYM 1
63702+#endif
63703 #include <linux/kallsyms.h>
63704 #include <linux/module.h>
63705 #include <linux/init.h>
63706@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63707
63708 static inline int is_kernel_inittext(unsigned long addr)
63709 {
63710+ if (system_state != SYSTEM_BOOTING)
63711+ return 0;
63712+
63713 if (addr >= (unsigned long)_sinittext
63714 && addr <= (unsigned long)_einittext)
63715 return 1;
63716 return 0;
63717 }
63718
63719+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63720+#ifdef CONFIG_MODULES
63721+static inline int is_module_text(unsigned long addr)
63722+{
63723+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63724+ return 1;
63725+
63726+ addr = ktla_ktva(addr);
63727+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63728+}
63729+#else
63730+static inline int is_module_text(unsigned long addr)
63731+{
63732+ return 0;
63733+}
63734+#endif
63735+#endif
63736+
63737 static inline int is_kernel_text(unsigned long addr)
63738 {
63739 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63740@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63741
63742 static inline int is_kernel(unsigned long addr)
63743 {
63744+
63745+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63746+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63747+ return 1;
63748+
63749+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63750+#else
63751 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63752+#endif
63753+
63754 return 1;
63755 return in_gate_area_no_mm(addr);
63756 }
63757
63758 static int is_ksym_addr(unsigned long addr)
63759 {
63760+
63761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63762+ if (is_module_text(addr))
63763+ return 0;
63764+#endif
63765+
63766 if (all_var)
63767 return is_kernel(addr);
63768
63769@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63770
63771 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63772 {
63773- iter->name[0] = '\0';
63774 iter->nameoff = get_symbol_offset(new_pos);
63775 iter->pos = new_pos;
63776 }
63777@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63778 {
63779 struct kallsym_iter *iter = m->private;
63780
63781+#ifdef CONFIG_GRKERNSEC_HIDESYM
63782+ if (current_uid())
63783+ return 0;
63784+#endif
63785+
63786 /* Some debugging symbols have no name. Ignore them. */
63787 if (!iter->name[0])
63788 return 0;
63789@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63790 struct kallsym_iter *iter;
63791 int ret;
63792
63793- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63794+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63795 if (!iter)
63796 return -ENOMEM;
63797 reset_iter(iter, 0);
63798diff --git a/kernel/kexec.c b/kernel/kexec.c
63799index dc7bc08..4601964 100644
63800--- a/kernel/kexec.c
63801+++ b/kernel/kexec.c
63802@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63803 unsigned long flags)
63804 {
63805 struct compat_kexec_segment in;
63806- struct kexec_segment out, __user *ksegments;
63807+ struct kexec_segment out;
63808+ struct kexec_segment __user *ksegments;
63809 unsigned long i, result;
63810
63811 /* Don't allow clients that don't understand the native
63812diff --git a/kernel/kmod.c b/kernel/kmod.c
63813index a4bea97..7a1ae9a 100644
63814--- a/kernel/kmod.c
63815+++ b/kernel/kmod.c
63816@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63817 * If module auto-loading support is disabled then this function
63818 * becomes a no-operation.
63819 */
63820-int __request_module(bool wait, const char *fmt, ...)
63821+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63822 {
63823- va_list args;
63824 char module_name[MODULE_NAME_LEN];
63825 unsigned int max_modprobes;
63826 int ret;
63827- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63828+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63829 static char *envp[] = { "HOME=/",
63830 "TERM=linux",
63831 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63832@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63833 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63834 static int kmod_loop_msg;
63835
63836- va_start(args, fmt);
63837- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63838- va_end(args);
63839+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63840 if (ret >= MODULE_NAME_LEN)
63841 return -ENAMETOOLONG;
63842
63843@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63844 if (ret)
63845 return ret;
63846
63847+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63848+ if (!current_uid()) {
63849+ /* hack to workaround consolekit/udisks stupidity */
63850+ read_lock(&tasklist_lock);
63851+ if (!strcmp(current->comm, "mount") &&
63852+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63853+ read_unlock(&tasklist_lock);
63854+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63855+ return -EPERM;
63856+ }
63857+ read_unlock(&tasklist_lock);
63858+ }
63859+#endif
63860+
63861 /* If modprobe needs a service that is in a module, we get a recursive
63862 * loop. Limit the number of running kmod threads to max_threads/2 or
63863 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63864@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
63865 atomic_dec(&kmod_concurrent);
63866 return ret;
63867 }
63868+
63869+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63870+{
63871+ va_list args;
63872+ int ret;
63873+
63874+ va_start(args, fmt);
63875+ ret = ____request_module(wait, module_param, fmt, args);
63876+ va_end(args);
63877+
63878+ return ret;
63879+}
63880+
63881+int __request_module(bool wait, const char *fmt, ...)
63882+{
63883+ va_list args;
63884+ int ret;
63885+
63886+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63887+ if (current_uid()) {
63888+ char module_param[MODULE_NAME_LEN];
63889+
63890+ memset(module_param, 0, sizeof(module_param));
63891+
63892+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63893+
63894+ va_start(args, fmt);
63895+ ret = ____request_module(wait, module_param, fmt, args);
63896+ va_end(args);
63897+
63898+ return ret;
63899+ }
63900+#endif
63901+
63902+ va_start(args, fmt);
63903+ ret = ____request_module(wait, NULL, fmt, args);
63904+ va_end(args);
63905+
63906+ return ret;
63907+}
63908+
63909 EXPORT_SYMBOL(__request_module);
63910 #endif /* CONFIG_MODULES */
63911
63912@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63913 *
63914 * Thus the __user pointer cast is valid here.
63915 */
63916- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63917+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63918
63919 /*
63920 * If ret is 0, either ____call_usermodehelper failed and the
63921diff --git a/kernel/kprobes.c b/kernel/kprobes.c
63922index 52fd049..3def6a8 100644
63923--- a/kernel/kprobes.c
63924+++ b/kernel/kprobes.c
63925@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
63926 * kernel image and loaded module images reside. This is required
63927 * so x86_64 can correctly handle the %rip-relative fixups.
63928 */
63929- kip->insns = module_alloc(PAGE_SIZE);
63930+ kip->insns = module_alloc_exec(PAGE_SIZE);
63931 if (!kip->insns) {
63932 kfree(kip);
63933 return NULL;
63934@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
63935 */
63936 if (!list_is_singular(&kip->list)) {
63937 list_del(&kip->list);
63938- module_free(NULL, kip->insns);
63939+ module_free_exec(NULL, kip->insns);
63940 kfree(kip);
63941 }
63942 return 1;
63943@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63944 {
63945 int i, err = 0;
63946 unsigned long offset = 0, size = 0;
63947- char *modname, namebuf[128];
63948+ char *modname, namebuf[KSYM_NAME_LEN];
63949 const char *symbol_name;
63950 void *addr;
63951 struct kprobe_blackpoint *kb;
63952@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
63953 const char *sym = NULL;
63954 unsigned int i = *(loff_t *) v;
63955 unsigned long offset = 0;
63956- char *modname, namebuf[128];
63957+ char *modname, namebuf[KSYM_NAME_LEN];
63958
63959 head = &kprobe_table[i];
63960 preempt_disable();
63961diff --git a/kernel/lockdep.c b/kernel/lockdep.c
63962index b2e08c9..01d8049 100644
63963--- a/kernel/lockdep.c
63964+++ b/kernel/lockdep.c
63965@@ -592,6 +592,10 @@ static int static_obj(void *obj)
63966 end = (unsigned long) &_end,
63967 addr = (unsigned long) obj;
63968
63969+#ifdef CONFIG_PAX_KERNEXEC
63970+ start = ktla_ktva(start);
63971+#endif
63972+
63973 /*
63974 * static variable?
63975 */
63976@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
63977 if (!static_obj(lock->key)) {
63978 debug_locks_off();
63979 printk("INFO: trying to register non-static key.\n");
63980+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63981 printk("the code is fine but needs lockdep annotation.\n");
63982 printk("turning off the locking correctness validator.\n");
63983 dump_stack();
63984@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
63985 if (!class)
63986 return 0;
63987 }
63988- atomic_inc((atomic_t *)&class->ops);
63989+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63990 if (very_verbose(class)) {
63991 printk("\nacquire class [%p] %s", class->key, class->name);
63992 if (class->name_version > 1)
63993diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
63994index 91c32a0..b2c71c5 100644
63995--- a/kernel/lockdep_proc.c
63996+++ b/kernel/lockdep_proc.c
63997@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
63998
63999 static void print_name(struct seq_file *m, struct lock_class *class)
64000 {
64001- char str[128];
64002+ char str[KSYM_NAME_LEN];
64003 const char *name = class->name;
64004
64005 if (!name) {
64006diff --git a/kernel/module.c b/kernel/module.c
64007index 178333c..04e3408 100644
64008--- a/kernel/module.c
64009+++ b/kernel/module.c
64010@@ -58,6 +58,7 @@
64011 #include <linux/jump_label.h>
64012 #include <linux/pfn.h>
64013 #include <linux/bsearch.h>
64014+#include <linux/grsecurity.h>
64015
64016 #define CREATE_TRACE_POINTS
64017 #include <trace/events/module.h>
64018@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64019
64020 /* Bounds of module allocation, for speeding __module_address.
64021 * Protected by module_mutex. */
64022-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64023+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64024+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64025
64026 int register_module_notifier(struct notifier_block * nb)
64027 {
64028@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64029 return true;
64030
64031 list_for_each_entry_rcu(mod, &modules, list) {
64032- struct symsearch arr[] = {
64033+ struct symsearch modarr[] = {
64034 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64035 NOT_GPL_ONLY, false },
64036 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64037@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64038 #endif
64039 };
64040
64041- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64042+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64043 return true;
64044 }
64045 return false;
64046@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64047 static int percpu_modalloc(struct module *mod,
64048 unsigned long size, unsigned long align)
64049 {
64050- if (align > PAGE_SIZE) {
64051+ if (align-1 >= PAGE_SIZE) {
64052 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64053 mod->name, align, PAGE_SIZE);
64054 align = PAGE_SIZE;
64055@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64056 */
64057 #ifdef CONFIG_SYSFS
64058
64059-#ifdef CONFIG_KALLSYMS
64060+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64061 static inline bool sect_empty(const Elf_Shdr *sect)
64062 {
64063 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64064@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64065
64066 static void unset_module_core_ro_nx(struct module *mod)
64067 {
64068- set_page_attributes(mod->module_core + mod->core_text_size,
64069- mod->module_core + mod->core_size,
64070+ set_page_attributes(mod->module_core_rw,
64071+ mod->module_core_rw + mod->core_size_rw,
64072 set_memory_x);
64073- set_page_attributes(mod->module_core,
64074- mod->module_core + mod->core_ro_size,
64075+ set_page_attributes(mod->module_core_rx,
64076+ mod->module_core_rx + mod->core_size_rx,
64077 set_memory_rw);
64078 }
64079
64080 static void unset_module_init_ro_nx(struct module *mod)
64081 {
64082- set_page_attributes(mod->module_init + mod->init_text_size,
64083- mod->module_init + mod->init_size,
64084+ set_page_attributes(mod->module_init_rw,
64085+ mod->module_init_rw + mod->init_size_rw,
64086 set_memory_x);
64087- set_page_attributes(mod->module_init,
64088- mod->module_init + mod->init_ro_size,
64089+ set_page_attributes(mod->module_init_rx,
64090+ mod->module_init_rx + mod->init_size_rx,
64091 set_memory_rw);
64092 }
64093
64094@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64095
64096 mutex_lock(&module_mutex);
64097 list_for_each_entry_rcu(mod, &modules, list) {
64098- if ((mod->module_core) && (mod->core_text_size)) {
64099- set_page_attributes(mod->module_core,
64100- mod->module_core + mod->core_text_size,
64101+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64102+ set_page_attributes(mod->module_core_rx,
64103+ mod->module_core_rx + mod->core_size_rx,
64104 set_memory_rw);
64105 }
64106- if ((mod->module_init) && (mod->init_text_size)) {
64107- set_page_attributes(mod->module_init,
64108- mod->module_init + mod->init_text_size,
64109+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64110+ set_page_attributes(mod->module_init_rx,
64111+ mod->module_init_rx + mod->init_size_rx,
64112 set_memory_rw);
64113 }
64114 }
64115@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64116
64117 mutex_lock(&module_mutex);
64118 list_for_each_entry_rcu(mod, &modules, list) {
64119- if ((mod->module_core) && (mod->core_text_size)) {
64120- set_page_attributes(mod->module_core,
64121- mod->module_core + mod->core_text_size,
64122+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64123+ set_page_attributes(mod->module_core_rx,
64124+ mod->module_core_rx + mod->core_size_rx,
64125 set_memory_ro);
64126 }
64127- if ((mod->module_init) && (mod->init_text_size)) {
64128- set_page_attributes(mod->module_init,
64129- mod->module_init + mod->init_text_size,
64130+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64131+ set_page_attributes(mod->module_init_rx,
64132+ mod->module_init_rx + mod->init_size_rx,
64133 set_memory_ro);
64134 }
64135 }
64136@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64137
64138 /* This may be NULL, but that's OK */
64139 unset_module_init_ro_nx(mod);
64140- module_free(mod, mod->module_init);
64141+ module_free(mod, mod->module_init_rw);
64142+ module_free_exec(mod, mod->module_init_rx);
64143 kfree(mod->args);
64144 percpu_modfree(mod);
64145
64146 /* Free lock-classes: */
64147- lockdep_free_key_range(mod->module_core, mod->core_size);
64148+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64149+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64150
64151 /* Finally, free the core (containing the module structure) */
64152 unset_module_core_ro_nx(mod);
64153- module_free(mod, mod->module_core);
64154+ module_free_exec(mod, mod->module_core_rx);
64155+ module_free(mod, mod->module_core_rw);
64156
64157 #ifdef CONFIG_MPU
64158 update_protections(current->mm);
64159@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64160 unsigned int i;
64161 int ret = 0;
64162 const struct kernel_symbol *ksym;
64163+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64164+ int is_fs_load = 0;
64165+ int register_filesystem_found = 0;
64166+ char *p;
64167+
64168+ p = strstr(mod->args, "grsec_modharden_fs");
64169+ if (p) {
64170+ char *endptr = p + strlen("grsec_modharden_fs");
64171+ /* copy \0 as well */
64172+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64173+ is_fs_load = 1;
64174+ }
64175+#endif
64176
64177 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64178 const char *name = info->strtab + sym[i].st_name;
64179
64180+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64181+ /* it's a real shame this will never get ripped and copied
64182+ upstream! ;(
64183+ */
64184+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64185+ register_filesystem_found = 1;
64186+#endif
64187+
64188 switch (sym[i].st_shndx) {
64189 case SHN_COMMON:
64190 /* We compiled with -fno-common. These are not
64191@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64192 ksym = resolve_symbol_wait(mod, info, name);
64193 /* Ok if resolved. */
64194 if (ksym && !IS_ERR(ksym)) {
64195+ pax_open_kernel();
64196 sym[i].st_value = ksym->value;
64197+ pax_close_kernel();
64198 break;
64199 }
64200
64201@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64202 secbase = (unsigned long)mod_percpu(mod);
64203 else
64204 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64205+ pax_open_kernel();
64206 sym[i].st_value += secbase;
64207+ pax_close_kernel();
64208 break;
64209 }
64210 }
64211
64212+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64213+ if (is_fs_load && !register_filesystem_found) {
64214+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64215+ ret = -EPERM;
64216+ }
64217+#endif
64218+
64219 return ret;
64220 }
64221
64222@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64223 || s->sh_entsize != ~0UL
64224 || strstarts(sname, ".init"))
64225 continue;
64226- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64227+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64228+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64229+ else
64230+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64231 DEBUGP("\t%s\n", name);
64232 }
64233- switch (m) {
64234- case 0: /* executable */
64235- mod->core_size = debug_align(mod->core_size);
64236- mod->core_text_size = mod->core_size;
64237- break;
64238- case 1: /* RO: text and ro-data */
64239- mod->core_size = debug_align(mod->core_size);
64240- mod->core_ro_size = mod->core_size;
64241- break;
64242- case 3: /* whole core */
64243- mod->core_size = debug_align(mod->core_size);
64244- break;
64245- }
64246 }
64247
64248 DEBUGP("Init section allocation order:\n");
64249@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64250 || s->sh_entsize != ~0UL
64251 || !strstarts(sname, ".init"))
64252 continue;
64253- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64254- | INIT_OFFSET_MASK);
64255+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64256+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64257+ else
64258+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64259+ s->sh_entsize |= INIT_OFFSET_MASK;
64260 DEBUGP("\t%s\n", sname);
64261 }
64262- switch (m) {
64263- case 0: /* executable */
64264- mod->init_size = debug_align(mod->init_size);
64265- mod->init_text_size = mod->init_size;
64266- break;
64267- case 1: /* RO: text and ro-data */
64268- mod->init_size = debug_align(mod->init_size);
64269- mod->init_ro_size = mod->init_size;
64270- break;
64271- case 3: /* whole init */
64272- mod->init_size = debug_align(mod->init_size);
64273- break;
64274- }
64275 }
64276 }
64277
64278@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64279
64280 /* Put symbol section at end of init part of module. */
64281 symsect->sh_flags |= SHF_ALLOC;
64282- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64283+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64284 info->index.sym) | INIT_OFFSET_MASK;
64285 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64286
64287@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64288 }
64289
64290 /* Append room for core symbols at end of core part. */
64291- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64292- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64293+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64294+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64295
64296 /* Put string table section at end of init part of module. */
64297 strsect->sh_flags |= SHF_ALLOC;
64298- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64299+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64300 info->index.str) | INIT_OFFSET_MASK;
64301 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64302
64303 /* Append room for core symbols' strings at end of core part. */
64304- info->stroffs = mod->core_size;
64305+ info->stroffs = mod->core_size_rx;
64306 __set_bit(0, info->strmap);
64307- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64308+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64309 }
64310
64311 static void add_kallsyms(struct module *mod, const struct load_info *info)
64312@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64313 /* Make sure we get permanent strtab: don't use info->strtab. */
64314 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64315
64316+ pax_open_kernel();
64317+
64318 /* Set types up while we still have access to sections. */
64319 for (i = 0; i < mod->num_symtab; i++)
64320 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64321
64322- mod->core_symtab = dst = mod->module_core + info->symoffs;
64323+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64324 src = mod->symtab;
64325 *dst = *src;
64326 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64327@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64328 }
64329 mod->core_num_syms = ndst;
64330
64331- mod->core_strtab = s = mod->module_core + info->stroffs;
64332+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64333 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64334 if (test_bit(i, info->strmap))
64335 *++s = mod->strtab[i];
64336+
64337+ pax_close_kernel();
64338 }
64339 #else
64340 static inline void layout_symtab(struct module *mod, struct load_info *info)
64341@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64342 return size == 0 ? NULL : vmalloc_exec(size);
64343 }
64344
64345-static void *module_alloc_update_bounds(unsigned long size)
64346+static void *module_alloc_update_bounds_rw(unsigned long size)
64347 {
64348 void *ret = module_alloc(size);
64349
64350 if (ret) {
64351 mutex_lock(&module_mutex);
64352 /* Update module bounds. */
64353- if ((unsigned long)ret < module_addr_min)
64354- module_addr_min = (unsigned long)ret;
64355- if ((unsigned long)ret + size > module_addr_max)
64356- module_addr_max = (unsigned long)ret + size;
64357+ if ((unsigned long)ret < module_addr_min_rw)
64358+ module_addr_min_rw = (unsigned long)ret;
64359+ if ((unsigned long)ret + size > module_addr_max_rw)
64360+ module_addr_max_rw = (unsigned long)ret + size;
64361+ mutex_unlock(&module_mutex);
64362+ }
64363+ return ret;
64364+}
64365+
64366+static void *module_alloc_update_bounds_rx(unsigned long size)
64367+{
64368+ void *ret = module_alloc_exec(size);
64369+
64370+ if (ret) {
64371+ mutex_lock(&module_mutex);
64372+ /* Update module bounds. */
64373+ if ((unsigned long)ret < module_addr_min_rx)
64374+ module_addr_min_rx = (unsigned long)ret;
64375+ if ((unsigned long)ret + size > module_addr_max_rx)
64376+ module_addr_max_rx = (unsigned long)ret + size;
64377 mutex_unlock(&module_mutex);
64378 }
64379 return ret;
64380@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64381 static int check_modinfo(struct module *mod, struct load_info *info)
64382 {
64383 const char *modmagic = get_modinfo(info, "vermagic");
64384+ const char *license = get_modinfo(info, "license");
64385 int err;
64386
64387+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64388+ if (!license || !license_is_gpl_compatible(license))
64389+ return -ENOEXEC;
64390+#endif
64391+
64392 /* This is allowed: modprobe --force will invalidate it. */
64393 if (!modmagic) {
64394 err = try_to_force_load(mod, "bad vermagic");
64395@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64396 }
64397
64398 /* Set up license info based on the info section */
64399- set_license(mod, get_modinfo(info, "license"));
64400+ set_license(mod, license);
64401
64402 return 0;
64403 }
64404@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64405 void *ptr;
64406
64407 /* Do the allocs. */
64408- ptr = module_alloc_update_bounds(mod->core_size);
64409+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64410 /*
64411 * The pointer to this block is stored in the module structure
64412 * which is inside the block. Just mark it as not being a
64413@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64414 if (!ptr)
64415 return -ENOMEM;
64416
64417- memset(ptr, 0, mod->core_size);
64418- mod->module_core = ptr;
64419+ memset(ptr, 0, mod->core_size_rw);
64420+ mod->module_core_rw = ptr;
64421
64422- ptr = module_alloc_update_bounds(mod->init_size);
64423+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64424 /*
64425 * The pointer to this block is stored in the module structure
64426 * which is inside the block. This block doesn't need to be
64427 * scanned as it contains data and code that will be freed
64428 * after the module is initialized.
64429 */
64430- kmemleak_ignore(ptr);
64431- if (!ptr && mod->init_size) {
64432- module_free(mod, mod->module_core);
64433+ kmemleak_not_leak(ptr);
64434+ if (!ptr && mod->init_size_rw) {
64435+ module_free(mod, mod->module_core_rw);
64436 return -ENOMEM;
64437 }
64438- memset(ptr, 0, mod->init_size);
64439- mod->module_init = ptr;
64440+ memset(ptr, 0, mod->init_size_rw);
64441+ mod->module_init_rw = ptr;
64442+
64443+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64444+ kmemleak_not_leak(ptr);
64445+ if (!ptr) {
64446+ module_free(mod, mod->module_init_rw);
64447+ module_free(mod, mod->module_core_rw);
64448+ return -ENOMEM;
64449+ }
64450+
64451+ pax_open_kernel();
64452+ memset(ptr, 0, mod->core_size_rx);
64453+ pax_close_kernel();
64454+ mod->module_core_rx = ptr;
64455+
64456+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64457+ kmemleak_not_leak(ptr);
64458+ if (!ptr && mod->init_size_rx) {
64459+ module_free_exec(mod, mod->module_core_rx);
64460+ module_free(mod, mod->module_init_rw);
64461+ module_free(mod, mod->module_core_rw);
64462+ return -ENOMEM;
64463+ }
64464+
64465+ pax_open_kernel();
64466+ memset(ptr, 0, mod->init_size_rx);
64467+ pax_close_kernel();
64468+ mod->module_init_rx = ptr;
64469
64470 /* Transfer each section which specifies SHF_ALLOC */
64471 DEBUGP("final section addresses:\n");
64472@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64473 if (!(shdr->sh_flags & SHF_ALLOC))
64474 continue;
64475
64476- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64477- dest = mod->module_init
64478- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64479- else
64480- dest = mod->module_core + shdr->sh_entsize;
64481+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64482+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64483+ dest = mod->module_init_rw
64484+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64485+ else
64486+ dest = mod->module_init_rx
64487+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64488+ } else {
64489+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64490+ dest = mod->module_core_rw + shdr->sh_entsize;
64491+ else
64492+ dest = mod->module_core_rx + shdr->sh_entsize;
64493+ }
64494+
64495+ if (shdr->sh_type != SHT_NOBITS) {
64496+
64497+#ifdef CONFIG_PAX_KERNEXEC
64498+#ifdef CONFIG_X86_64
64499+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64500+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64501+#endif
64502+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64503+ pax_open_kernel();
64504+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64505+ pax_close_kernel();
64506+ } else
64507+#endif
64508
64509- if (shdr->sh_type != SHT_NOBITS)
64510 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64511+ }
64512 /* Update sh_addr to point to copy in image. */
64513- shdr->sh_addr = (unsigned long)dest;
64514+
64515+#ifdef CONFIG_PAX_KERNEXEC
64516+ if (shdr->sh_flags & SHF_EXECINSTR)
64517+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64518+ else
64519+#endif
64520+
64521+ shdr->sh_addr = (unsigned long)dest;
64522 DEBUGP("\t0x%lx %s\n",
64523 shdr->sh_addr, info->secstrings + shdr->sh_name);
64524 }
64525@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64526 * Do it before processing of module parameters, so the module
64527 * can provide parameter accessor functions of its own.
64528 */
64529- if (mod->module_init)
64530- flush_icache_range((unsigned long)mod->module_init,
64531- (unsigned long)mod->module_init
64532- + mod->init_size);
64533- flush_icache_range((unsigned long)mod->module_core,
64534- (unsigned long)mod->module_core + mod->core_size);
64535+ if (mod->module_init_rx)
64536+ flush_icache_range((unsigned long)mod->module_init_rx,
64537+ (unsigned long)mod->module_init_rx
64538+ + mod->init_size_rx);
64539+ flush_icache_range((unsigned long)mod->module_core_rx,
64540+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64541
64542 set_fs(old_fs);
64543 }
64544@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64545 {
64546 kfree(info->strmap);
64547 percpu_modfree(mod);
64548- module_free(mod, mod->module_init);
64549- module_free(mod, mod->module_core);
64550+ module_free_exec(mod, mod->module_init_rx);
64551+ module_free_exec(mod, mod->module_core_rx);
64552+ module_free(mod, mod->module_init_rw);
64553+ module_free(mod, mod->module_core_rw);
64554 }
64555
64556 int __weak module_finalize(const Elf_Ehdr *hdr,
64557@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64558 if (err)
64559 goto free_unload;
64560
64561+ /* Now copy in args */
64562+ mod->args = strndup_user(uargs, ~0UL >> 1);
64563+ if (IS_ERR(mod->args)) {
64564+ err = PTR_ERR(mod->args);
64565+ goto free_unload;
64566+ }
64567+
64568 /* Set up MODINFO_ATTR fields */
64569 setup_modinfo(mod, &info);
64570
64571+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64572+ {
64573+ char *p, *p2;
64574+
64575+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64576+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64577+ err = -EPERM;
64578+ goto free_modinfo;
64579+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64580+ p += strlen("grsec_modharden_normal");
64581+ p2 = strstr(p, "_");
64582+ if (p2) {
64583+ *p2 = '\0';
64584+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64585+ *p2 = '_';
64586+ }
64587+ err = -EPERM;
64588+ goto free_modinfo;
64589+ }
64590+ }
64591+#endif
64592+
64593 /* Fix up syms, so that st_value is a pointer to location. */
64594 err = simplify_symbols(mod, &info);
64595 if (err < 0)
64596@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64597
64598 flush_module_icache(mod);
64599
64600- /* Now copy in args */
64601- mod->args = strndup_user(uargs, ~0UL >> 1);
64602- if (IS_ERR(mod->args)) {
64603- err = PTR_ERR(mod->args);
64604- goto free_arch_cleanup;
64605- }
64606-
64607 /* Mark state as coming so strong_try_module_get() ignores us. */
64608 mod->state = MODULE_STATE_COMING;
64609
64610@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64611 unlock:
64612 mutex_unlock(&module_mutex);
64613 synchronize_sched();
64614- kfree(mod->args);
64615- free_arch_cleanup:
64616 module_arch_cleanup(mod);
64617 free_modinfo:
64618 free_modinfo(mod);
64619+ kfree(mod->args);
64620 free_unload:
64621 module_unload_free(mod);
64622 free_module:
64623@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64624 MODULE_STATE_COMING, mod);
64625
64626 /* Set RO and NX regions for core */
64627- set_section_ro_nx(mod->module_core,
64628- mod->core_text_size,
64629- mod->core_ro_size,
64630- mod->core_size);
64631+ set_section_ro_nx(mod->module_core_rx,
64632+ mod->core_size_rx,
64633+ mod->core_size_rx,
64634+ mod->core_size_rx);
64635
64636 /* Set RO and NX regions for init */
64637- set_section_ro_nx(mod->module_init,
64638- mod->init_text_size,
64639- mod->init_ro_size,
64640- mod->init_size);
64641+ set_section_ro_nx(mod->module_init_rx,
64642+ mod->init_size_rx,
64643+ mod->init_size_rx,
64644+ mod->init_size_rx);
64645
64646 do_mod_ctors(mod);
64647 /* Start the module */
64648@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64649 mod->strtab = mod->core_strtab;
64650 #endif
64651 unset_module_init_ro_nx(mod);
64652- module_free(mod, mod->module_init);
64653- mod->module_init = NULL;
64654- mod->init_size = 0;
64655- mod->init_ro_size = 0;
64656- mod->init_text_size = 0;
64657+ module_free(mod, mod->module_init_rw);
64658+ module_free_exec(mod, mod->module_init_rx);
64659+ mod->module_init_rw = NULL;
64660+ mod->module_init_rx = NULL;
64661+ mod->init_size_rw = 0;
64662+ mod->init_size_rx = 0;
64663 mutex_unlock(&module_mutex);
64664
64665 return 0;
64666@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64667 unsigned long nextval;
64668
64669 /* At worse, next value is at end of module */
64670- if (within_module_init(addr, mod))
64671- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64672+ if (within_module_init_rx(addr, mod))
64673+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64674+ else if (within_module_init_rw(addr, mod))
64675+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64676+ else if (within_module_core_rx(addr, mod))
64677+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64678+ else if (within_module_core_rw(addr, mod))
64679+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64680 else
64681- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64682+ return NULL;
64683
64684 /* Scan for closest preceding symbol, and next symbol. (ELF
64685 starts real symbols at 1). */
64686@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64687 char buf[8];
64688
64689 seq_printf(m, "%s %u",
64690- mod->name, mod->init_size + mod->core_size);
64691+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64692 print_unload_info(m, mod);
64693
64694 /* Informative for users. */
64695@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64696 mod->state == MODULE_STATE_COMING ? "Loading":
64697 "Live");
64698 /* Used by oprofile and other similar tools. */
64699- seq_printf(m, " 0x%pK", mod->module_core);
64700+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64701
64702 /* Taints info */
64703 if (mod->taints)
64704@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64705
64706 static int __init proc_modules_init(void)
64707 {
64708+#ifndef CONFIG_GRKERNSEC_HIDESYM
64709+#ifdef CONFIG_GRKERNSEC_PROC_USER
64710+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64711+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64712+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64713+#else
64714 proc_create("modules", 0, NULL, &proc_modules_operations);
64715+#endif
64716+#else
64717+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64718+#endif
64719 return 0;
64720 }
64721 module_init(proc_modules_init);
64722@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64723 {
64724 struct module *mod;
64725
64726- if (addr < module_addr_min || addr > module_addr_max)
64727+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64728+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64729 return NULL;
64730
64731 list_for_each_entry_rcu(mod, &modules, list)
64732- if (within_module_core(addr, mod)
64733- || within_module_init(addr, mod))
64734+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64735 return mod;
64736 return NULL;
64737 }
64738@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64739 */
64740 struct module *__module_text_address(unsigned long addr)
64741 {
64742- struct module *mod = __module_address(addr);
64743+ struct module *mod;
64744+
64745+#ifdef CONFIG_X86_32
64746+ addr = ktla_ktva(addr);
64747+#endif
64748+
64749+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64750+ return NULL;
64751+
64752+ mod = __module_address(addr);
64753+
64754 if (mod) {
64755 /* Make sure it's within the text section. */
64756- if (!within(addr, mod->module_init, mod->init_text_size)
64757- && !within(addr, mod->module_core, mod->core_text_size))
64758+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64759 mod = NULL;
64760 }
64761 return mod;
64762diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64763index 7e3443f..b2a1e6b 100644
64764--- a/kernel/mutex-debug.c
64765+++ b/kernel/mutex-debug.c
64766@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64767 }
64768
64769 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64770- struct thread_info *ti)
64771+ struct task_struct *task)
64772 {
64773 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64774
64775 /* Mark the current thread as blocked on the lock: */
64776- ti->task->blocked_on = waiter;
64777+ task->blocked_on = waiter;
64778 }
64779
64780 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64781- struct thread_info *ti)
64782+ struct task_struct *task)
64783 {
64784 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64785- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64786- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64787- ti->task->blocked_on = NULL;
64788+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64789+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64790+ task->blocked_on = NULL;
64791
64792 list_del_init(&waiter->list);
64793 waiter->task = NULL;
64794diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64795index 0799fd3..d06ae3b 100644
64796--- a/kernel/mutex-debug.h
64797+++ b/kernel/mutex-debug.h
64798@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64799 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64800 extern void debug_mutex_add_waiter(struct mutex *lock,
64801 struct mutex_waiter *waiter,
64802- struct thread_info *ti);
64803+ struct task_struct *task);
64804 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64805- struct thread_info *ti);
64806+ struct task_struct *task);
64807 extern void debug_mutex_unlock(struct mutex *lock);
64808 extern void debug_mutex_init(struct mutex *lock, const char *name,
64809 struct lock_class_key *key);
64810diff --git a/kernel/mutex.c b/kernel/mutex.c
64811index 89096dd..f91ebc5 100644
64812--- a/kernel/mutex.c
64813+++ b/kernel/mutex.c
64814@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64815 spin_lock_mutex(&lock->wait_lock, flags);
64816
64817 debug_mutex_lock_common(lock, &waiter);
64818- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64819+ debug_mutex_add_waiter(lock, &waiter, task);
64820
64821 /* add waiting tasks to the end of the waitqueue (FIFO): */
64822 list_add_tail(&waiter.list, &lock->wait_list);
64823@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64824 * TASK_UNINTERRUPTIBLE case.)
64825 */
64826 if (unlikely(signal_pending_state(state, task))) {
64827- mutex_remove_waiter(lock, &waiter,
64828- task_thread_info(task));
64829+ mutex_remove_waiter(lock, &waiter, task);
64830 mutex_release(&lock->dep_map, 1, ip);
64831 spin_unlock_mutex(&lock->wait_lock, flags);
64832
64833@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64834 done:
64835 lock_acquired(&lock->dep_map, ip);
64836 /* got the lock - rejoice! */
64837- mutex_remove_waiter(lock, &waiter, current_thread_info());
64838+ mutex_remove_waiter(lock, &waiter, task);
64839 mutex_set_owner(lock);
64840
64841 /* set it to 0 if there are no waiters left: */
64842diff --git a/kernel/padata.c b/kernel/padata.c
64843index b452599..5d68f4e 100644
64844--- a/kernel/padata.c
64845+++ b/kernel/padata.c
64846@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64847 padata->pd = pd;
64848 padata->cb_cpu = cb_cpu;
64849
64850- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64851- atomic_set(&pd->seq_nr, -1);
64852+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64853+ atomic_set_unchecked(&pd->seq_nr, -1);
64854
64855- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64856+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64857
64858 target_cpu = padata_cpu_hash(padata);
64859 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64860@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
64861 padata_init_pqueues(pd);
64862 padata_init_squeues(pd);
64863 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64864- atomic_set(&pd->seq_nr, -1);
64865+ atomic_set_unchecked(&pd->seq_nr, -1);
64866 atomic_set(&pd->reorder_objects, 0);
64867 atomic_set(&pd->refcnt, 0);
64868 pd->pinst = pinst;
64869diff --git a/kernel/panic.c b/kernel/panic.c
64870index b2659360..5972a0f 100644
64871--- a/kernel/panic.c
64872+++ b/kernel/panic.c
64873@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
64874 va_end(args);
64875 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
64876 #ifdef CONFIG_DEBUG_BUGVERBOSE
64877- dump_stack();
64878+ /*
64879+ * Avoid nested stack-dumping if a panic occurs during oops processing
64880+ */
64881+ if (!oops_in_progress)
64882+ dump_stack();
64883 #endif
64884
64885 /*
64886@@ -373,7 +377,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
64887 const char *board;
64888
64889 printk(KERN_WARNING "------------[ cut here ]------------\n");
64890- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64891+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64892 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64893 if (board)
64894 printk(KERN_WARNING "Hardware name: %s\n", board);
64895@@ -428,7 +432,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64896 */
64897 void __stack_chk_fail(void)
64898 {
64899- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64900+ dump_stack();
64901+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64902 __builtin_return_address(0));
64903 }
64904 EXPORT_SYMBOL(__stack_chk_fail);
64905diff --git a/kernel/pid.c b/kernel/pid.c
64906index fa5f722..0c93e57 100644
64907--- a/kernel/pid.c
64908+++ b/kernel/pid.c
64909@@ -33,6 +33,7 @@
64910 #include <linux/rculist.h>
64911 #include <linux/bootmem.h>
64912 #include <linux/hash.h>
64913+#include <linux/security.h>
64914 #include <linux/pid_namespace.h>
64915 #include <linux/init_task.h>
64916 #include <linux/syscalls.h>
64917@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
64918
64919 int pid_max = PID_MAX_DEFAULT;
64920
64921-#define RESERVED_PIDS 300
64922+#define RESERVED_PIDS 500
64923
64924 int pid_max_min = RESERVED_PIDS + 1;
64925 int pid_max_max = PID_MAX_LIMIT;
64926@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
64927 */
64928 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64929 {
64930+ struct task_struct *task;
64931+
64932 rcu_lockdep_assert(rcu_read_lock_held(),
64933 "find_task_by_pid_ns() needs rcu_read_lock()"
64934 " protection");
64935- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64936+
64937+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64938+
64939+ if (gr_pid_is_chrooted(task))
64940+ return NULL;
64941+
64942+ return task;
64943 }
64944
64945 struct task_struct *find_task_by_vpid(pid_t vnr)
64946@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
64947 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64948 }
64949
64950+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64951+{
64952+ rcu_lockdep_assert(rcu_read_lock_held(),
64953+ "find_task_by_pid_ns() needs rcu_read_lock()"
64954+ " protection");
64955+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64956+}
64957+
64958 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64959 {
64960 struct pid *pid;
64961diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
64962index e7cb76d..75eceb3 100644
64963--- a/kernel/posix-cpu-timers.c
64964+++ b/kernel/posix-cpu-timers.c
64965@@ -6,6 +6,7 @@
64966 #include <linux/posix-timers.h>
64967 #include <linux/errno.h>
64968 #include <linux/math64.h>
64969+#include <linux/security.h>
64970 #include <asm/uaccess.h>
64971 #include <linux/kernel_stat.h>
64972 #include <trace/events/timer.h>
64973@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64974
64975 static __init int init_posix_cpu_timers(void)
64976 {
64977- struct k_clock process = {
64978+ static struct k_clock process = {
64979 .clock_getres = process_cpu_clock_getres,
64980 .clock_get = process_cpu_clock_get,
64981 .timer_create = process_cpu_timer_create,
64982 .nsleep = process_cpu_nsleep,
64983 .nsleep_restart = process_cpu_nsleep_restart,
64984 };
64985- struct k_clock thread = {
64986+ static struct k_clock thread = {
64987 .clock_getres = thread_cpu_clock_getres,
64988 .clock_get = thread_cpu_clock_get,
64989 .timer_create = thread_cpu_timer_create,
64990diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
64991index 69185ae..cc2847a 100644
64992--- a/kernel/posix-timers.c
64993+++ b/kernel/posix-timers.c
64994@@ -43,6 +43,7 @@
64995 #include <linux/idr.h>
64996 #include <linux/posix-clock.h>
64997 #include <linux/posix-timers.h>
64998+#include <linux/grsecurity.h>
64999 #include <linux/syscalls.h>
65000 #include <linux/wait.h>
65001 #include <linux/workqueue.h>
65002@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65003 * which we beg off on and pass to do_sys_settimeofday().
65004 */
65005
65006-static struct k_clock posix_clocks[MAX_CLOCKS];
65007+static struct k_clock *posix_clocks[MAX_CLOCKS];
65008
65009 /*
65010 * These ones are defined below.
65011@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65012 */
65013 static __init int init_posix_timers(void)
65014 {
65015- struct k_clock clock_realtime = {
65016+ static struct k_clock clock_realtime = {
65017 .clock_getres = hrtimer_get_res,
65018 .clock_get = posix_clock_realtime_get,
65019 .clock_set = posix_clock_realtime_set,
65020@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65021 .timer_get = common_timer_get,
65022 .timer_del = common_timer_del,
65023 };
65024- struct k_clock clock_monotonic = {
65025+ static struct k_clock clock_monotonic = {
65026 .clock_getres = hrtimer_get_res,
65027 .clock_get = posix_ktime_get_ts,
65028 .nsleep = common_nsleep,
65029@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65030 .timer_get = common_timer_get,
65031 .timer_del = common_timer_del,
65032 };
65033- struct k_clock clock_monotonic_raw = {
65034+ static struct k_clock clock_monotonic_raw = {
65035 .clock_getres = hrtimer_get_res,
65036 .clock_get = posix_get_monotonic_raw,
65037 };
65038- struct k_clock clock_realtime_coarse = {
65039+ static struct k_clock clock_realtime_coarse = {
65040 .clock_getres = posix_get_coarse_res,
65041 .clock_get = posix_get_realtime_coarse,
65042 };
65043- struct k_clock clock_monotonic_coarse = {
65044+ static struct k_clock clock_monotonic_coarse = {
65045 .clock_getres = posix_get_coarse_res,
65046 .clock_get = posix_get_monotonic_coarse,
65047 };
65048- struct k_clock clock_boottime = {
65049+ static struct k_clock clock_boottime = {
65050 .clock_getres = hrtimer_get_res,
65051 .clock_get = posix_get_boottime,
65052 .nsleep = common_nsleep,
65053@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65054 return;
65055 }
65056
65057- posix_clocks[clock_id] = *new_clock;
65058+ posix_clocks[clock_id] = new_clock;
65059 }
65060 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65061
65062@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65063 return (id & CLOCKFD_MASK) == CLOCKFD ?
65064 &clock_posix_dynamic : &clock_posix_cpu;
65065
65066- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65067+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65068 return NULL;
65069- return &posix_clocks[id];
65070+ return posix_clocks[id];
65071 }
65072
65073 static int common_timer_create(struct k_itimer *new_timer)
65074@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65075 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65076 return -EFAULT;
65077
65078+ /* only the CLOCK_REALTIME clock can be set, all other clocks
65079+ have their clock_set fptr set to a nosettime dummy function
65080+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65081+ call common_clock_set, which calls do_sys_settimeofday, which
65082+ we hook
65083+ */
65084+
65085 return kc->clock_set(which_clock, &new_tp);
65086 }
65087
65088diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65089index d523593..68197a4 100644
65090--- a/kernel/power/poweroff.c
65091+++ b/kernel/power/poweroff.c
65092@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65093 .enable_mask = SYSRQ_ENABLE_BOOT,
65094 };
65095
65096-static int pm_sysrq_init(void)
65097+static int __init pm_sysrq_init(void)
65098 {
65099 register_sysrq_key('o', &sysrq_poweroff_op);
65100 return 0;
65101diff --git a/kernel/power/process.c b/kernel/power/process.c
65102index addbbe5..f9e32e0 100644
65103--- a/kernel/power/process.c
65104+++ b/kernel/power/process.c
65105@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65106 u64 elapsed_csecs64;
65107 unsigned int elapsed_csecs;
65108 bool wakeup = false;
65109+ bool timedout = false;
65110
65111 do_gettimeofday(&start);
65112
65113@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65114
65115 while (true) {
65116 todo = 0;
65117+ if (time_after(jiffies, end_time))
65118+ timedout = true;
65119 read_lock(&tasklist_lock);
65120 do_each_thread(g, p) {
65121 if (frozen(p) || !freezable(p))
65122@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65123 * try_to_stop() after schedule() in ptrace/signal
65124 * stop sees TIF_FREEZE.
65125 */
65126- if (!task_is_stopped_or_traced(p) &&
65127- !freezer_should_skip(p))
65128+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65129 todo++;
65130+ if (timedout) {
65131+ printk(KERN_ERR "Task refusing to freeze:\n");
65132+ sched_show_task(p);
65133+ }
65134+ }
65135 } while_each_thread(g, p);
65136 read_unlock(&tasklist_lock);
65137
65138@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65139 todo += wq_busy;
65140 }
65141
65142- if (!todo || time_after(jiffies, end_time))
65143+ if (!todo || timedout)
65144 break;
65145
65146 if (pm_wakeup_pending()) {
65147diff --git a/kernel/printk.c b/kernel/printk.c
65148index 7982a0a..2095fdc 100644
65149--- a/kernel/printk.c
65150+++ b/kernel/printk.c
65151@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65152 if (from_file && type != SYSLOG_ACTION_OPEN)
65153 return 0;
65154
65155+#ifdef CONFIG_GRKERNSEC_DMESG
65156+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65157+ return -EPERM;
65158+#endif
65159+
65160 if (syslog_action_restricted(type)) {
65161 if (capable(CAP_SYSLOG))
65162 return 0;
65163diff --git a/kernel/profile.c b/kernel/profile.c
65164index 76b8e77..a2930e8 100644
65165--- a/kernel/profile.c
65166+++ b/kernel/profile.c
65167@@ -39,7 +39,7 @@ struct profile_hit {
65168 /* Oprofile timer tick hook */
65169 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65170
65171-static atomic_t *prof_buffer;
65172+static atomic_unchecked_t *prof_buffer;
65173 static unsigned long prof_len, prof_shift;
65174
65175 int prof_on __read_mostly;
65176@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65177 hits[i].pc = 0;
65178 continue;
65179 }
65180- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65181+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65182 hits[i].hits = hits[i].pc = 0;
65183 }
65184 }
65185@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65186 * Add the current hit(s) and flush the write-queue out
65187 * to the global buffer:
65188 */
65189- atomic_add(nr_hits, &prof_buffer[pc]);
65190+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65191 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65192- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65193+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65194 hits[i].pc = hits[i].hits = 0;
65195 }
65196 out:
65197@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65198 {
65199 unsigned long pc;
65200 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65201- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65202+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65203 }
65204 #endif /* !CONFIG_SMP */
65205
65206@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65207 return -EFAULT;
65208 buf++; p++; count--; read++;
65209 }
65210- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65211+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65212 if (copy_to_user(buf, (void *)pnt, count))
65213 return -EFAULT;
65214 read += count;
65215@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65216 }
65217 #endif
65218 profile_discard_flip_buffers();
65219- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65220+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65221 return count;
65222 }
65223
65224diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65225index 78ab24a..332c915 100644
65226--- a/kernel/ptrace.c
65227+++ b/kernel/ptrace.c
65228@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65229 return ret;
65230 }
65231
65232-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65233+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65234+ unsigned int log)
65235 {
65236 const struct cred *cred = current_cred(), *tcred;
65237
65238@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65239 cred->gid == tcred->sgid &&
65240 cred->gid == tcred->gid))
65241 goto ok;
65242- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65243+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65244+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65245 goto ok;
65246 rcu_read_unlock();
65247 return -EPERM;
65248@@ -207,7 +209,9 @@ ok:
65249 smp_rmb();
65250 if (task->mm)
65251 dumpable = get_dumpable(task->mm);
65252- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65253+ if (!dumpable &&
65254+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65255+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65256 return -EPERM;
65257
65258 return security_ptrace_access_check(task, mode);
65259@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65260 {
65261 int err;
65262 task_lock(task);
65263- err = __ptrace_may_access(task, mode);
65264+ err = __ptrace_may_access(task, mode, 0);
65265+ task_unlock(task);
65266+ return !err;
65267+}
65268+
65269+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65270+{
65271+ return __ptrace_may_access(task, mode, 0);
65272+}
65273+
65274+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65275+{
65276+ int err;
65277+ task_lock(task);
65278+ err = __ptrace_may_access(task, mode, 1);
65279 task_unlock(task);
65280 return !err;
65281 }
65282@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65283 goto out;
65284
65285 task_lock(task);
65286- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65287+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65288 task_unlock(task);
65289 if (retval)
65290 goto unlock_creds;
65291@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65292 task->ptrace = PT_PTRACED;
65293 if (seize)
65294 task->ptrace |= PT_SEIZED;
65295- if (task_ns_capable(task, CAP_SYS_PTRACE))
65296+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65297 task->ptrace |= PT_PTRACE_CAP;
65298
65299 __ptrace_link(task, current);
65300@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65301 break;
65302 return -EIO;
65303 }
65304- if (copy_to_user(dst, buf, retval))
65305+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65306 return -EFAULT;
65307 copied += retval;
65308 src += retval;
65309@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65310 bool seized = child->ptrace & PT_SEIZED;
65311 int ret = -EIO;
65312 siginfo_t siginfo, *si;
65313- void __user *datavp = (void __user *) data;
65314+ void __user *datavp = (__force void __user *) data;
65315 unsigned long __user *datalp = datavp;
65316 unsigned long flags;
65317
65318@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65319 goto out;
65320 }
65321
65322+ if (gr_handle_ptrace(child, request)) {
65323+ ret = -EPERM;
65324+ goto out_put_task_struct;
65325+ }
65326+
65327 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65328 ret = ptrace_attach(child, request, data);
65329 /*
65330 * Some architectures need to do book-keeping after
65331 * a ptrace attach.
65332 */
65333- if (!ret)
65334+ if (!ret) {
65335 arch_ptrace_attach(child);
65336+ gr_audit_ptrace(child);
65337+ }
65338 goto out_put_task_struct;
65339 }
65340
65341@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65342 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65343 if (copied != sizeof(tmp))
65344 return -EIO;
65345- return put_user(tmp, (unsigned long __user *)data);
65346+ return put_user(tmp, (__force unsigned long __user *)data);
65347 }
65348
65349 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65350@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65351 goto out;
65352 }
65353
65354+ if (gr_handle_ptrace(child, request)) {
65355+ ret = -EPERM;
65356+ goto out_put_task_struct;
65357+ }
65358+
65359 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65360 ret = ptrace_attach(child, request, data);
65361 /*
65362 * Some architectures need to do book-keeping after
65363 * a ptrace attach.
65364 */
65365- if (!ret)
65366+ if (!ret) {
65367 arch_ptrace_attach(child);
65368+ gr_audit_ptrace(child);
65369+ }
65370 goto out_put_task_struct;
65371 }
65372
65373diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65374index 764825c..3aa6ac4 100644
65375--- a/kernel/rcutorture.c
65376+++ b/kernel/rcutorture.c
65377@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65378 { 0 };
65379 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65380 { 0 };
65381-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65382-static atomic_t n_rcu_torture_alloc;
65383-static atomic_t n_rcu_torture_alloc_fail;
65384-static atomic_t n_rcu_torture_free;
65385-static atomic_t n_rcu_torture_mberror;
65386-static atomic_t n_rcu_torture_error;
65387+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65388+static atomic_unchecked_t n_rcu_torture_alloc;
65389+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65390+static atomic_unchecked_t n_rcu_torture_free;
65391+static atomic_unchecked_t n_rcu_torture_mberror;
65392+static atomic_unchecked_t n_rcu_torture_error;
65393 static long n_rcu_torture_boost_ktrerror;
65394 static long n_rcu_torture_boost_rterror;
65395 static long n_rcu_torture_boost_failure;
65396@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65397
65398 spin_lock_bh(&rcu_torture_lock);
65399 if (list_empty(&rcu_torture_freelist)) {
65400- atomic_inc(&n_rcu_torture_alloc_fail);
65401+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65402 spin_unlock_bh(&rcu_torture_lock);
65403 return NULL;
65404 }
65405- atomic_inc(&n_rcu_torture_alloc);
65406+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65407 p = rcu_torture_freelist.next;
65408 list_del_init(p);
65409 spin_unlock_bh(&rcu_torture_lock);
65410@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65411 static void
65412 rcu_torture_free(struct rcu_torture *p)
65413 {
65414- atomic_inc(&n_rcu_torture_free);
65415+ atomic_inc_unchecked(&n_rcu_torture_free);
65416 spin_lock_bh(&rcu_torture_lock);
65417 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65418 spin_unlock_bh(&rcu_torture_lock);
65419@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65420 i = rp->rtort_pipe_count;
65421 if (i > RCU_TORTURE_PIPE_LEN)
65422 i = RCU_TORTURE_PIPE_LEN;
65423- atomic_inc(&rcu_torture_wcount[i]);
65424+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65425 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65426 rp->rtort_mbtest = 0;
65427 rcu_torture_free(rp);
65428@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65429 i = rp->rtort_pipe_count;
65430 if (i > RCU_TORTURE_PIPE_LEN)
65431 i = RCU_TORTURE_PIPE_LEN;
65432- atomic_inc(&rcu_torture_wcount[i]);
65433+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65434 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65435 rp->rtort_mbtest = 0;
65436 list_del(&rp->rtort_free);
65437@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65438 i = old_rp->rtort_pipe_count;
65439 if (i > RCU_TORTURE_PIPE_LEN)
65440 i = RCU_TORTURE_PIPE_LEN;
65441- atomic_inc(&rcu_torture_wcount[i]);
65442+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65443 old_rp->rtort_pipe_count++;
65444 cur_ops->deferred_free(old_rp);
65445 }
65446@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65447 return;
65448 }
65449 if (p->rtort_mbtest == 0)
65450- atomic_inc(&n_rcu_torture_mberror);
65451+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65452 spin_lock(&rand_lock);
65453 cur_ops->read_delay(&rand);
65454 n_rcu_torture_timers++;
65455@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65456 continue;
65457 }
65458 if (p->rtort_mbtest == 0)
65459- atomic_inc(&n_rcu_torture_mberror);
65460+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65461 cur_ops->read_delay(&rand);
65462 preempt_disable();
65463 pipe_count = p->rtort_pipe_count;
65464@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65465 rcu_torture_current,
65466 rcu_torture_current_version,
65467 list_empty(&rcu_torture_freelist),
65468- atomic_read(&n_rcu_torture_alloc),
65469- atomic_read(&n_rcu_torture_alloc_fail),
65470- atomic_read(&n_rcu_torture_free),
65471- atomic_read(&n_rcu_torture_mberror),
65472+ atomic_read_unchecked(&n_rcu_torture_alloc),
65473+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65474+ atomic_read_unchecked(&n_rcu_torture_free),
65475+ atomic_read_unchecked(&n_rcu_torture_mberror),
65476 n_rcu_torture_boost_ktrerror,
65477 n_rcu_torture_boost_rterror,
65478 n_rcu_torture_boost_failure,
65479 n_rcu_torture_boosts,
65480 n_rcu_torture_timers);
65481- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65482+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65483 n_rcu_torture_boost_ktrerror != 0 ||
65484 n_rcu_torture_boost_rterror != 0 ||
65485 n_rcu_torture_boost_failure != 0)
65486@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65487 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65488 if (i > 1) {
65489 cnt += sprintf(&page[cnt], "!!! ");
65490- atomic_inc(&n_rcu_torture_error);
65491+ atomic_inc_unchecked(&n_rcu_torture_error);
65492 WARN_ON_ONCE(1);
65493 }
65494 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65495@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65496 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65497 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65498 cnt += sprintf(&page[cnt], " %d",
65499- atomic_read(&rcu_torture_wcount[i]));
65500+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65501 }
65502 cnt += sprintf(&page[cnt], "\n");
65503 if (cur_ops->stats)
65504@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65505
65506 if (cur_ops->cleanup)
65507 cur_ops->cleanup();
65508- if (atomic_read(&n_rcu_torture_error))
65509+ if (atomic_read_unchecked(&n_rcu_torture_error))
65510 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65511 else
65512 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65513@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65514
65515 rcu_torture_current = NULL;
65516 rcu_torture_current_version = 0;
65517- atomic_set(&n_rcu_torture_alloc, 0);
65518- atomic_set(&n_rcu_torture_alloc_fail, 0);
65519- atomic_set(&n_rcu_torture_free, 0);
65520- atomic_set(&n_rcu_torture_mberror, 0);
65521- atomic_set(&n_rcu_torture_error, 0);
65522+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65523+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65524+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65525+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65526+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65527 n_rcu_torture_boost_ktrerror = 0;
65528 n_rcu_torture_boost_rterror = 0;
65529 n_rcu_torture_boost_failure = 0;
65530 n_rcu_torture_boosts = 0;
65531 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65532- atomic_set(&rcu_torture_wcount[i], 0);
65533+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65534 for_each_possible_cpu(cpu) {
65535 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65536 per_cpu(rcu_torture_count, cpu)[i] = 0;
65537diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65538index 6b76d81..7afc1b3 100644
65539--- a/kernel/rcutree.c
65540+++ b/kernel/rcutree.c
65541@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65542 trace_rcu_dyntick("Start");
65543 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65544 smp_mb__before_atomic_inc(); /* See above. */
65545- atomic_inc(&rdtp->dynticks);
65546+ atomic_inc_unchecked(&rdtp->dynticks);
65547 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65548- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65549+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65550 local_irq_restore(flags);
65551 }
65552
65553@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65554 return;
65555 }
65556 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65557- atomic_inc(&rdtp->dynticks);
65558+ atomic_inc_unchecked(&rdtp->dynticks);
65559 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65560 smp_mb__after_atomic_inc(); /* See above. */
65561- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65562+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65563 trace_rcu_dyntick("End");
65564 local_irq_restore(flags);
65565 }
65566@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65567 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65568
65569 if (rdtp->dynticks_nmi_nesting == 0 &&
65570- (atomic_read(&rdtp->dynticks) & 0x1))
65571+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65572 return;
65573 rdtp->dynticks_nmi_nesting++;
65574 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65575- atomic_inc(&rdtp->dynticks);
65576+ atomic_inc_unchecked(&rdtp->dynticks);
65577 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65578 smp_mb__after_atomic_inc(); /* See above. */
65579- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65580+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65581 }
65582
65583 /**
65584@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65585 return;
65586 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65587 smp_mb__before_atomic_inc(); /* See above. */
65588- atomic_inc(&rdtp->dynticks);
65589+ atomic_inc_unchecked(&rdtp->dynticks);
65590 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65591- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65592+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65593 }
65594
65595 /**
65596@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65597 */
65598 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65599 {
65600- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65601+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65602 return 0;
65603 }
65604
65605@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65606 unsigned int curr;
65607 unsigned int snap;
65608
65609- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65610+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65611 snap = (unsigned int)rdp->dynticks_snap;
65612
65613 /*
65614@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65615 /*
65616 * Do RCU core processing for the current CPU.
65617 */
65618-static void rcu_process_callbacks(struct softirq_action *unused)
65619+static void rcu_process_callbacks(void)
65620 {
65621 trace_rcu_utilization("Start RCU core");
65622 __rcu_process_callbacks(&rcu_sched_state,
65623diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65624index 849ce9e..74bc9de 100644
65625--- a/kernel/rcutree.h
65626+++ b/kernel/rcutree.h
65627@@ -86,7 +86,7 @@
65628 struct rcu_dynticks {
65629 int dynticks_nesting; /* Track irq/process nesting level. */
65630 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65631- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65632+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65633 };
65634
65635 /* RCU's kthread states for tracing. */
65636diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65637index 4b9b9f8..2326053 100644
65638--- a/kernel/rcutree_plugin.h
65639+++ b/kernel/rcutree_plugin.h
65640@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65641
65642 /* Clean up and exit. */
65643 smp_mb(); /* ensure expedited GP seen before counter increment. */
65644- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65645+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65646 unlock_mb_ret:
65647 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65648 mb_ret:
65649@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65650
65651 #else /* #ifndef CONFIG_SMP */
65652
65653-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65654-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65655+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65656+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65657
65658 static int synchronize_sched_expedited_cpu_stop(void *data)
65659 {
65660@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65661 int firstsnap, s, snap, trycount = 0;
65662
65663 /* Note that atomic_inc_return() implies full memory barrier. */
65664- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65665+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65666 get_online_cpus();
65667
65668 /*
65669@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65670 }
65671
65672 /* Check to see if someone else did our work for us. */
65673- s = atomic_read(&sync_sched_expedited_done);
65674+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65675 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65676 smp_mb(); /* ensure test happens before caller kfree */
65677 return;
65678@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65679 * grace period works for us.
65680 */
65681 get_online_cpus();
65682- snap = atomic_read(&sync_sched_expedited_started) - 1;
65683+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65684 smp_mb(); /* ensure read is before try_stop_cpus(). */
65685 }
65686
65687@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65688 * than we did beat us to the punch.
65689 */
65690 do {
65691- s = atomic_read(&sync_sched_expedited_done);
65692+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65693 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65694 smp_mb(); /* ensure test happens before caller kfree */
65695 break;
65696 }
65697- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65698+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65699
65700 put_online_cpus();
65701 }
65702@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65703 for_each_online_cpu(thatcpu) {
65704 if (thatcpu == cpu)
65705 continue;
65706- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65707+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65708 thatcpu).dynticks);
65709 smp_mb(); /* Order sampling of snap with end of grace period. */
65710 if ((snap & 0x1) != 0) {
65711diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65712index 9feffa4..54058df 100644
65713--- a/kernel/rcutree_trace.c
65714+++ b/kernel/rcutree_trace.c
65715@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65716 rdp->qs_pending);
65717 #ifdef CONFIG_NO_HZ
65718 seq_printf(m, " dt=%d/%d/%d df=%lu",
65719- atomic_read(&rdp->dynticks->dynticks),
65720+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65721 rdp->dynticks->dynticks_nesting,
65722 rdp->dynticks->dynticks_nmi_nesting,
65723 rdp->dynticks_fqs);
65724@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65725 rdp->qs_pending);
65726 #ifdef CONFIG_NO_HZ
65727 seq_printf(m, ",%d,%d,%d,%lu",
65728- atomic_read(&rdp->dynticks->dynticks),
65729+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65730 rdp->dynticks->dynticks_nesting,
65731 rdp->dynticks->dynticks_nmi_nesting,
65732 rdp->dynticks_fqs);
65733diff --git a/kernel/resource.c b/kernel/resource.c
65734index 7640b3a..5879283 100644
65735--- a/kernel/resource.c
65736+++ b/kernel/resource.c
65737@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65738
65739 static int __init ioresources_init(void)
65740 {
65741+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65742+#ifdef CONFIG_GRKERNSEC_PROC_USER
65743+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65744+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65745+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65746+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65747+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65748+#endif
65749+#else
65750 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65751 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65752+#endif
65753 return 0;
65754 }
65755 __initcall(ioresources_init);
65756diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65757index 3d9f31c..7fefc9e 100644
65758--- a/kernel/rtmutex-tester.c
65759+++ b/kernel/rtmutex-tester.c
65760@@ -20,7 +20,7 @@
65761 #define MAX_RT_TEST_MUTEXES 8
65762
65763 static spinlock_t rttest_lock;
65764-static atomic_t rttest_event;
65765+static atomic_unchecked_t rttest_event;
65766
65767 struct test_thread_data {
65768 int opcode;
65769@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65770
65771 case RTTEST_LOCKCONT:
65772 td->mutexes[td->opdata] = 1;
65773- td->event = atomic_add_return(1, &rttest_event);
65774+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65775 return 0;
65776
65777 case RTTEST_RESET:
65778@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65779 return 0;
65780
65781 case RTTEST_RESETEVENT:
65782- atomic_set(&rttest_event, 0);
65783+ atomic_set_unchecked(&rttest_event, 0);
65784 return 0;
65785
65786 default:
65787@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65788 return ret;
65789
65790 td->mutexes[id] = 1;
65791- td->event = atomic_add_return(1, &rttest_event);
65792+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65793 rt_mutex_lock(&mutexes[id]);
65794- td->event = atomic_add_return(1, &rttest_event);
65795+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65796 td->mutexes[id] = 4;
65797 return 0;
65798
65799@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65800 return ret;
65801
65802 td->mutexes[id] = 1;
65803- td->event = atomic_add_return(1, &rttest_event);
65804+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65805 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65806- td->event = atomic_add_return(1, &rttest_event);
65807+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65808 td->mutexes[id] = ret ? 0 : 4;
65809 return ret ? -EINTR : 0;
65810
65811@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65812 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65813 return ret;
65814
65815- td->event = atomic_add_return(1, &rttest_event);
65816+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65817 rt_mutex_unlock(&mutexes[id]);
65818- td->event = atomic_add_return(1, &rttest_event);
65819+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65820 td->mutexes[id] = 0;
65821 return 0;
65822
65823@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65824 break;
65825
65826 td->mutexes[dat] = 2;
65827- td->event = atomic_add_return(1, &rttest_event);
65828+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65829 break;
65830
65831 default:
65832@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65833 return;
65834
65835 td->mutexes[dat] = 3;
65836- td->event = atomic_add_return(1, &rttest_event);
65837+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65838 break;
65839
65840 case RTTEST_LOCKNOWAIT:
65841@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65842 return;
65843
65844 td->mutexes[dat] = 1;
65845- td->event = atomic_add_return(1, &rttest_event);
65846+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65847 return;
65848
65849 default:
65850diff --git a/kernel/sched.c b/kernel/sched.c
65851index d6b149c..896cbb8 100644
65852--- a/kernel/sched.c
65853+++ b/kernel/sched.c
65854@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65855 BUG(); /* the idle class will always have a runnable task */
65856 }
65857
65858+#ifdef CONFIG_GRKERNSEC_SETXID
65859+extern void gr_delayed_cred_worker(void);
65860+static inline void gr_cred_schedule(void)
65861+{
65862+ if (unlikely(current->delayed_cred))
65863+ gr_delayed_cred_worker();
65864+}
65865+#else
65866+static inline void gr_cred_schedule(void)
65867+{
65868+}
65869+#endif
65870+
65871 /*
65872 * __schedule() is the main scheduler function.
65873 */
65874@@ -4408,6 +4421,8 @@ need_resched:
65875
65876 schedule_debug(prev);
65877
65878+ gr_cred_schedule();
65879+
65880 if (sched_feat(HRTICK))
65881 hrtick_clear(rq);
65882
65883@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
65884 /* convert nice value [19,-20] to rlimit style value [1,40] */
65885 int nice_rlim = 20 - nice;
65886
65887+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65888+
65889 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65890 capable(CAP_SYS_NICE));
65891 }
65892@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65893 if (nice > 19)
65894 nice = 19;
65895
65896- if (increment < 0 && !can_nice(current, nice))
65897+ if (increment < 0 && (!can_nice(current, nice) ||
65898+ gr_handle_chroot_nice()))
65899 return -EPERM;
65900
65901 retval = security_task_setnice(current, nice);
65902@@ -5288,6 +5306,7 @@ recheck:
65903 unsigned long rlim_rtprio =
65904 task_rlimit(p, RLIMIT_RTPRIO);
65905
65906+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65907 /* can't set/change the rt policy */
65908 if (policy != p->policy && !rlim_rtprio)
65909 return -EPERM;
65910diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
65911index 429242f..d7cca82 100644
65912--- a/kernel/sched_autogroup.c
65913+++ b/kernel/sched_autogroup.c
65914@@ -7,7 +7,7 @@
65915
65916 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65917 static struct autogroup autogroup_default;
65918-static atomic_t autogroup_seq_nr;
65919+static atomic_unchecked_t autogroup_seq_nr;
65920
65921 static void __init autogroup_init(struct task_struct *init_task)
65922 {
65923@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
65924
65925 kref_init(&ag->kref);
65926 init_rwsem(&ag->lock);
65927- ag->id = atomic_inc_return(&autogroup_seq_nr);
65928+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65929 ag->tg = tg;
65930 #ifdef CONFIG_RT_GROUP_SCHED
65931 /*
65932diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
65933index 8a39fa3..34f3dbc 100644
65934--- a/kernel/sched_fair.c
65935+++ b/kernel/sched_fair.c
65936@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
65937 * run_rebalance_domains is triggered when needed from the scheduler tick.
65938 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65939 */
65940-static void run_rebalance_domains(struct softirq_action *h)
65941+static void run_rebalance_domains(void)
65942 {
65943 int this_cpu = smp_processor_id();
65944 struct rq *this_rq = cpu_rq(this_cpu);
65945diff --git a/kernel/signal.c b/kernel/signal.c
65946index 2065515..aed2987 100644
65947--- a/kernel/signal.c
65948+++ b/kernel/signal.c
65949@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
65950
65951 int print_fatal_signals __read_mostly;
65952
65953-static void __user *sig_handler(struct task_struct *t, int sig)
65954+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65955 {
65956 return t->sighand->action[sig - 1].sa.sa_handler;
65957 }
65958
65959-static int sig_handler_ignored(void __user *handler, int sig)
65960+static int sig_handler_ignored(__sighandler_t handler, int sig)
65961 {
65962 /* Is it explicitly or implicitly ignored? */
65963 return handler == SIG_IGN ||
65964@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
65965 static int sig_task_ignored(struct task_struct *t, int sig,
65966 int from_ancestor_ns)
65967 {
65968- void __user *handler;
65969+ __sighandler_t handler;
65970
65971 handler = sig_handler(t, sig);
65972
65973@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
65974 atomic_inc(&user->sigpending);
65975 rcu_read_unlock();
65976
65977+ if (!override_rlimit)
65978+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65979+
65980 if (override_rlimit ||
65981 atomic_read(&user->sigpending) <=
65982 task_rlimit(t, RLIMIT_SIGPENDING)) {
65983@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
65984
65985 int unhandled_signal(struct task_struct *tsk, int sig)
65986 {
65987- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65988+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65989 if (is_global_init(tsk))
65990 return 1;
65991 if (handler != SIG_IGN && handler != SIG_DFL)
65992@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
65993 }
65994 }
65995
65996+ /* allow glibc communication via tgkill to other threads in our
65997+ thread group */
65998+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65999+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66000+ && gr_handle_signal(t, sig))
66001+ return -EPERM;
66002+
66003 return security_task_kill(t, info, sig, 0);
66004 }
66005
66006@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66007 return send_signal(sig, info, p, 1);
66008 }
66009
66010-static int
66011+int
66012 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66013 {
66014 return send_signal(sig, info, t, 0);
66015@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66016 unsigned long int flags;
66017 int ret, blocked, ignored;
66018 struct k_sigaction *action;
66019+ int is_unhandled = 0;
66020
66021 spin_lock_irqsave(&t->sighand->siglock, flags);
66022 action = &t->sighand->action[sig-1];
66023@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66024 }
66025 if (action->sa.sa_handler == SIG_DFL)
66026 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66027+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66028+ is_unhandled = 1;
66029 ret = specific_send_sig_info(sig, info, t);
66030 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66031
66032+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66033+ normal operation */
66034+ if (is_unhandled) {
66035+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66036+ gr_handle_crash(t, sig);
66037+ }
66038+
66039 return ret;
66040 }
66041
66042@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66043 ret = check_kill_permission(sig, info, p);
66044 rcu_read_unlock();
66045
66046- if (!ret && sig)
66047+ if (!ret && sig) {
66048 ret = do_send_sig_info(sig, info, p, true);
66049+ if (!ret)
66050+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66051+ }
66052
66053 return ret;
66054 }
66055@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66056 int error = -ESRCH;
66057
66058 rcu_read_lock();
66059- p = find_task_by_vpid(pid);
66060+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66061+ /* allow glibc communication via tgkill to other threads in our
66062+ thread group */
66063+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66064+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66065+ p = find_task_by_vpid_unrestricted(pid);
66066+ else
66067+#endif
66068+ p = find_task_by_vpid(pid);
66069 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66070 error = check_kill_permission(sig, info, p);
66071 /*
66072diff --git a/kernel/smp.c b/kernel/smp.c
66073index db197d6..17aef0b 100644
66074--- a/kernel/smp.c
66075+++ b/kernel/smp.c
66076@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66077 }
66078 EXPORT_SYMBOL(smp_call_function);
66079
66080-void ipi_call_lock(void)
66081+void ipi_call_lock(void) __acquires(call_function.lock)
66082 {
66083 raw_spin_lock(&call_function.lock);
66084 }
66085
66086-void ipi_call_unlock(void)
66087+void ipi_call_unlock(void) __releases(call_function.lock)
66088 {
66089 raw_spin_unlock(&call_function.lock);
66090 }
66091
66092-void ipi_call_lock_irq(void)
66093+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66094 {
66095 raw_spin_lock_irq(&call_function.lock);
66096 }
66097
66098-void ipi_call_unlock_irq(void)
66099+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66100 {
66101 raw_spin_unlock_irq(&call_function.lock);
66102 }
66103diff --git a/kernel/softirq.c b/kernel/softirq.c
66104index 2c71d91..1021f81 100644
66105--- a/kernel/softirq.c
66106+++ b/kernel/softirq.c
66107@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66108
66109 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66110
66111-char *softirq_to_name[NR_SOFTIRQS] = {
66112+const char * const softirq_to_name[NR_SOFTIRQS] = {
66113 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66114 "TASKLET", "SCHED", "HRTIMER", "RCU"
66115 };
66116@@ -235,7 +235,7 @@ restart:
66117 kstat_incr_softirqs_this_cpu(vec_nr);
66118
66119 trace_softirq_entry(vec_nr);
66120- h->action(h);
66121+ h->action();
66122 trace_softirq_exit(vec_nr);
66123 if (unlikely(prev_count != preempt_count())) {
66124 printk(KERN_ERR "huh, entered softirq %u %s %p"
66125@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66126 local_irq_restore(flags);
66127 }
66128
66129-void open_softirq(int nr, void (*action)(struct softirq_action *))
66130+void open_softirq(int nr, void (*action)(void))
66131 {
66132- softirq_vec[nr].action = action;
66133+ pax_open_kernel();
66134+ *(void **)&softirq_vec[nr].action = action;
66135+ pax_close_kernel();
66136 }
66137
66138 /*
66139@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66140
66141 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66142
66143-static void tasklet_action(struct softirq_action *a)
66144+static void tasklet_action(void)
66145 {
66146 struct tasklet_struct *list;
66147
66148@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66149 }
66150 }
66151
66152-static void tasklet_hi_action(struct softirq_action *a)
66153+static void tasklet_hi_action(void)
66154 {
66155 struct tasklet_struct *list;
66156
66157diff --git a/kernel/sys.c b/kernel/sys.c
66158index 481611f..4665125 100644
66159--- a/kernel/sys.c
66160+++ b/kernel/sys.c
66161@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66162 error = -EACCES;
66163 goto out;
66164 }
66165+
66166+ if (gr_handle_chroot_setpriority(p, niceval)) {
66167+ error = -EACCES;
66168+ goto out;
66169+ }
66170+
66171 no_nice = security_task_setnice(p, niceval);
66172 if (no_nice) {
66173 error = no_nice;
66174@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66175 goto error;
66176 }
66177
66178+ if (gr_check_group_change(new->gid, new->egid, -1))
66179+ goto error;
66180+
66181 if (rgid != (gid_t) -1 ||
66182 (egid != (gid_t) -1 && egid != old->gid))
66183 new->sgid = new->egid;
66184@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66185 old = current_cred();
66186
66187 retval = -EPERM;
66188+
66189+ if (gr_check_group_change(gid, gid, gid))
66190+ goto error;
66191+
66192 if (nsown_capable(CAP_SETGID))
66193 new->gid = new->egid = new->sgid = new->fsgid = gid;
66194 else if (gid == old->gid || gid == old->sgid)
66195@@ -618,7 +631,7 @@ error:
66196 /*
66197 * change the user struct in a credentials set to match the new UID
66198 */
66199-static int set_user(struct cred *new)
66200+int set_user(struct cred *new)
66201 {
66202 struct user_struct *new_user;
66203
66204@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66205 goto error;
66206 }
66207
66208+ if (gr_check_user_change(new->uid, new->euid, -1))
66209+ goto error;
66210+
66211 if (new->uid != old->uid) {
66212 retval = set_user(new);
66213 if (retval < 0)
66214@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66215 old = current_cred();
66216
66217 retval = -EPERM;
66218+
66219+ if (gr_check_crash_uid(uid))
66220+ goto error;
66221+ if (gr_check_user_change(uid, uid, uid))
66222+ goto error;
66223+
66224 if (nsown_capable(CAP_SETUID)) {
66225 new->suid = new->uid = uid;
66226 if (uid != old->uid) {
66227@@ -775,9 +797,18 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66228
66229 retval = -EPERM;
66230 if (!nsown_capable(CAP_SETUID)) {
66231- if (ruid != (uid_t) -1 && ruid != old->uid &&
66232- ruid != old->euid && ruid != old->suid)
66233- goto error;
66234+ // if RBAC is enabled, require CAP_SETUID to change
66235+ // uid to euid (from a suid binary, for instance)
66236+ // this is a hardening of normal permissions, not
66237+ // weakening
66238+ if (gr_acl_is_enabled()) {
66239+ if (ruid != (uid_t) -1 && ruid != old->uid)
66240+ goto error;
66241+ } else {
66242+ if (ruid != (uid_t) -1 && ruid != old->uid &&
66243+ ruid != old->euid && ruid != old->suid)
66244+ goto error;
66245+ }
66246 if (euid != (uid_t) -1 && euid != old->uid &&
66247 euid != old->euid && euid != old->suid)
66248 goto error;
66249@@ -786,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66250 goto error;
66251 }
66252
66253+ if (gr_check_user_change(ruid, euid, -1))
66254+ goto error;
66255+
66256 if (ruid != (uid_t) -1) {
66257 new->uid = ruid;
66258 if (ruid != old->uid) {
66259@@ -839,9 +873,18 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66260
66261 retval = -EPERM;
66262 if (!nsown_capable(CAP_SETGID)) {
66263- if (rgid != (gid_t) -1 && rgid != old->gid &&
66264- rgid != old->egid && rgid != old->sgid)
66265- goto error;
66266+ // if RBAC is enabled, require CAP_SETGID to change
66267+ // gid to egid (from a sgid binary, for instance)
66268+ // this is a hardening of normal permissions, not
66269+ // weakening
66270+ if (gr_acl_is_enabled()) {
66271+ if (rgid != (gid_t) -1 && rgid != old->gid)
66272+ goto error;
66273+ } else {
66274+ if (rgid != (gid_t) -1 && rgid != old->gid &&
66275+ rgid != old->egid && rgid != old->sgid)
66276+ goto error;
66277+ }
66278 if (egid != (gid_t) -1 && egid != old->gid &&
66279 egid != old->egid && egid != old->sgid)
66280 goto error;
66281@@ -850,6 +893,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66282 goto error;
66283 }
66284
66285+ if (gr_check_group_change(rgid, egid, -1))
66286+ goto error;
66287+
66288 if (rgid != (gid_t) -1)
66289 new->gid = rgid;
66290 if (egid != (gid_t) -1)
66291@@ -896,6 +942,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66292 old = current_cred();
66293 old_fsuid = old->fsuid;
66294
66295+ if (gr_check_user_change(-1, -1, uid))
66296+ goto error;
66297+
66298 if (uid == old->uid || uid == old->euid ||
66299 uid == old->suid || uid == old->fsuid ||
66300 nsown_capable(CAP_SETUID)) {
66301@@ -906,6 +955,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66302 }
66303 }
66304
66305+error:
66306 abort_creds(new);
66307 return old_fsuid;
66308
66309@@ -932,12 +982,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66310 if (gid == old->gid || gid == old->egid ||
66311 gid == old->sgid || gid == old->fsgid ||
66312 nsown_capable(CAP_SETGID)) {
66313+ if (gr_check_group_change(-1, -1, gid))
66314+ goto error;
66315+
66316 if (gid != old_fsgid) {
66317 new->fsgid = gid;
66318 goto change_okay;
66319 }
66320 }
66321
66322+error:
66323 abort_creds(new);
66324 return old_fsgid;
66325
66326@@ -1189,7 +1243,10 @@ static int override_release(char __user *release, int len)
66327 }
66328 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66329 snprintf(buf, len, "2.6.%u%s", v, rest);
66330- ret = copy_to_user(release, buf, len);
66331+ if (len > sizeof(buf))
66332+ ret = -EFAULT;
66333+ else
66334+ ret = copy_to_user(release, buf, len);
66335 }
66336 return ret;
66337 }
66338@@ -1243,19 +1300,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66339 return -EFAULT;
66340
66341 down_read(&uts_sem);
66342- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66343+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66344 __OLD_UTS_LEN);
66345 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66346- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66347+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66348 __OLD_UTS_LEN);
66349 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66350- error |= __copy_to_user(&name->release, &utsname()->release,
66351+ error |= __copy_to_user(name->release, &utsname()->release,
66352 __OLD_UTS_LEN);
66353 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66354- error |= __copy_to_user(&name->version, &utsname()->version,
66355+ error |= __copy_to_user(name->version, &utsname()->version,
66356 __OLD_UTS_LEN);
66357 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66358- error |= __copy_to_user(&name->machine, &utsname()->machine,
66359+ error |= __copy_to_user(name->machine, &utsname()->machine,
66360 __OLD_UTS_LEN);
66361 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66362 up_read(&uts_sem);
66363@@ -1720,7 +1777,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66364 error = get_dumpable(me->mm);
66365 break;
66366 case PR_SET_DUMPABLE:
66367- if (arg2 < 0 || arg2 > 1) {
66368+ if (arg2 > 1) {
66369 error = -EINVAL;
66370 break;
66371 }
66372diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66373index ae27196..7506d69 100644
66374--- a/kernel/sysctl.c
66375+++ b/kernel/sysctl.c
66376@@ -86,6 +86,13 @@
66377
66378
66379 #if defined(CONFIG_SYSCTL)
66380+#include <linux/grsecurity.h>
66381+#include <linux/grinternal.h>
66382+
66383+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66384+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66385+ const int op);
66386+extern int gr_handle_chroot_sysctl(const int op);
66387
66388 /* External variables not in a header file. */
66389 extern int sysctl_overcommit_memory;
66390@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66391 }
66392
66393 #endif
66394+extern struct ctl_table grsecurity_table[];
66395
66396 static struct ctl_table root_table[];
66397 static struct ctl_table_root sysctl_table_root;
66398@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66399 int sysctl_legacy_va_layout;
66400 #endif
66401
66402+#ifdef CONFIG_PAX_SOFTMODE
66403+static ctl_table pax_table[] = {
66404+ {
66405+ .procname = "softmode",
66406+ .data = &pax_softmode,
66407+ .maxlen = sizeof(unsigned int),
66408+ .mode = 0600,
66409+ .proc_handler = &proc_dointvec,
66410+ },
66411+
66412+ { }
66413+};
66414+#endif
66415+
66416 /* The default sysctl tables: */
66417
66418 static struct ctl_table root_table[] = {
66419@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66420 #endif
66421
66422 static struct ctl_table kern_table[] = {
66423+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66424+ {
66425+ .procname = "grsecurity",
66426+ .mode = 0500,
66427+ .child = grsecurity_table,
66428+ },
66429+#endif
66430+
66431+#ifdef CONFIG_PAX_SOFTMODE
66432+ {
66433+ .procname = "pax",
66434+ .mode = 0500,
66435+ .child = pax_table,
66436+ },
66437+#endif
66438+
66439 {
66440 .procname = "sched_child_runs_first",
66441 .data = &sysctl_sched_child_runs_first,
66442@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66443 .data = &modprobe_path,
66444 .maxlen = KMOD_PATH_LEN,
66445 .mode = 0644,
66446- .proc_handler = proc_dostring,
66447+ .proc_handler = proc_dostring_modpriv,
66448 },
66449 {
66450 .procname = "modules_disabled",
66451@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66452 .extra1 = &zero,
66453 .extra2 = &one,
66454 },
66455+#endif
66456 {
66457 .procname = "kptr_restrict",
66458 .data = &kptr_restrict,
66459 .maxlen = sizeof(int),
66460 .mode = 0644,
66461 .proc_handler = proc_dmesg_restrict,
66462+#ifdef CONFIG_GRKERNSEC_HIDESYM
66463+ .extra1 = &two,
66464+#else
66465 .extra1 = &zero,
66466+#endif
66467 .extra2 = &two,
66468 },
66469-#endif
66470 {
66471 .procname = "ngroups_max",
66472 .data = &ngroups_max,
66473@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66474 .proc_handler = proc_dointvec_minmax,
66475 .extra1 = &zero,
66476 },
66477+ {
66478+ .procname = "heap_stack_gap",
66479+ .data = &sysctl_heap_stack_gap,
66480+ .maxlen = sizeof(sysctl_heap_stack_gap),
66481+ .mode = 0644,
66482+ .proc_handler = proc_doulongvec_minmax,
66483+ },
66484 #else
66485 {
66486 .procname = "nr_trim_pages",
66487@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66488 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66489 {
66490 int mode;
66491+ int error;
66492+
66493+ if (table->parent != NULL && table->parent->procname != NULL &&
66494+ table->procname != NULL &&
66495+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66496+ return -EACCES;
66497+ if (gr_handle_chroot_sysctl(op))
66498+ return -EACCES;
66499+ error = gr_handle_sysctl(table, op);
66500+ if (error)
66501+ return error;
66502
66503 if (root->permissions)
66504 mode = root->permissions(root, current->nsproxy, table);
66505@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66506 buffer, lenp, ppos);
66507 }
66508
66509+int proc_dostring_modpriv(struct ctl_table *table, int write,
66510+ void __user *buffer, size_t *lenp, loff_t *ppos)
66511+{
66512+ if (write && !capable(CAP_SYS_MODULE))
66513+ return -EPERM;
66514+
66515+ return _proc_do_string(table->data, table->maxlen, write,
66516+ buffer, lenp, ppos);
66517+}
66518+
66519 static size_t proc_skip_spaces(char **buf)
66520 {
66521 size_t ret;
66522@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66523 len = strlen(tmp);
66524 if (len > *size)
66525 len = *size;
66526+ if (len > sizeof(tmp))
66527+ len = sizeof(tmp);
66528 if (copy_to_user(*buf, tmp, len))
66529 return -EFAULT;
66530 *size -= len;
66531@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66532 *i = val;
66533 } else {
66534 val = convdiv * (*i) / convmul;
66535- if (!first)
66536+ if (!first) {
66537 err = proc_put_char(&buffer, &left, '\t');
66538+ if (err)
66539+ break;
66540+ }
66541 err = proc_put_long(&buffer, &left, val, false);
66542 if (err)
66543 break;
66544@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66545 return -ENOSYS;
66546 }
66547
66548+int proc_dostring_modpriv(struct ctl_table *table, int write,
66549+ void __user *buffer, size_t *lenp, loff_t *ppos)
66550+{
66551+ return -ENOSYS;
66552+}
66553+
66554 int proc_dointvec(struct ctl_table *table, int write,
66555 void __user *buffer, size_t *lenp, loff_t *ppos)
66556 {
66557@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66558 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66559 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66560 EXPORT_SYMBOL(proc_dostring);
66561+EXPORT_SYMBOL(proc_dostring_modpriv);
66562 EXPORT_SYMBOL(proc_doulongvec_minmax);
66563 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66564 EXPORT_SYMBOL(register_sysctl_table);
66565diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66566index a650694..aaeeb20 100644
66567--- a/kernel/sysctl_binary.c
66568+++ b/kernel/sysctl_binary.c
66569@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66570 int i;
66571
66572 set_fs(KERNEL_DS);
66573- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66574+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66575 set_fs(old_fs);
66576 if (result < 0)
66577 goto out_kfree;
66578@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66579 }
66580
66581 set_fs(KERNEL_DS);
66582- result = vfs_write(file, buffer, str - buffer, &pos);
66583+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66584 set_fs(old_fs);
66585 if (result < 0)
66586 goto out_kfree;
66587@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66588 int i;
66589
66590 set_fs(KERNEL_DS);
66591- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66592+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66593 set_fs(old_fs);
66594 if (result < 0)
66595 goto out_kfree;
66596@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66597 }
66598
66599 set_fs(KERNEL_DS);
66600- result = vfs_write(file, buffer, str - buffer, &pos);
66601+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66602 set_fs(old_fs);
66603 if (result < 0)
66604 goto out_kfree;
66605@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66606 int i;
66607
66608 set_fs(KERNEL_DS);
66609- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66610+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66611 set_fs(old_fs);
66612 if (result < 0)
66613 goto out;
66614@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66615 __le16 dnaddr;
66616
66617 set_fs(KERNEL_DS);
66618- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66619+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66620 set_fs(old_fs);
66621 if (result < 0)
66622 goto out;
66623@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66624 le16_to_cpu(dnaddr) & 0x3ff);
66625
66626 set_fs(KERNEL_DS);
66627- result = vfs_write(file, buf, len, &pos);
66628+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66629 set_fs(old_fs);
66630 if (result < 0)
66631 goto out;
66632diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66633index 362da65..ab8ef8c 100644
66634--- a/kernel/sysctl_check.c
66635+++ b/kernel/sysctl_check.c
66636@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66637 set_fail(&fail, table, "Directory with extra2");
66638 } else {
66639 if ((table->proc_handler == proc_dostring) ||
66640+ (table->proc_handler == proc_dostring_modpriv) ||
66641 (table->proc_handler == proc_dointvec) ||
66642 (table->proc_handler == proc_dointvec_minmax) ||
66643 (table->proc_handler == proc_dointvec_jiffies) ||
66644diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66645index e660464..c8b9e67 100644
66646--- a/kernel/taskstats.c
66647+++ b/kernel/taskstats.c
66648@@ -27,9 +27,12 @@
66649 #include <linux/cgroup.h>
66650 #include <linux/fs.h>
66651 #include <linux/file.h>
66652+#include <linux/grsecurity.h>
66653 #include <net/genetlink.h>
66654 #include <linux/atomic.h>
66655
66656+extern int gr_is_taskstats_denied(int pid);
66657+
66658 /*
66659 * Maximum length of a cpumask that can be specified in
66660 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66661@@ -556,6 +559,9 @@ err:
66662
66663 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66664 {
66665+ if (gr_is_taskstats_denied(current->pid))
66666+ return -EACCES;
66667+
66668 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66669 return cmd_attr_register_cpumask(info);
66670 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66671diff --git a/kernel/time.c b/kernel/time.c
66672index 73e416d..cfc6f69 100644
66673--- a/kernel/time.c
66674+++ b/kernel/time.c
66675@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66676 return error;
66677
66678 if (tz) {
66679+ /* we log in do_settimeofday called below, so don't log twice
66680+ */
66681+ if (!tv)
66682+ gr_log_timechange();
66683+
66684 /* SMP safe, global irq locking makes it work. */
66685 sys_tz = *tz;
66686 update_vsyscall_tz();
66687diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66688index 8a46f5d..bbe6f9c 100644
66689--- a/kernel/time/alarmtimer.c
66690+++ b/kernel/time/alarmtimer.c
66691@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66692 struct platform_device *pdev;
66693 int error = 0;
66694 int i;
66695- struct k_clock alarm_clock = {
66696+ static struct k_clock alarm_clock = {
66697 .clock_getres = alarm_clock_getres,
66698 .clock_get = alarm_clock_get,
66699 .timer_create = alarm_timer_create,
66700diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66701index fd4a7b1..fae5c2a 100644
66702--- a/kernel/time/tick-broadcast.c
66703+++ b/kernel/time/tick-broadcast.c
66704@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66705 * then clear the broadcast bit.
66706 */
66707 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66708- int cpu = smp_processor_id();
66709+ cpu = smp_processor_id();
66710
66711 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66712 tick_broadcast_clear_oneshot(cpu);
66713diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66714index 2378413..be455fd 100644
66715--- a/kernel/time/timekeeping.c
66716+++ b/kernel/time/timekeeping.c
66717@@ -14,6 +14,7 @@
66718 #include <linux/init.h>
66719 #include <linux/mm.h>
66720 #include <linux/sched.h>
66721+#include <linux/grsecurity.h>
66722 #include <linux/syscore_ops.h>
66723 #include <linux/clocksource.h>
66724 #include <linux/jiffies.h>
66725@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66726 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66727 return -EINVAL;
66728
66729+ gr_log_timechange();
66730+
66731 write_seqlock_irqsave(&xtime_lock, flags);
66732
66733 timekeeping_forward_now();
66734diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66735index 3258455..f35227d 100644
66736--- a/kernel/time/timer_list.c
66737+++ b/kernel/time/timer_list.c
66738@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66739
66740 static void print_name_offset(struct seq_file *m, void *sym)
66741 {
66742+#ifdef CONFIG_GRKERNSEC_HIDESYM
66743+ SEQ_printf(m, "<%p>", NULL);
66744+#else
66745 char symname[KSYM_NAME_LEN];
66746
66747 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66748 SEQ_printf(m, "<%pK>", sym);
66749 else
66750 SEQ_printf(m, "%s", symname);
66751+#endif
66752 }
66753
66754 static void
66755@@ -112,7 +116,11 @@ next_one:
66756 static void
66757 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66758 {
66759+#ifdef CONFIG_GRKERNSEC_HIDESYM
66760+ SEQ_printf(m, " .base: %p\n", NULL);
66761+#else
66762 SEQ_printf(m, " .base: %pK\n", base);
66763+#endif
66764 SEQ_printf(m, " .index: %d\n",
66765 base->index);
66766 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66767@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66768 {
66769 struct proc_dir_entry *pe;
66770
66771+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66772+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66773+#else
66774 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66775+#endif
66776 if (!pe)
66777 return -ENOMEM;
66778 return 0;
66779diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66780index 0b537f2..9e71eca 100644
66781--- a/kernel/time/timer_stats.c
66782+++ b/kernel/time/timer_stats.c
66783@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66784 static unsigned long nr_entries;
66785 static struct entry entries[MAX_ENTRIES];
66786
66787-static atomic_t overflow_count;
66788+static atomic_unchecked_t overflow_count;
66789
66790 /*
66791 * The entries are in a hash-table, for fast lookup:
66792@@ -140,7 +140,7 @@ static void reset_entries(void)
66793 nr_entries = 0;
66794 memset(entries, 0, sizeof(entries));
66795 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66796- atomic_set(&overflow_count, 0);
66797+ atomic_set_unchecked(&overflow_count, 0);
66798 }
66799
66800 static struct entry *alloc_entry(void)
66801@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66802 if (likely(entry))
66803 entry->count++;
66804 else
66805- atomic_inc(&overflow_count);
66806+ atomic_inc_unchecked(&overflow_count);
66807
66808 out_unlock:
66809 raw_spin_unlock_irqrestore(lock, flags);
66810@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66811
66812 static void print_name_offset(struct seq_file *m, unsigned long addr)
66813 {
66814+#ifdef CONFIG_GRKERNSEC_HIDESYM
66815+ seq_printf(m, "<%p>", NULL);
66816+#else
66817 char symname[KSYM_NAME_LEN];
66818
66819 if (lookup_symbol_name(addr, symname) < 0)
66820 seq_printf(m, "<%p>", (void *)addr);
66821 else
66822 seq_printf(m, "%s", symname);
66823+#endif
66824 }
66825
66826 static int tstats_show(struct seq_file *m, void *v)
66827@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66828
66829 seq_puts(m, "Timer Stats Version: v0.2\n");
66830 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66831- if (atomic_read(&overflow_count))
66832+ if (atomic_read_unchecked(&overflow_count))
66833 seq_printf(m, "Overflow: %d entries\n",
66834- atomic_read(&overflow_count));
66835+ atomic_read_unchecked(&overflow_count));
66836
66837 for (i = 0; i < nr_entries; i++) {
66838 entry = entries + i;
66839@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66840 {
66841 struct proc_dir_entry *pe;
66842
66843+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66844+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66845+#else
66846 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66847+#endif
66848 if (!pe)
66849 return -ENOMEM;
66850 return 0;
66851diff --git a/kernel/timer.c b/kernel/timer.c
66852index 9c3c62b..441690e 100644
66853--- a/kernel/timer.c
66854+++ b/kernel/timer.c
66855@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66856 /*
66857 * This function runs timers and the timer-tq in bottom half context.
66858 */
66859-static void run_timer_softirq(struct softirq_action *h)
66860+static void run_timer_softirq(void)
66861 {
66862 struct tvec_base *base = __this_cpu_read(tvec_bases);
66863
66864diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66865index 16fc34a..efd8bb8 100644
66866--- a/kernel/trace/blktrace.c
66867+++ b/kernel/trace/blktrace.c
66868@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66869 struct blk_trace *bt = filp->private_data;
66870 char buf[16];
66871
66872- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66873+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66874
66875 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66876 }
66877@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66878 return 1;
66879
66880 bt = buf->chan->private_data;
66881- atomic_inc(&bt->dropped);
66882+ atomic_inc_unchecked(&bt->dropped);
66883 return 0;
66884 }
66885
66886@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66887
66888 bt->dir = dir;
66889 bt->dev = dev;
66890- atomic_set(&bt->dropped, 0);
66891+ atomic_set_unchecked(&bt->dropped, 0);
66892
66893 ret = -EIO;
66894 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66895diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66896index 25b4f4d..6f4772d 100644
66897--- a/kernel/trace/ftrace.c
66898+++ b/kernel/trace/ftrace.c
66899@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66900 if (unlikely(ftrace_disabled))
66901 return 0;
66902
66903+ ret = ftrace_arch_code_modify_prepare();
66904+ FTRACE_WARN_ON(ret);
66905+ if (ret)
66906+ return 0;
66907+
66908 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66909+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66910 if (ret) {
66911 ftrace_bug(ret, ip);
66912- return 0;
66913 }
66914- return 1;
66915+ return ret ? 0 : 1;
66916 }
66917
66918 /*
66919@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66920
66921 int
66922 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66923- void *data)
66924+ void *data)
66925 {
66926 struct ftrace_func_probe *entry;
66927 struct ftrace_page *pg;
66928diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
66929index f2bd275..adaf3a2 100644
66930--- a/kernel/trace/trace.c
66931+++ b/kernel/trace/trace.c
66932@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
66933 };
66934 #endif
66935
66936-static struct dentry *d_tracer;
66937-
66938 struct dentry *tracing_init_dentry(void)
66939 {
66940+ static struct dentry *d_tracer;
66941 static int once;
66942
66943 if (d_tracer)
66944@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
66945 return d_tracer;
66946 }
66947
66948-static struct dentry *d_percpu;
66949-
66950 struct dentry *tracing_dentry_percpu(void)
66951 {
66952+ static struct dentry *d_percpu;
66953 static int once;
66954 struct dentry *d_tracer;
66955
66956diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
66957index c212a7f..7b02394 100644
66958--- a/kernel/trace/trace_events.c
66959+++ b/kernel/trace/trace_events.c
66960@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
66961 struct ftrace_module_file_ops {
66962 struct list_head list;
66963 struct module *mod;
66964- struct file_operations id;
66965- struct file_operations enable;
66966- struct file_operations format;
66967- struct file_operations filter;
66968 };
66969
66970 static struct ftrace_module_file_ops *
66971@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
66972
66973 file_ops->mod = mod;
66974
66975- file_ops->id = ftrace_event_id_fops;
66976- file_ops->id.owner = mod;
66977-
66978- file_ops->enable = ftrace_enable_fops;
66979- file_ops->enable.owner = mod;
66980-
66981- file_ops->filter = ftrace_event_filter_fops;
66982- file_ops->filter.owner = mod;
66983-
66984- file_ops->format = ftrace_event_format_fops;
66985- file_ops->format.owner = mod;
66986+ pax_open_kernel();
66987+ *(void **)&mod->trace_id.owner = mod;
66988+ *(void **)&mod->trace_enable.owner = mod;
66989+ *(void **)&mod->trace_filter.owner = mod;
66990+ *(void **)&mod->trace_format.owner = mod;
66991+ pax_close_kernel();
66992
66993 list_add(&file_ops->list, &ftrace_module_file_list);
66994
66995@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
66996
66997 for_each_event(call, start, end) {
66998 __trace_add_event_call(*call, mod,
66999- &file_ops->id, &file_ops->enable,
67000- &file_ops->filter, &file_ops->format);
67001+ &mod->trace_id, &mod->trace_enable,
67002+ &mod->trace_filter, &mod->trace_format);
67003 }
67004 }
67005
67006diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67007index 00d527c..7c5b1a3 100644
67008--- a/kernel/trace/trace_kprobe.c
67009+++ b/kernel/trace/trace_kprobe.c
67010@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67011 long ret;
67012 int maxlen = get_rloc_len(*(u32 *)dest);
67013 u8 *dst = get_rloc_data(dest);
67014- u8 *src = addr;
67015+ const u8 __user *src = (const u8 __force_user *)addr;
67016 mm_segment_t old_fs = get_fs();
67017 if (!maxlen)
67018 return;
67019@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67020 pagefault_disable();
67021 do
67022 ret = __copy_from_user_inatomic(dst++, src++, 1);
67023- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67024+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67025 dst[-1] = '\0';
67026 pagefault_enable();
67027 set_fs(old_fs);
67028@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67029 ((u8 *)get_rloc_data(dest))[0] = '\0';
67030 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67031 } else
67032- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67033+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67034 get_rloc_offs(*(u32 *)dest));
67035 }
67036 /* Return the length of string -- including null terminal byte */
67037@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67038 set_fs(KERNEL_DS);
67039 pagefault_disable();
67040 do {
67041- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67042+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67043 len++;
67044 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67045 pagefault_enable();
67046diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67047index fd3c8aa..5f324a6 100644
67048--- a/kernel/trace/trace_mmiotrace.c
67049+++ b/kernel/trace/trace_mmiotrace.c
67050@@ -24,7 +24,7 @@ struct header_iter {
67051 static struct trace_array *mmio_trace_array;
67052 static bool overrun_detected;
67053 static unsigned long prev_overruns;
67054-static atomic_t dropped_count;
67055+static atomic_unchecked_t dropped_count;
67056
67057 static void mmio_reset_data(struct trace_array *tr)
67058 {
67059@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67060
67061 static unsigned long count_overruns(struct trace_iterator *iter)
67062 {
67063- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67064+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67065 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67066
67067 if (over > prev_overruns)
67068@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67069 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67070 sizeof(*entry), 0, pc);
67071 if (!event) {
67072- atomic_inc(&dropped_count);
67073+ atomic_inc_unchecked(&dropped_count);
67074 return;
67075 }
67076 entry = ring_buffer_event_data(event);
67077@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67078 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67079 sizeof(*entry), 0, pc);
67080 if (!event) {
67081- atomic_inc(&dropped_count);
67082+ atomic_inc_unchecked(&dropped_count);
67083 return;
67084 }
67085 entry = ring_buffer_event_data(event);
67086diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67087index 5199930..26c73a0 100644
67088--- a/kernel/trace/trace_output.c
67089+++ b/kernel/trace/trace_output.c
67090@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67091
67092 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67093 if (!IS_ERR(p)) {
67094- p = mangle_path(s->buffer + s->len, p, "\n");
67095+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67096 if (p) {
67097 s->len = p - s->buffer;
67098 return 1;
67099diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67100index 77575b3..6e623d1 100644
67101--- a/kernel/trace/trace_stack.c
67102+++ b/kernel/trace/trace_stack.c
67103@@ -50,7 +50,7 @@ static inline void check_stack(void)
67104 return;
67105
67106 /* we do not handle interrupt stacks yet */
67107- if (!object_is_on_stack(&this_size))
67108+ if (!object_starts_on_stack(&this_size))
67109 return;
67110
67111 local_irq_save(flags);
67112diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67113index 209b379..7f76423 100644
67114--- a/kernel/trace/trace_workqueue.c
67115+++ b/kernel/trace/trace_workqueue.c
67116@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67117 int cpu;
67118 pid_t pid;
67119 /* Can be inserted from interrupt or user context, need to be atomic */
67120- atomic_t inserted;
67121+ atomic_unchecked_t inserted;
67122 /*
67123 * Don't need to be atomic, works are serialized in a single workqueue thread
67124 * on a single CPU.
67125@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67126 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67127 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67128 if (node->pid == wq_thread->pid) {
67129- atomic_inc(&node->inserted);
67130+ atomic_inc_unchecked(&node->inserted);
67131 goto found;
67132 }
67133 }
67134@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67135 tsk = get_pid_task(pid, PIDTYPE_PID);
67136 if (tsk) {
67137 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67138- atomic_read(&cws->inserted), cws->executed,
67139+ atomic_read_unchecked(&cws->inserted), cws->executed,
67140 tsk->comm);
67141 put_task_struct(tsk);
67142 }
67143diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67144index 82928f5..92da771 100644
67145--- a/lib/Kconfig.debug
67146+++ b/lib/Kconfig.debug
67147@@ -1103,6 +1103,7 @@ config LATENCYTOP
67148 depends on DEBUG_KERNEL
67149 depends on STACKTRACE_SUPPORT
67150 depends on PROC_FS
67151+ depends on !GRKERNSEC_HIDESYM
67152 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67153 select KALLSYMS
67154 select KALLSYMS_ALL
67155diff --git a/lib/bitmap.c b/lib/bitmap.c
67156index 0d4a127..33a06c7 100644
67157--- a/lib/bitmap.c
67158+++ b/lib/bitmap.c
67159@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67160 {
67161 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67162 u32 chunk;
67163- const char __user __force *ubuf = (const char __user __force *)buf;
67164+ const char __user *ubuf = (const char __force_user *)buf;
67165
67166 bitmap_zero(maskp, nmaskbits);
67167
67168@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67169 {
67170 if (!access_ok(VERIFY_READ, ubuf, ulen))
67171 return -EFAULT;
67172- return __bitmap_parse((const char __force *)ubuf,
67173+ return __bitmap_parse((const char __force_kernel *)ubuf,
67174 ulen, 1, maskp, nmaskbits);
67175
67176 }
67177@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67178 {
67179 unsigned a, b;
67180 int c, old_c, totaldigits;
67181- const char __user __force *ubuf = (const char __user __force *)buf;
67182+ const char __user *ubuf = (const char __force_user *)buf;
67183 int exp_digit, in_range;
67184
67185 totaldigits = c = 0;
67186@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67187 {
67188 if (!access_ok(VERIFY_READ, ubuf, ulen))
67189 return -EFAULT;
67190- return __bitmap_parselist((const char __force *)ubuf,
67191+ return __bitmap_parselist((const char __force_kernel *)ubuf,
67192 ulen, 1, maskp, nmaskbits);
67193 }
67194 EXPORT_SYMBOL(bitmap_parselist_user);
67195diff --git a/lib/bug.c b/lib/bug.c
67196index 1955209..cbbb2ad 100644
67197--- a/lib/bug.c
67198+++ b/lib/bug.c
67199@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67200 return BUG_TRAP_TYPE_NONE;
67201
67202 bug = find_bug(bugaddr);
67203+ if (!bug)
67204+ return BUG_TRAP_TYPE_NONE;
67205
67206 file = NULL;
67207 line = 0;
67208diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67209index a78b7c6..2c73084 100644
67210--- a/lib/debugobjects.c
67211+++ b/lib/debugobjects.c
67212@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67213 if (limit > 4)
67214 return;
67215
67216- is_on_stack = object_is_on_stack(addr);
67217+ is_on_stack = object_starts_on_stack(addr);
67218 if (is_on_stack == onstack)
67219 return;
67220
67221diff --git a/lib/devres.c b/lib/devres.c
67222index 7c0e953..f642b5c 100644
67223--- a/lib/devres.c
67224+++ b/lib/devres.c
67225@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67226 void devm_iounmap(struct device *dev, void __iomem *addr)
67227 {
67228 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67229- (void *)addr));
67230+ (void __force *)addr));
67231 iounmap(addr);
67232 }
67233 EXPORT_SYMBOL(devm_iounmap);
67234@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67235 {
67236 ioport_unmap(addr);
67237 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67238- devm_ioport_map_match, (void *)addr));
67239+ devm_ioport_map_match, (void __force *)addr));
67240 }
67241 EXPORT_SYMBOL(devm_ioport_unmap);
67242
67243diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67244index fea790a..ebb0e82 100644
67245--- a/lib/dma-debug.c
67246+++ b/lib/dma-debug.c
67247@@ -925,7 +925,7 @@ out:
67248
67249 static void check_for_stack(struct device *dev, void *addr)
67250 {
67251- if (object_is_on_stack(addr))
67252+ if (object_starts_on_stack(addr))
67253 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67254 "stack [addr=%p]\n", addr);
67255 }
67256diff --git a/lib/extable.c b/lib/extable.c
67257index 4cac81e..63e9b8f 100644
67258--- a/lib/extable.c
67259+++ b/lib/extable.c
67260@@ -13,6 +13,7 @@
67261 #include <linux/init.h>
67262 #include <linux/sort.h>
67263 #include <asm/uaccess.h>
67264+#include <asm/pgtable.h>
67265
67266 #ifndef ARCH_HAS_SORT_EXTABLE
67267 /*
67268@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67269 void sort_extable(struct exception_table_entry *start,
67270 struct exception_table_entry *finish)
67271 {
67272+ pax_open_kernel();
67273 sort(start, finish - start, sizeof(struct exception_table_entry),
67274 cmp_ex, NULL);
67275+ pax_close_kernel();
67276 }
67277
67278 #ifdef CONFIG_MODULES
67279diff --git a/lib/inflate.c b/lib/inflate.c
67280index 013a761..c28f3fc 100644
67281--- a/lib/inflate.c
67282+++ b/lib/inflate.c
67283@@ -269,7 +269,7 @@ static void free(void *where)
67284 malloc_ptr = free_mem_ptr;
67285 }
67286 #else
67287-#define malloc(a) kmalloc(a, GFP_KERNEL)
67288+#define malloc(a) kmalloc((a), GFP_KERNEL)
67289 #define free(a) kfree(a)
67290 #endif
67291
67292diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67293index bd2bea9..6b3c95e 100644
67294--- a/lib/is_single_threaded.c
67295+++ b/lib/is_single_threaded.c
67296@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67297 struct task_struct *p, *t;
67298 bool ret;
67299
67300+ if (!mm)
67301+ return true;
67302+
67303 if (atomic_read(&task->signal->live) != 1)
67304 return false;
67305
67306diff --git a/lib/kref.c b/lib/kref.c
67307index 3efb882..8492f4c 100644
67308--- a/lib/kref.c
67309+++ b/lib/kref.c
67310@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67311 */
67312 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67313 {
67314- WARN_ON(release == NULL);
67315+ BUG_ON(release == NULL);
67316 WARN_ON(release == (void (*)(struct kref *))kfree);
67317
67318 if (atomic_dec_and_test(&kref->refcount)) {
67319diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67320index d9df745..e73c2fe 100644
67321--- a/lib/radix-tree.c
67322+++ b/lib/radix-tree.c
67323@@ -80,7 +80,7 @@ struct radix_tree_preload {
67324 int nr;
67325 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67326 };
67327-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67328+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67329
67330 static inline void *ptr_to_indirect(void *ptr)
67331 {
67332diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67333index 993599e..84dc70e 100644
67334--- a/lib/vsprintf.c
67335+++ b/lib/vsprintf.c
67336@@ -16,6 +16,9 @@
67337 * - scnprintf and vscnprintf
67338 */
67339
67340+#ifdef CONFIG_GRKERNSEC_HIDESYM
67341+#define __INCLUDED_BY_HIDESYM 1
67342+#endif
67343 #include <stdarg.h>
67344 #include <linux/module.h>
67345 #include <linux/types.h>
67346@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67347 char sym[KSYM_SYMBOL_LEN];
67348 if (ext == 'B')
67349 sprint_backtrace(sym, value);
67350- else if (ext != 'f' && ext != 's')
67351+ else if (ext != 'f' && ext != 's' && ext != 'a')
67352 sprint_symbol(sym, value);
67353 else
67354 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67355@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67356 return string(buf, end, uuid, spec);
67357 }
67358
67359+#ifdef CONFIG_GRKERNSEC_HIDESYM
67360+int kptr_restrict __read_mostly = 2;
67361+#else
67362 int kptr_restrict __read_mostly;
67363+#endif
67364
67365 /*
67366 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67367@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67368 * - 'S' For symbolic direct pointers with offset
67369 * - 's' For symbolic direct pointers without offset
67370 * - 'B' For backtraced symbolic direct pointers with offset
67371+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67372+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67373 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67374 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67375 * - 'M' For a 6-byte MAC address, it prints the address in the
67376@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67377 {
67378 if (!ptr && *fmt != 'K') {
67379 /*
67380- * Print (null) with the same width as a pointer so it makes
67381+ * Print (nil) with the same width as a pointer so it makes
67382 * tabular output look nice.
67383 */
67384 if (spec.field_width == -1)
67385 spec.field_width = 2 * sizeof(void *);
67386- return string(buf, end, "(null)", spec);
67387+ return string(buf, end, "(nil)", spec);
67388 }
67389
67390 switch (*fmt) {
67391@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67392 /* Fallthrough */
67393 case 'S':
67394 case 's':
67395+#ifdef CONFIG_GRKERNSEC_HIDESYM
67396+ break;
67397+#else
67398+ return symbol_string(buf, end, ptr, spec, *fmt);
67399+#endif
67400+ case 'A':
67401+ case 'a':
67402 case 'B':
67403 return symbol_string(buf, end, ptr, spec, *fmt);
67404 case 'R':
67405@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67406 typeof(type) value; \
67407 if (sizeof(type) == 8) { \
67408 args = PTR_ALIGN(args, sizeof(u32)); \
67409- *(u32 *)&value = *(u32 *)args; \
67410- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67411+ *(u32 *)&value = *(const u32 *)args; \
67412+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67413 } else { \
67414 args = PTR_ALIGN(args, sizeof(type)); \
67415- value = *(typeof(type) *)args; \
67416+ value = *(const typeof(type) *)args; \
67417 } \
67418 args += sizeof(type); \
67419 value; \
67420@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67421 case FORMAT_TYPE_STR: {
67422 const char *str_arg = args;
67423 args += strlen(str_arg) + 1;
67424- str = string(str, end, (char *)str_arg, spec);
67425+ str = string(str, end, str_arg, spec);
67426 break;
67427 }
67428
67429diff --git a/localversion-grsec b/localversion-grsec
67430new file mode 100644
67431index 0000000..7cd6065
67432--- /dev/null
67433+++ b/localversion-grsec
67434@@ -0,0 +1 @@
67435+-grsec
67436diff --git a/mm/Kconfig b/mm/Kconfig
67437index 011b110..b492af2 100644
67438--- a/mm/Kconfig
67439+++ b/mm/Kconfig
67440@@ -241,10 +241,10 @@ config KSM
67441 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67442
67443 config DEFAULT_MMAP_MIN_ADDR
67444- int "Low address space to protect from user allocation"
67445+ int "Low address space to protect from user allocation"
67446 depends on MMU
67447- default 4096
67448- help
67449+ default 65536
67450+ help
67451 This is the portion of low virtual memory which should be protected
67452 from userspace allocation. Keeping a user from writing to low pages
67453 can help reduce the impact of kernel NULL pointer bugs.
67454diff --git a/mm/filemap.c b/mm/filemap.c
67455index 90286a4..f441caa 100644
67456--- a/mm/filemap.c
67457+++ b/mm/filemap.c
67458@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67459 struct address_space *mapping = file->f_mapping;
67460
67461 if (!mapping->a_ops->readpage)
67462- return -ENOEXEC;
67463+ return -ENODEV;
67464 file_accessed(file);
67465 vma->vm_ops = &generic_file_vm_ops;
67466 vma->vm_flags |= VM_CAN_NONLINEAR;
67467@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67468 *pos = i_size_read(inode);
67469
67470 if (limit != RLIM_INFINITY) {
67471+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67472 if (*pos >= limit) {
67473 send_sig(SIGXFSZ, current, 0);
67474 return -EFBIG;
67475diff --git a/mm/fremap.c b/mm/fremap.c
67476index 9ed4fd4..c42648d 100644
67477--- a/mm/fremap.c
67478+++ b/mm/fremap.c
67479@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67480 retry:
67481 vma = find_vma(mm, start);
67482
67483+#ifdef CONFIG_PAX_SEGMEXEC
67484+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67485+ goto out;
67486+#endif
67487+
67488 /*
67489 * Make sure the vma is shared, that it supports prefaulting,
67490 * and that the remapped range is valid and fully within
67491diff --git a/mm/highmem.c b/mm/highmem.c
67492index 57d82c6..e9e0552 100644
67493--- a/mm/highmem.c
67494+++ b/mm/highmem.c
67495@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67496 * So no dangers, even with speculative execution.
67497 */
67498 page = pte_page(pkmap_page_table[i]);
67499+ pax_open_kernel();
67500 pte_clear(&init_mm, (unsigned long)page_address(page),
67501 &pkmap_page_table[i]);
67502-
67503+ pax_close_kernel();
67504 set_page_address(page, NULL);
67505 need_flush = 1;
67506 }
67507@@ -186,9 +187,11 @@ start:
67508 }
67509 }
67510 vaddr = PKMAP_ADDR(last_pkmap_nr);
67511+
67512+ pax_open_kernel();
67513 set_pte_at(&init_mm, vaddr,
67514 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67515-
67516+ pax_close_kernel();
67517 pkmap_count[last_pkmap_nr] = 1;
67518 set_page_address(page, (void *)vaddr);
67519
67520diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67521index 36b3d98..584cb54 100644
67522--- a/mm/huge_memory.c
67523+++ b/mm/huge_memory.c
67524@@ -703,7 +703,7 @@ out:
67525 * run pte_offset_map on the pmd, if an huge pmd could
67526 * materialize from under us from a different thread.
67527 */
67528- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67529+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67530 return VM_FAULT_OOM;
67531 /* if an huge pmd materialized from under us just retry later */
67532 if (unlikely(pmd_trans_huge(*pmd)))
67533diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67534index 2316840..b418671 100644
67535--- a/mm/hugetlb.c
67536+++ b/mm/hugetlb.c
67537@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67538 return 1;
67539 }
67540
67541+#ifdef CONFIG_PAX_SEGMEXEC
67542+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67543+{
67544+ struct mm_struct *mm = vma->vm_mm;
67545+ struct vm_area_struct *vma_m;
67546+ unsigned long address_m;
67547+ pte_t *ptep_m;
67548+
67549+ vma_m = pax_find_mirror_vma(vma);
67550+ if (!vma_m)
67551+ return;
67552+
67553+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67554+ address_m = address + SEGMEXEC_TASK_SIZE;
67555+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67556+ get_page(page_m);
67557+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67558+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67559+}
67560+#endif
67561+
67562 /*
67563 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67564 */
67565@@ -2450,6 +2471,11 @@ retry_avoidcopy:
67566 make_huge_pte(vma, new_page, 1));
67567 page_remove_rmap(old_page);
67568 hugepage_add_new_anon_rmap(new_page, vma, address);
67569+
67570+#ifdef CONFIG_PAX_SEGMEXEC
67571+ pax_mirror_huge_pte(vma, address, new_page);
67572+#endif
67573+
67574 /* Make the old page be freed below */
67575 new_page = old_page;
67576 mmu_notifier_invalidate_range_end(mm,
67577@@ -2601,6 +2627,10 @@ retry:
67578 && (vma->vm_flags & VM_SHARED)));
67579 set_huge_pte_at(mm, address, ptep, new_pte);
67580
67581+#ifdef CONFIG_PAX_SEGMEXEC
67582+ pax_mirror_huge_pte(vma, address, page);
67583+#endif
67584+
67585 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67586 /* Optimization, do the COW without a second fault */
67587 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67588@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67589 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67590 struct hstate *h = hstate_vma(vma);
67591
67592+#ifdef CONFIG_PAX_SEGMEXEC
67593+ struct vm_area_struct *vma_m;
67594+#endif
67595+
67596 ptep = huge_pte_offset(mm, address);
67597 if (ptep) {
67598 entry = huge_ptep_get(ptep);
67599@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67600 VM_FAULT_SET_HINDEX(h - hstates);
67601 }
67602
67603+#ifdef CONFIG_PAX_SEGMEXEC
67604+ vma_m = pax_find_mirror_vma(vma);
67605+ if (vma_m) {
67606+ unsigned long address_m;
67607+
67608+ if (vma->vm_start > vma_m->vm_start) {
67609+ address_m = address;
67610+ address -= SEGMEXEC_TASK_SIZE;
67611+ vma = vma_m;
67612+ h = hstate_vma(vma);
67613+ } else
67614+ address_m = address + SEGMEXEC_TASK_SIZE;
67615+
67616+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67617+ return VM_FAULT_OOM;
67618+ address_m &= HPAGE_MASK;
67619+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67620+ }
67621+#endif
67622+
67623 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67624 if (!ptep)
67625 return VM_FAULT_OOM;
67626diff --git a/mm/internal.h b/mm/internal.h
67627index 2189af4..f2ca332 100644
67628--- a/mm/internal.h
67629+++ b/mm/internal.h
67630@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67631 * in mm/page_alloc.c
67632 */
67633 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67634+extern void free_compound_page(struct page *page);
67635 extern void prep_compound_page(struct page *page, unsigned long order);
67636 #ifdef CONFIG_MEMORY_FAILURE
67637 extern bool is_free_buddy_page(struct page *page);
67638diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67639index f3b2a00..61da94d 100644
67640--- a/mm/kmemleak.c
67641+++ b/mm/kmemleak.c
67642@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67643
67644 for (i = 0; i < object->trace_len; i++) {
67645 void *ptr = (void *)object->trace[i];
67646- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67647+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67648 }
67649 }
67650
67651diff --git a/mm/maccess.c b/mm/maccess.c
67652index d53adf9..03a24bf 100644
67653--- a/mm/maccess.c
67654+++ b/mm/maccess.c
67655@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67656 set_fs(KERNEL_DS);
67657 pagefault_disable();
67658 ret = __copy_from_user_inatomic(dst,
67659- (__force const void __user *)src, size);
67660+ (const void __force_user *)src, size);
67661 pagefault_enable();
67662 set_fs(old_fs);
67663
67664@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67665
67666 set_fs(KERNEL_DS);
67667 pagefault_disable();
67668- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67669+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67670 pagefault_enable();
67671 set_fs(old_fs);
67672
67673diff --git a/mm/madvise.c b/mm/madvise.c
67674index 74bf193..feb6fd3 100644
67675--- a/mm/madvise.c
67676+++ b/mm/madvise.c
67677@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67678 pgoff_t pgoff;
67679 unsigned long new_flags = vma->vm_flags;
67680
67681+#ifdef CONFIG_PAX_SEGMEXEC
67682+ struct vm_area_struct *vma_m;
67683+#endif
67684+
67685 switch (behavior) {
67686 case MADV_NORMAL:
67687 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67688@@ -110,6 +114,13 @@ success:
67689 /*
67690 * vm_flags is protected by the mmap_sem held in write mode.
67691 */
67692+
67693+#ifdef CONFIG_PAX_SEGMEXEC
67694+ vma_m = pax_find_mirror_vma(vma);
67695+ if (vma_m)
67696+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67697+#endif
67698+
67699 vma->vm_flags = new_flags;
67700
67701 out:
67702@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67703 struct vm_area_struct ** prev,
67704 unsigned long start, unsigned long end)
67705 {
67706+
67707+#ifdef CONFIG_PAX_SEGMEXEC
67708+ struct vm_area_struct *vma_m;
67709+#endif
67710+
67711 *prev = vma;
67712 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67713 return -EINVAL;
67714@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67715 zap_page_range(vma, start, end - start, &details);
67716 } else
67717 zap_page_range(vma, start, end - start, NULL);
67718+
67719+#ifdef CONFIG_PAX_SEGMEXEC
67720+ vma_m = pax_find_mirror_vma(vma);
67721+ if (vma_m) {
67722+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67723+ struct zap_details details = {
67724+ .nonlinear_vma = vma_m,
67725+ .last_index = ULONG_MAX,
67726+ };
67727+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67728+ } else
67729+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67730+ }
67731+#endif
67732+
67733 return 0;
67734 }
67735
67736@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67737 if (end < start)
67738 goto out;
67739
67740+#ifdef CONFIG_PAX_SEGMEXEC
67741+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67742+ if (end > SEGMEXEC_TASK_SIZE)
67743+ goto out;
67744+ } else
67745+#endif
67746+
67747+ if (end > TASK_SIZE)
67748+ goto out;
67749+
67750 error = 0;
67751 if (end == start)
67752 goto out;
67753diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67754index 06d3479..0778eef 100644
67755--- a/mm/memory-failure.c
67756+++ b/mm/memory-failure.c
67757@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67758
67759 int sysctl_memory_failure_recovery __read_mostly = 1;
67760
67761-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67762+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67763
67764 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67765
67766@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67767 si.si_signo = SIGBUS;
67768 si.si_errno = 0;
67769 si.si_code = BUS_MCEERR_AO;
67770- si.si_addr = (void *)addr;
67771+ si.si_addr = (void __user *)addr;
67772 #ifdef __ARCH_SI_TRAPNO
67773 si.si_trapno = trapno;
67774 #endif
67775@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67776 }
67777
67778 nr_pages = 1 << compound_trans_order(hpage);
67779- atomic_long_add(nr_pages, &mce_bad_pages);
67780+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67781
67782 /*
67783 * We need/can do nothing about count=0 pages.
67784@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67785 if (!PageHWPoison(hpage)
67786 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67787 || (p != hpage && TestSetPageHWPoison(hpage))) {
67788- atomic_long_sub(nr_pages, &mce_bad_pages);
67789+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67790 return 0;
67791 }
67792 set_page_hwpoison_huge_page(hpage);
67793@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67794 }
67795 if (hwpoison_filter(p)) {
67796 if (TestClearPageHWPoison(p))
67797- atomic_long_sub(nr_pages, &mce_bad_pages);
67798+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67799 unlock_page(hpage);
67800 put_page(hpage);
67801 return 0;
67802@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67803 return 0;
67804 }
67805 if (TestClearPageHWPoison(p))
67806- atomic_long_sub(nr_pages, &mce_bad_pages);
67807+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67808 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67809 return 0;
67810 }
67811@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67812 */
67813 if (TestClearPageHWPoison(page)) {
67814 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67815- atomic_long_sub(nr_pages, &mce_bad_pages);
67816+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67817 freeit = 1;
67818 if (PageHuge(page))
67819 clear_page_hwpoison_huge_page(page);
67820@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67821 }
67822 done:
67823 if (!PageHWPoison(hpage))
67824- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67825+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67826 set_page_hwpoison_huge_page(hpage);
67827 dequeue_hwpoisoned_huge_page(hpage);
67828 /* keep elevated page count for bad page */
67829@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67830 return ret;
67831
67832 done:
67833- atomic_long_add(1, &mce_bad_pages);
67834+ atomic_long_add_unchecked(1, &mce_bad_pages);
67835 SetPageHWPoison(page);
67836 /* keep elevated page count for bad page */
67837 return ret;
67838diff --git a/mm/memory.c b/mm/memory.c
67839index 829d437..3d3926a 100644
67840--- a/mm/memory.c
67841+++ b/mm/memory.c
67842@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67843 return;
67844
67845 pmd = pmd_offset(pud, start);
67846+
67847+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67848 pud_clear(pud);
67849 pmd_free_tlb(tlb, pmd, start);
67850+#endif
67851+
67852 }
67853
67854 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67855@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67856 if (end - 1 > ceiling - 1)
67857 return;
67858
67859+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67860 pud = pud_offset(pgd, start);
67861 pgd_clear(pgd);
67862 pud_free_tlb(tlb, pud, start);
67863+#endif
67864+
67865 }
67866
67867 /*
67868@@ -1566,12 +1573,6 @@ no_page_table:
67869 return page;
67870 }
67871
67872-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67873-{
67874- return stack_guard_page_start(vma, addr) ||
67875- stack_guard_page_end(vma, addr+PAGE_SIZE);
67876-}
67877-
67878 /**
67879 * __get_user_pages() - pin user pages in memory
67880 * @tsk: task_struct of target task
67881@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67882 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67883 i = 0;
67884
67885- do {
67886+ while (nr_pages) {
67887 struct vm_area_struct *vma;
67888
67889- vma = find_extend_vma(mm, start);
67890+ vma = find_vma(mm, start);
67891 if (!vma && in_gate_area(mm, start)) {
67892 unsigned long pg = start & PAGE_MASK;
67893 pgd_t *pgd;
67894@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67895 goto next_page;
67896 }
67897
67898- if (!vma ||
67899+ if (!vma || start < vma->vm_start ||
67900 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67901 !(vm_flags & vma->vm_flags))
67902 return i ? : -EFAULT;
67903@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67904 int ret;
67905 unsigned int fault_flags = 0;
67906
67907- /* For mlock, just skip the stack guard page. */
67908- if (foll_flags & FOLL_MLOCK) {
67909- if (stack_guard_page(vma, start))
67910- goto next_page;
67911- }
67912 if (foll_flags & FOLL_WRITE)
67913 fault_flags |= FAULT_FLAG_WRITE;
67914 if (nonblocking)
67915@@ -1800,7 +1796,7 @@ next_page:
67916 start += PAGE_SIZE;
67917 nr_pages--;
67918 } while (nr_pages && start < vma->vm_end);
67919- } while (nr_pages);
67920+ }
67921 return i;
67922 }
67923 EXPORT_SYMBOL(__get_user_pages);
67924@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
67925 page_add_file_rmap(page);
67926 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67927
67928+#ifdef CONFIG_PAX_SEGMEXEC
67929+ pax_mirror_file_pte(vma, addr, page, ptl);
67930+#endif
67931+
67932 retval = 0;
67933 pte_unmap_unlock(pte, ptl);
67934 return retval;
67935@@ -2041,10 +2041,22 @@ out:
67936 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67937 struct page *page)
67938 {
67939+
67940+#ifdef CONFIG_PAX_SEGMEXEC
67941+ struct vm_area_struct *vma_m;
67942+#endif
67943+
67944 if (addr < vma->vm_start || addr >= vma->vm_end)
67945 return -EFAULT;
67946 if (!page_count(page))
67947 return -EINVAL;
67948+
67949+#ifdef CONFIG_PAX_SEGMEXEC
67950+ vma_m = pax_find_mirror_vma(vma);
67951+ if (vma_m)
67952+ vma_m->vm_flags |= VM_INSERTPAGE;
67953+#endif
67954+
67955 vma->vm_flags |= VM_INSERTPAGE;
67956 return insert_page(vma, addr, page, vma->vm_page_prot);
67957 }
67958@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
67959 unsigned long pfn)
67960 {
67961 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67962+ BUG_ON(vma->vm_mirror);
67963
67964 if (addr < vma->vm_start || addr >= vma->vm_end)
67965 return -EFAULT;
67966@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
67967 copy_user_highpage(dst, src, va, vma);
67968 }
67969
67970+#ifdef CONFIG_PAX_SEGMEXEC
67971+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67972+{
67973+ struct mm_struct *mm = vma->vm_mm;
67974+ spinlock_t *ptl;
67975+ pte_t *pte, entry;
67976+
67977+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67978+ entry = *pte;
67979+ if (!pte_present(entry)) {
67980+ if (!pte_none(entry)) {
67981+ BUG_ON(pte_file(entry));
67982+ free_swap_and_cache(pte_to_swp_entry(entry));
67983+ pte_clear_not_present_full(mm, address, pte, 0);
67984+ }
67985+ } else {
67986+ struct page *page;
67987+
67988+ flush_cache_page(vma, address, pte_pfn(entry));
67989+ entry = ptep_clear_flush(vma, address, pte);
67990+ BUG_ON(pte_dirty(entry));
67991+ page = vm_normal_page(vma, address, entry);
67992+ if (page) {
67993+ update_hiwater_rss(mm);
67994+ if (PageAnon(page))
67995+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67996+ else
67997+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67998+ page_remove_rmap(page);
67999+ page_cache_release(page);
68000+ }
68001+ }
68002+ pte_unmap_unlock(pte, ptl);
68003+}
68004+
68005+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68006+ *
68007+ * the ptl of the lower mapped page is held on entry and is not released on exit
68008+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68009+ */
68010+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68011+{
68012+ struct mm_struct *mm = vma->vm_mm;
68013+ unsigned long address_m;
68014+ spinlock_t *ptl_m;
68015+ struct vm_area_struct *vma_m;
68016+ pmd_t *pmd_m;
68017+ pte_t *pte_m, entry_m;
68018+
68019+ BUG_ON(!page_m || !PageAnon(page_m));
68020+
68021+ vma_m = pax_find_mirror_vma(vma);
68022+ if (!vma_m)
68023+ return;
68024+
68025+ BUG_ON(!PageLocked(page_m));
68026+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68027+ address_m = address + SEGMEXEC_TASK_SIZE;
68028+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68029+ pte_m = pte_offset_map(pmd_m, address_m);
68030+ ptl_m = pte_lockptr(mm, pmd_m);
68031+ if (ptl != ptl_m) {
68032+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68033+ if (!pte_none(*pte_m))
68034+ goto out;
68035+ }
68036+
68037+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68038+ page_cache_get(page_m);
68039+ page_add_anon_rmap(page_m, vma_m, address_m);
68040+ inc_mm_counter_fast(mm, MM_ANONPAGES);
68041+ set_pte_at(mm, address_m, pte_m, entry_m);
68042+ update_mmu_cache(vma_m, address_m, entry_m);
68043+out:
68044+ if (ptl != ptl_m)
68045+ spin_unlock(ptl_m);
68046+ pte_unmap(pte_m);
68047+ unlock_page(page_m);
68048+}
68049+
68050+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68051+{
68052+ struct mm_struct *mm = vma->vm_mm;
68053+ unsigned long address_m;
68054+ spinlock_t *ptl_m;
68055+ struct vm_area_struct *vma_m;
68056+ pmd_t *pmd_m;
68057+ pte_t *pte_m, entry_m;
68058+
68059+ BUG_ON(!page_m || PageAnon(page_m));
68060+
68061+ vma_m = pax_find_mirror_vma(vma);
68062+ if (!vma_m)
68063+ return;
68064+
68065+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68066+ address_m = address + SEGMEXEC_TASK_SIZE;
68067+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68068+ pte_m = pte_offset_map(pmd_m, address_m);
68069+ ptl_m = pte_lockptr(mm, pmd_m);
68070+ if (ptl != ptl_m) {
68071+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68072+ if (!pte_none(*pte_m))
68073+ goto out;
68074+ }
68075+
68076+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68077+ page_cache_get(page_m);
68078+ page_add_file_rmap(page_m);
68079+ inc_mm_counter_fast(mm, MM_FILEPAGES);
68080+ set_pte_at(mm, address_m, pte_m, entry_m);
68081+ update_mmu_cache(vma_m, address_m, entry_m);
68082+out:
68083+ if (ptl != ptl_m)
68084+ spin_unlock(ptl_m);
68085+ pte_unmap(pte_m);
68086+}
68087+
68088+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68089+{
68090+ struct mm_struct *mm = vma->vm_mm;
68091+ unsigned long address_m;
68092+ spinlock_t *ptl_m;
68093+ struct vm_area_struct *vma_m;
68094+ pmd_t *pmd_m;
68095+ pte_t *pte_m, entry_m;
68096+
68097+ vma_m = pax_find_mirror_vma(vma);
68098+ if (!vma_m)
68099+ return;
68100+
68101+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68102+ address_m = address + SEGMEXEC_TASK_SIZE;
68103+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68104+ pte_m = pte_offset_map(pmd_m, address_m);
68105+ ptl_m = pte_lockptr(mm, pmd_m);
68106+ if (ptl != ptl_m) {
68107+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68108+ if (!pte_none(*pte_m))
68109+ goto out;
68110+ }
68111+
68112+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68113+ set_pte_at(mm, address_m, pte_m, entry_m);
68114+out:
68115+ if (ptl != ptl_m)
68116+ spin_unlock(ptl_m);
68117+ pte_unmap(pte_m);
68118+}
68119+
68120+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68121+{
68122+ struct page *page_m;
68123+ pte_t entry;
68124+
68125+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68126+ goto out;
68127+
68128+ entry = *pte;
68129+ page_m = vm_normal_page(vma, address, entry);
68130+ if (!page_m)
68131+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68132+ else if (PageAnon(page_m)) {
68133+ if (pax_find_mirror_vma(vma)) {
68134+ pte_unmap_unlock(pte, ptl);
68135+ lock_page(page_m);
68136+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68137+ if (pte_same(entry, *pte))
68138+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68139+ else
68140+ unlock_page(page_m);
68141+ }
68142+ } else
68143+ pax_mirror_file_pte(vma, address, page_m, ptl);
68144+
68145+out:
68146+ pte_unmap_unlock(pte, ptl);
68147+}
68148+#endif
68149+
68150 /*
68151 * This routine handles present pages, when users try to write
68152 * to a shared page. It is done by copying the page to a new address
68153@@ -2656,6 +2849,12 @@ gotten:
68154 */
68155 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68156 if (likely(pte_same(*page_table, orig_pte))) {
68157+
68158+#ifdef CONFIG_PAX_SEGMEXEC
68159+ if (pax_find_mirror_vma(vma))
68160+ BUG_ON(!trylock_page(new_page));
68161+#endif
68162+
68163 if (old_page) {
68164 if (!PageAnon(old_page)) {
68165 dec_mm_counter_fast(mm, MM_FILEPAGES);
68166@@ -2707,6 +2906,10 @@ gotten:
68167 page_remove_rmap(old_page);
68168 }
68169
68170+#ifdef CONFIG_PAX_SEGMEXEC
68171+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68172+#endif
68173+
68174 /* Free the old page.. */
68175 new_page = old_page;
68176 ret |= VM_FAULT_WRITE;
68177@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68178 swap_free(entry);
68179 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68180 try_to_free_swap(page);
68181+
68182+#ifdef CONFIG_PAX_SEGMEXEC
68183+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68184+#endif
68185+
68186 unlock_page(page);
68187 if (swapcache) {
68188 /*
68189@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68190
68191 /* No need to invalidate - it was non-present before */
68192 update_mmu_cache(vma, address, page_table);
68193+
68194+#ifdef CONFIG_PAX_SEGMEXEC
68195+ pax_mirror_anon_pte(vma, address, page, ptl);
68196+#endif
68197+
68198 unlock:
68199 pte_unmap_unlock(page_table, ptl);
68200 out:
68201@@ -3028,40 +3241,6 @@ out_release:
68202 }
68203
68204 /*
68205- * This is like a special single-page "expand_{down|up}wards()",
68206- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68207- * doesn't hit another vma.
68208- */
68209-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68210-{
68211- address &= PAGE_MASK;
68212- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68213- struct vm_area_struct *prev = vma->vm_prev;
68214-
68215- /*
68216- * Is there a mapping abutting this one below?
68217- *
68218- * That's only ok if it's the same stack mapping
68219- * that has gotten split..
68220- */
68221- if (prev && prev->vm_end == address)
68222- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68223-
68224- expand_downwards(vma, address - PAGE_SIZE);
68225- }
68226- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68227- struct vm_area_struct *next = vma->vm_next;
68228-
68229- /* As VM_GROWSDOWN but s/below/above/ */
68230- if (next && next->vm_start == address + PAGE_SIZE)
68231- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68232-
68233- expand_upwards(vma, address + PAGE_SIZE);
68234- }
68235- return 0;
68236-}
68237-
68238-/*
68239 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68240 * but allow concurrent faults), and pte mapped but not yet locked.
68241 * We return with mmap_sem still held, but pte unmapped and unlocked.
68242@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68243 unsigned long address, pte_t *page_table, pmd_t *pmd,
68244 unsigned int flags)
68245 {
68246- struct page *page;
68247+ struct page *page = NULL;
68248 spinlock_t *ptl;
68249 pte_t entry;
68250
68251- pte_unmap(page_table);
68252-
68253- /* Check if we need to add a guard page to the stack */
68254- if (check_stack_guard_page(vma, address) < 0)
68255- return VM_FAULT_SIGBUS;
68256-
68257- /* Use the zero-page for reads */
68258 if (!(flags & FAULT_FLAG_WRITE)) {
68259 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68260 vma->vm_page_prot));
68261- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68262+ ptl = pte_lockptr(mm, pmd);
68263+ spin_lock(ptl);
68264 if (!pte_none(*page_table))
68265 goto unlock;
68266 goto setpte;
68267 }
68268
68269 /* Allocate our own private page. */
68270+ pte_unmap(page_table);
68271+
68272 if (unlikely(anon_vma_prepare(vma)))
68273 goto oom;
68274 page = alloc_zeroed_user_highpage_movable(vma, address);
68275@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68276 if (!pte_none(*page_table))
68277 goto release;
68278
68279+#ifdef CONFIG_PAX_SEGMEXEC
68280+ if (pax_find_mirror_vma(vma))
68281+ BUG_ON(!trylock_page(page));
68282+#endif
68283+
68284 inc_mm_counter_fast(mm, MM_ANONPAGES);
68285 page_add_new_anon_rmap(page, vma, address);
68286 setpte:
68287@@ -3116,6 +3296,12 @@ setpte:
68288
68289 /* No need to invalidate - it was non-present before */
68290 update_mmu_cache(vma, address, page_table);
68291+
68292+#ifdef CONFIG_PAX_SEGMEXEC
68293+ if (page)
68294+ pax_mirror_anon_pte(vma, address, page, ptl);
68295+#endif
68296+
68297 unlock:
68298 pte_unmap_unlock(page_table, ptl);
68299 return 0;
68300@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68301 */
68302 /* Only go through if we didn't race with anybody else... */
68303 if (likely(pte_same(*page_table, orig_pte))) {
68304+
68305+#ifdef CONFIG_PAX_SEGMEXEC
68306+ if (anon && pax_find_mirror_vma(vma))
68307+ BUG_ON(!trylock_page(page));
68308+#endif
68309+
68310 flush_icache_page(vma, page);
68311 entry = mk_pte(page, vma->vm_page_prot);
68312 if (flags & FAULT_FLAG_WRITE)
68313@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68314
68315 /* no need to invalidate: a not-present page won't be cached */
68316 update_mmu_cache(vma, address, page_table);
68317+
68318+#ifdef CONFIG_PAX_SEGMEXEC
68319+ if (anon)
68320+ pax_mirror_anon_pte(vma, address, page, ptl);
68321+ else
68322+ pax_mirror_file_pte(vma, address, page, ptl);
68323+#endif
68324+
68325 } else {
68326 if (cow_page)
68327 mem_cgroup_uncharge_page(cow_page);
68328@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68329 if (flags & FAULT_FLAG_WRITE)
68330 flush_tlb_fix_spurious_fault(vma, address);
68331 }
68332+
68333+#ifdef CONFIG_PAX_SEGMEXEC
68334+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68335+ return 0;
68336+#endif
68337+
68338 unlock:
68339 pte_unmap_unlock(pte, ptl);
68340 return 0;
68341@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68342 pmd_t *pmd;
68343 pte_t *pte;
68344
68345+#ifdef CONFIG_PAX_SEGMEXEC
68346+ struct vm_area_struct *vma_m;
68347+#endif
68348+
68349 __set_current_state(TASK_RUNNING);
68350
68351 count_vm_event(PGFAULT);
68352@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68353 if (unlikely(is_vm_hugetlb_page(vma)))
68354 return hugetlb_fault(mm, vma, address, flags);
68355
68356+#ifdef CONFIG_PAX_SEGMEXEC
68357+ vma_m = pax_find_mirror_vma(vma);
68358+ if (vma_m) {
68359+ unsigned long address_m;
68360+ pgd_t *pgd_m;
68361+ pud_t *pud_m;
68362+ pmd_t *pmd_m;
68363+
68364+ if (vma->vm_start > vma_m->vm_start) {
68365+ address_m = address;
68366+ address -= SEGMEXEC_TASK_SIZE;
68367+ vma = vma_m;
68368+ } else
68369+ address_m = address + SEGMEXEC_TASK_SIZE;
68370+
68371+ pgd_m = pgd_offset(mm, address_m);
68372+ pud_m = pud_alloc(mm, pgd_m, address_m);
68373+ if (!pud_m)
68374+ return VM_FAULT_OOM;
68375+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68376+ if (!pmd_m)
68377+ return VM_FAULT_OOM;
68378+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68379+ return VM_FAULT_OOM;
68380+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68381+ }
68382+#endif
68383+
68384 pgd = pgd_offset(mm, address);
68385 pud = pud_alloc(mm, pgd, address);
68386 if (!pud)
68387@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68388 * run pte_offset_map on the pmd, if an huge pmd could
68389 * materialize from under us from a different thread.
68390 */
68391- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68392+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68393 return VM_FAULT_OOM;
68394 /* if an huge pmd materialized from under us just retry later */
68395 if (unlikely(pmd_trans_huge(*pmd)))
68396@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68397 gate_vma.vm_start = FIXADDR_USER_START;
68398 gate_vma.vm_end = FIXADDR_USER_END;
68399 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68400- gate_vma.vm_page_prot = __P101;
68401+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68402 /*
68403 * Make sure the vDSO gets into every core dump.
68404 * Dumping its contents makes post-mortem fully interpretable later
68405diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68406index c3fdbcb..2e8ef90 100644
68407--- a/mm/mempolicy.c
68408+++ b/mm/mempolicy.c
68409@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68410 unsigned long vmstart;
68411 unsigned long vmend;
68412
68413+#ifdef CONFIG_PAX_SEGMEXEC
68414+ struct vm_area_struct *vma_m;
68415+#endif
68416+
68417 vma = find_vma_prev(mm, start, &prev);
68418 if (!vma || vma->vm_start > start)
68419 return -EFAULT;
68420@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68421 err = policy_vma(vma, new_pol);
68422 if (err)
68423 goto out;
68424+
68425+#ifdef CONFIG_PAX_SEGMEXEC
68426+ vma_m = pax_find_mirror_vma(vma);
68427+ if (vma_m) {
68428+ err = policy_vma(vma_m, new_pol);
68429+ if (err)
68430+ goto out;
68431+ }
68432+#endif
68433+
68434 }
68435
68436 out:
68437@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68438
68439 if (end < start)
68440 return -EINVAL;
68441+
68442+#ifdef CONFIG_PAX_SEGMEXEC
68443+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68444+ if (end > SEGMEXEC_TASK_SIZE)
68445+ return -EINVAL;
68446+ } else
68447+#endif
68448+
68449+ if (end > TASK_SIZE)
68450+ return -EINVAL;
68451+
68452 if (end == start)
68453 return 0;
68454
68455@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68456 if (!mm)
68457 goto out;
68458
68459+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68460+ if (mm != current->mm &&
68461+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68462+ err = -EPERM;
68463+ goto out;
68464+ }
68465+#endif
68466+
68467 /*
68468 * Check if this process has the right to modify the specified
68469 * process. The right exists if the process has administrative
68470@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68471 rcu_read_lock();
68472 tcred = __task_cred(task);
68473 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68474- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68475- !capable(CAP_SYS_NICE)) {
68476+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68477 rcu_read_unlock();
68478 err = -EPERM;
68479 goto out;
68480diff --git a/mm/migrate.c b/mm/migrate.c
68481index 177aca4..ab3a744 100644
68482--- a/mm/migrate.c
68483+++ b/mm/migrate.c
68484@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68485 if (!mm)
68486 return -EINVAL;
68487
68488+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68489+ if (mm != current->mm &&
68490+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68491+ err = -EPERM;
68492+ goto out;
68493+ }
68494+#endif
68495+
68496 /*
68497 * Check if this process has the right to modify the specified
68498 * process. The right exists if the process has administrative
68499@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68500 rcu_read_lock();
68501 tcred = __task_cred(task);
68502 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68503- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68504- !capable(CAP_SYS_NICE)) {
68505+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68506 rcu_read_unlock();
68507 err = -EPERM;
68508 goto out;
68509diff --git a/mm/mlock.c b/mm/mlock.c
68510index 4f4f53b..9511904 100644
68511--- a/mm/mlock.c
68512+++ b/mm/mlock.c
68513@@ -13,6 +13,7 @@
68514 #include <linux/pagemap.h>
68515 #include <linux/mempolicy.h>
68516 #include <linux/syscalls.h>
68517+#include <linux/security.h>
68518 #include <linux/sched.h>
68519 #include <linux/export.h>
68520 #include <linux/rmap.h>
68521@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68522 return -EINVAL;
68523 if (end == start)
68524 return 0;
68525+ if (end > TASK_SIZE)
68526+ return -EINVAL;
68527+
68528 vma = find_vma_prev(current->mm, start, &prev);
68529 if (!vma || vma->vm_start > start)
68530 return -ENOMEM;
68531@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68532 for (nstart = start ; ; ) {
68533 vm_flags_t newflags;
68534
68535+#ifdef CONFIG_PAX_SEGMEXEC
68536+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68537+ break;
68538+#endif
68539+
68540 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68541
68542 newflags = vma->vm_flags | VM_LOCKED;
68543@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68544 lock_limit >>= PAGE_SHIFT;
68545
68546 /* check against resource limits */
68547+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68548 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68549 error = do_mlock(start, len, 1);
68550 up_write(&current->mm->mmap_sem);
68551@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68552 static int do_mlockall(int flags)
68553 {
68554 struct vm_area_struct * vma, * prev = NULL;
68555- unsigned int def_flags = 0;
68556
68557 if (flags & MCL_FUTURE)
68558- def_flags = VM_LOCKED;
68559- current->mm->def_flags = def_flags;
68560+ current->mm->def_flags |= VM_LOCKED;
68561+ else
68562+ current->mm->def_flags &= ~VM_LOCKED;
68563 if (flags == MCL_FUTURE)
68564 goto out;
68565
68566 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68567 vm_flags_t newflags;
68568
68569+#ifdef CONFIG_PAX_SEGMEXEC
68570+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68571+ break;
68572+#endif
68573+
68574+ BUG_ON(vma->vm_end > TASK_SIZE);
68575 newflags = vma->vm_flags | VM_LOCKED;
68576 if (!(flags & MCL_CURRENT))
68577 newflags &= ~VM_LOCKED;
68578@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68579 lock_limit >>= PAGE_SHIFT;
68580
68581 ret = -ENOMEM;
68582+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68583 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68584 capable(CAP_IPC_LOCK))
68585 ret = do_mlockall(flags);
68586diff --git a/mm/mmap.c b/mm/mmap.c
68587index eae90af..51ca80b 100644
68588--- a/mm/mmap.c
68589+++ b/mm/mmap.c
68590@@ -46,6 +46,16 @@
68591 #define arch_rebalance_pgtables(addr, len) (addr)
68592 #endif
68593
68594+static inline void verify_mm_writelocked(struct mm_struct *mm)
68595+{
68596+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68597+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68598+ up_read(&mm->mmap_sem);
68599+ BUG();
68600+ }
68601+#endif
68602+}
68603+
68604 static void unmap_region(struct mm_struct *mm,
68605 struct vm_area_struct *vma, struct vm_area_struct *prev,
68606 unsigned long start, unsigned long end);
68607@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68608 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68609 *
68610 */
68611-pgprot_t protection_map[16] = {
68612+pgprot_t protection_map[16] __read_only = {
68613 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68614 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68615 };
68616
68617-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68618+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68619 {
68620- return __pgprot(pgprot_val(protection_map[vm_flags &
68621+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68622 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68623 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68624+
68625+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68626+ if (!(__supported_pte_mask & _PAGE_NX) &&
68627+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68628+ (vm_flags & (VM_READ | VM_WRITE)))
68629+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68630+#endif
68631+
68632+ return prot;
68633 }
68634 EXPORT_SYMBOL(vm_get_page_prot);
68635
68636 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68637 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68638 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68639+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68640 /*
68641 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68642 * other variables. It can be updated by several CPUs frequently.
68643@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68644 struct vm_area_struct *next = vma->vm_next;
68645
68646 might_sleep();
68647+ BUG_ON(vma->vm_mirror);
68648 if (vma->vm_ops && vma->vm_ops->close)
68649 vma->vm_ops->close(vma);
68650 if (vma->vm_file) {
68651@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68652 * not page aligned -Ram Gupta
68653 */
68654 rlim = rlimit(RLIMIT_DATA);
68655+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68656 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68657 (mm->end_data - mm->start_data) > rlim)
68658 goto out;
68659@@ -689,6 +711,12 @@ static int
68660 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68661 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68662 {
68663+
68664+#ifdef CONFIG_PAX_SEGMEXEC
68665+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68666+ return 0;
68667+#endif
68668+
68669 if (is_mergeable_vma(vma, file, vm_flags) &&
68670 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68671 if (vma->vm_pgoff == vm_pgoff)
68672@@ -708,6 +736,12 @@ static int
68673 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68674 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68675 {
68676+
68677+#ifdef CONFIG_PAX_SEGMEXEC
68678+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68679+ return 0;
68680+#endif
68681+
68682 if (is_mergeable_vma(vma, file, vm_flags) &&
68683 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68684 pgoff_t vm_pglen;
68685@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68686 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68687 struct vm_area_struct *prev, unsigned long addr,
68688 unsigned long end, unsigned long vm_flags,
68689- struct anon_vma *anon_vma, struct file *file,
68690+ struct anon_vma *anon_vma, struct file *file,
68691 pgoff_t pgoff, struct mempolicy *policy)
68692 {
68693 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68694 struct vm_area_struct *area, *next;
68695 int err;
68696
68697+#ifdef CONFIG_PAX_SEGMEXEC
68698+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68699+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68700+
68701+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68702+#endif
68703+
68704 /*
68705 * We later require that vma->vm_flags == vm_flags,
68706 * so this tests vma->vm_flags & VM_SPECIAL, too.
68707@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68708 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68709 next = next->vm_next;
68710
68711+#ifdef CONFIG_PAX_SEGMEXEC
68712+ if (prev)
68713+ prev_m = pax_find_mirror_vma(prev);
68714+ if (area)
68715+ area_m = pax_find_mirror_vma(area);
68716+ if (next)
68717+ next_m = pax_find_mirror_vma(next);
68718+#endif
68719+
68720 /*
68721 * Can it merge with the predecessor?
68722 */
68723@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68724 /* cases 1, 6 */
68725 err = vma_adjust(prev, prev->vm_start,
68726 next->vm_end, prev->vm_pgoff, NULL);
68727- } else /* cases 2, 5, 7 */
68728+
68729+#ifdef CONFIG_PAX_SEGMEXEC
68730+ if (!err && prev_m)
68731+ err = vma_adjust(prev_m, prev_m->vm_start,
68732+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68733+#endif
68734+
68735+ } else { /* cases 2, 5, 7 */
68736 err = vma_adjust(prev, prev->vm_start,
68737 end, prev->vm_pgoff, NULL);
68738+
68739+#ifdef CONFIG_PAX_SEGMEXEC
68740+ if (!err && prev_m)
68741+ err = vma_adjust(prev_m, prev_m->vm_start,
68742+ end_m, prev_m->vm_pgoff, NULL);
68743+#endif
68744+
68745+ }
68746 if (err)
68747 return NULL;
68748 khugepaged_enter_vma_merge(prev);
68749@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68750 mpol_equal(policy, vma_policy(next)) &&
68751 can_vma_merge_before(next, vm_flags,
68752 anon_vma, file, pgoff+pglen)) {
68753- if (prev && addr < prev->vm_end) /* case 4 */
68754+ if (prev && addr < prev->vm_end) { /* case 4 */
68755 err = vma_adjust(prev, prev->vm_start,
68756 addr, prev->vm_pgoff, NULL);
68757- else /* cases 3, 8 */
68758+
68759+#ifdef CONFIG_PAX_SEGMEXEC
68760+ if (!err && prev_m)
68761+ err = vma_adjust(prev_m, prev_m->vm_start,
68762+ addr_m, prev_m->vm_pgoff, NULL);
68763+#endif
68764+
68765+ } else { /* cases 3, 8 */
68766 err = vma_adjust(area, addr, next->vm_end,
68767 next->vm_pgoff - pglen, NULL);
68768+
68769+#ifdef CONFIG_PAX_SEGMEXEC
68770+ if (!err && area_m)
68771+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68772+ next_m->vm_pgoff - pglen, NULL);
68773+#endif
68774+
68775+ }
68776 if (err)
68777 return NULL;
68778 khugepaged_enter_vma_merge(area);
68779@@ -921,14 +1001,11 @@ none:
68780 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68781 struct file *file, long pages)
68782 {
68783- const unsigned long stack_flags
68784- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68785-
68786 if (file) {
68787 mm->shared_vm += pages;
68788 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68789 mm->exec_vm += pages;
68790- } else if (flags & stack_flags)
68791+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68792 mm->stack_vm += pages;
68793 if (flags & (VM_RESERVED|VM_IO))
68794 mm->reserved_vm += pages;
68795@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68796 * (the exception is when the underlying filesystem is noexec
68797 * mounted, in which case we dont add PROT_EXEC.)
68798 */
68799- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68800+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68801 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68802 prot |= PROT_EXEC;
68803
68804@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68805 /* Obtain the address to map to. we verify (or select) it and ensure
68806 * that it represents a valid section of the address space.
68807 */
68808- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68809+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68810 if (addr & ~PAGE_MASK)
68811 return addr;
68812
68813@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68814 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68815 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68816
68817+#ifdef CONFIG_PAX_MPROTECT
68818+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68819+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68820+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68821+ gr_log_rwxmmap(file);
68822+
68823+#ifdef CONFIG_PAX_EMUPLT
68824+ vm_flags &= ~VM_EXEC;
68825+#else
68826+ return -EPERM;
68827+#endif
68828+
68829+ }
68830+
68831+ if (!(vm_flags & VM_EXEC))
68832+ vm_flags &= ~VM_MAYEXEC;
68833+#else
68834+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68835+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68836+#endif
68837+ else
68838+ vm_flags &= ~VM_MAYWRITE;
68839+ }
68840+#endif
68841+
68842+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68843+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68844+ vm_flags &= ~VM_PAGEEXEC;
68845+#endif
68846+
68847 if (flags & MAP_LOCKED)
68848 if (!can_do_mlock())
68849 return -EPERM;
68850@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68851 locked += mm->locked_vm;
68852 lock_limit = rlimit(RLIMIT_MEMLOCK);
68853 lock_limit >>= PAGE_SHIFT;
68854+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68855 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68856 return -EAGAIN;
68857 }
68858@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68859 if (error)
68860 return error;
68861
68862+ if (!gr_acl_handle_mmap(file, prot))
68863+ return -EACCES;
68864+
68865 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68866 }
68867 EXPORT_SYMBOL(do_mmap_pgoff);
68868@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68869 vm_flags_t vm_flags = vma->vm_flags;
68870
68871 /* If it was private or non-writable, the write bit is already clear */
68872- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68873+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68874 return 0;
68875
68876 /* The backer wishes to know when pages are first written to? */
68877@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68878 unsigned long charged = 0;
68879 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68880
68881+#ifdef CONFIG_PAX_SEGMEXEC
68882+ struct vm_area_struct *vma_m = NULL;
68883+#endif
68884+
68885+ /*
68886+ * mm->mmap_sem is required to protect against another thread
68887+ * changing the mappings in case we sleep.
68888+ */
68889+ verify_mm_writelocked(mm);
68890+
68891 /* Clear old maps */
68892 error = -ENOMEM;
68893-munmap_back:
68894 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68895 if (vma && vma->vm_start < addr + len) {
68896 if (do_munmap(mm, addr, len))
68897 return -ENOMEM;
68898- goto munmap_back;
68899+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68900+ BUG_ON(vma && vma->vm_start < addr + len);
68901 }
68902
68903 /* Check against address space limit. */
68904@@ -1258,6 +1379,16 @@ munmap_back:
68905 goto unacct_error;
68906 }
68907
68908+#ifdef CONFIG_PAX_SEGMEXEC
68909+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68910+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68911+ if (!vma_m) {
68912+ error = -ENOMEM;
68913+ goto free_vma;
68914+ }
68915+ }
68916+#endif
68917+
68918 vma->vm_mm = mm;
68919 vma->vm_start = addr;
68920 vma->vm_end = addr + len;
68921@@ -1281,6 +1412,19 @@ munmap_back:
68922 error = file->f_op->mmap(file, vma);
68923 if (error)
68924 goto unmap_and_free_vma;
68925+
68926+#ifdef CONFIG_PAX_SEGMEXEC
68927+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68928+ added_exe_file_vma(mm);
68929+#endif
68930+
68931+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68932+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68933+ vma->vm_flags |= VM_PAGEEXEC;
68934+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68935+ }
68936+#endif
68937+
68938 if (vm_flags & VM_EXECUTABLE)
68939 added_exe_file_vma(mm);
68940
68941@@ -1316,6 +1460,11 @@ munmap_back:
68942 vma_link(mm, vma, prev, rb_link, rb_parent);
68943 file = vma->vm_file;
68944
68945+#ifdef CONFIG_PAX_SEGMEXEC
68946+ if (vma_m)
68947+ BUG_ON(pax_mirror_vma(vma_m, vma));
68948+#endif
68949+
68950 /* Once vma denies write, undo our temporary denial count */
68951 if (correct_wcount)
68952 atomic_inc(&inode->i_writecount);
68953@@ -1324,6 +1473,7 @@ out:
68954
68955 mm->total_vm += len >> PAGE_SHIFT;
68956 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68957+ track_exec_limit(mm, addr, addr + len, vm_flags);
68958 if (vm_flags & VM_LOCKED) {
68959 if (!mlock_vma_pages_range(vma, addr, addr + len))
68960 mm->locked_vm += (len >> PAGE_SHIFT);
68961@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68962 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68963 charged = 0;
68964 free_vma:
68965+
68966+#ifdef CONFIG_PAX_SEGMEXEC
68967+ if (vma_m)
68968+ kmem_cache_free(vm_area_cachep, vma_m);
68969+#endif
68970+
68971 kmem_cache_free(vm_area_cachep, vma);
68972 unacct_error:
68973 if (charged)
68974@@ -1348,6 +1504,44 @@ unacct_error:
68975 return error;
68976 }
68977
68978+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68979+{
68980+ if (!vma) {
68981+#ifdef CONFIG_STACK_GROWSUP
68982+ if (addr > sysctl_heap_stack_gap)
68983+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68984+ else
68985+ vma = find_vma(current->mm, 0);
68986+ if (vma && (vma->vm_flags & VM_GROWSUP))
68987+ return false;
68988+#endif
68989+ return true;
68990+ }
68991+
68992+ if (addr + len > vma->vm_start)
68993+ return false;
68994+
68995+ if (vma->vm_flags & VM_GROWSDOWN)
68996+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68997+#ifdef CONFIG_STACK_GROWSUP
68998+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68999+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69000+#endif
69001+
69002+ return true;
69003+}
69004+
69005+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69006+{
69007+ if (vma->vm_start < len)
69008+ return -ENOMEM;
69009+ if (!(vma->vm_flags & VM_GROWSDOWN))
69010+ return vma->vm_start - len;
69011+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69012+ return vma->vm_start - len - sysctl_heap_stack_gap;
69013+ return -ENOMEM;
69014+}
69015+
69016 /* Get an address range which is currently unmapped.
69017 * For shmat() with addr=0.
69018 *
69019@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69020 if (flags & MAP_FIXED)
69021 return addr;
69022
69023+#ifdef CONFIG_PAX_RANDMMAP
69024+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69025+#endif
69026+
69027 if (addr) {
69028 addr = PAGE_ALIGN(addr);
69029- vma = find_vma(mm, addr);
69030- if (TASK_SIZE - len >= addr &&
69031- (!vma || addr + len <= vma->vm_start))
69032- return addr;
69033+ if (TASK_SIZE - len >= addr) {
69034+ vma = find_vma(mm, addr);
69035+ if (check_heap_stack_gap(vma, addr, len))
69036+ return addr;
69037+ }
69038 }
69039 if (len > mm->cached_hole_size) {
69040- start_addr = addr = mm->free_area_cache;
69041+ start_addr = addr = mm->free_area_cache;
69042 } else {
69043- start_addr = addr = TASK_UNMAPPED_BASE;
69044- mm->cached_hole_size = 0;
69045+ start_addr = addr = mm->mmap_base;
69046+ mm->cached_hole_size = 0;
69047 }
69048
69049 full_search:
69050@@ -1396,34 +1595,40 @@ full_search:
69051 * Start a new search - just in case we missed
69052 * some holes.
69053 */
69054- if (start_addr != TASK_UNMAPPED_BASE) {
69055- addr = TASK_UNMAPPED_BASE;
69056- start_addr = addr;
69057+ if (start_addr != mm->mmap_base) {
69058+ start_addr = addr = mm->mmap_base;
69059 mm->cached_hole_size = 0;
69060 goto full_search;
69061 }
69062 return -ENOMEM;
69063 }
69064- if (!vma || addr + len <= vma->vm_start) {
69065- /*
69066- * Remember the place where we stopped the search:
69067- */
69068- mm->free_area_cache = addr + len;
69069- return addr;
69070- }
69071+ if (check_heap_stack_gap(vma, addr, len))
69072+ break;
69073 if (addr + mm->cached_hole_size < vma->vm_start)
69074 mm->cached_hole_size = vma->vm_start - addr;
69075 addr = vma->vm_end;
69076 }
69077+
69078+ /*
69079+ * Remember the place where we stopped the search:
69080+ */
69081+ mm->free_area_cache = addr + len;
69082+ return addr;
69083 }
69084 #endif
69085
69086 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69087 {
69088+
69089+#ifdef CONFIG_PAX_SEGMEXEC
69090+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69091+ return;
69092+#endif
69093+
69094 /*
69095 * Is this a new hole at the lowest possible address?
69096 */
69097- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69098+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69099 mm->free_area_cache = addr;
69100 mm->cached_hole_size = ~0UL;
69101 }
69102@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69103 {
69104 struct vm_area_struct *vma;
69105 struct mm_struct *mm = current->mm;
69106- unsigned long addr = addr0;
69107+ unsigned long base = mm->mmap_base, addr = addr0;
69108
69109 /* requested length too big for entire address space */
69110 if (len > TASK_SIZE)
69111@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69112 if (flags & MAP_FIXED)
69113 return addr;
69114
69115+#ifdef CONFIG_PAX_RANDMMAP
69116+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69117+#endif
69118+
69119 /* requesting a specific address */
69120 if (addr) {
69121 addr = PAGE_ALIGN(addr);
69122- vma = find_vma(mm, addr);
69123- if (TASK_SIZE - len >= addr &&
69124- (!vma || addr + len <= vma->vm_start))
69125- return addr;
69126+ if (TASK_SIZE - len >= addr) {
69127+ vma = find_vma(mm, addr);
69128+ if (check_heap_stack_gap(vma, addr, len))
69129+ return addr;
69130+ }
69131 }
69132
69133 /* check if free_area_cache is useful for us */
69134@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69135 /* make sure it can fit in the remaining address space */
69136 if (addr > len) {
69137 vma = find_vma(mm, addr-len);
69138- if (!vma || addr <= vma->vm_start)
69139+ if (check_heap_stack_gap(vma, addr - len, len))
69140 /* remember the address as a hint for next time */
69141 return (mm->free_area_cache = addr-len);
69142 }
69143@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69144 * return with success:
69145 */
69146 vma = find_vma(mm, addr);
69147- if (!vma || addr+len <= vma->vm_start)
69148+ if (check_heap_stack_gap(vma, addr, len))
69149 /* remember the address as a hint for next time */
69150 return (mm->free_area_cache = addr);
69151
69152@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69153 mm->cached_hole_size = vma->vm_start - addr;
69154
69155 /* try just below the current vma->vm_start */
69156- addr = vma->vm_start-len;
69157- } while (len < vma->vm_start);
69158+ addr = skip_heap_stack_gap(vma, len);
69159+ } while (!IS_ERR_VALUE(addr));
69160
69161 bottomup:
69162 /*
69163@@ -1507,13 +1717,21 @@ bottomup:
69164 * can happen with large stack limits and large mmap()
69165 * allocations.
69166 */
69167+ mm->mmap_base = TASK_UNMAPPED_BASE;
69168+
69169+#ifdef CONFIG_PAX_RANDMMAP
69170+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69171+ mm->mmap_base += mm->delta_mmap;
69172+#endif
69173+
69174+ mm->free_area_cache = mm->mmap_base;
69175 mm->cached_hole_size = ~0UL;
69176- mm->free_area_cache = TASK_UNMAPPED_BASE;
69177 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69178 /*
69179 * Restore the topdown base:
69180 */
69181- mm->free_area_cache = mm->mmap_base;
69182+ mm->mmap_base = base;
69183+ mm->free_area_cache = base;
69184 mm->cached_hole_size = ~0UL;
69185
69186 return addr;
69187@@ -1522,6 +1740,12 @@ bottomup:
69188
69189 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69190 {
69191+
69192+#ifdef CONFIG_PAX_SEGMEXEC
69193+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69194+ return;
69195+#endif
69196+
69197 /*
69198 * Is this a new hole at the highest possible address?
69199 */
69200@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69201 mm->free_area_cache = addr;
69202
69203 /* dont allow allocations above current base */
69204- if (mm->free_area_cache > mm->mmap_base)
69205+ if (mm->free_area_cache > mm->mmap_base) {
69206 mm->free_area_cache = mm->mmap_base;
69207+ mm->cached_hole_size = ~0UL;
69208+ }
69209 }
69210
69211 unsigned long
69212@@ -1638,6 +1864,28 @@ out:
69213 return prev ? prev->vm_next : vma;
69214 }
69215
69216+#ifdef CONFIG_PAX_SEGMEXEC
69217+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69218+{
69219+ struct vm_area_struct *vma_m;
69220+
69221+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69222+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69223+ BUG_ON(vma->vm_mirror);
69224+ return NULL;
69225+ }
69226+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69227+ vma_m = vma->vm_mirror;
69228+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69229+ BUG_ON(vma->vm_file != vma_m->vm_file);
69230+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69231+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69232+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69233+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69234+ return vma_m;
69235+}
69236+#endif
69237+
69238 /*
69239 * Verify that the stack growth is acceptable and
69240 * update accounting. This is shared with both the
69241@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69242 return -ENOMEM;
69243
69244 /* Stack limit test */
69245+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69246 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69247 return -ENOMEM;
69248
69249@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69250 locked = mm->locked_vm + grow;
69251 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69252 limit >>= PAGE_SHIFT;
69253+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69254 if (locked > limit && !capable(CAP_IPC_LOCK))
69255 return -ENOMEM;
69256 }
69257@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69258 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69259 * vma is the last one with address > vma->vm_end. Have to extend vma.
69260 */
69261+#ifndef CONFIG_IA64
69262+static
69263+#endif
69264 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69265 {
69266 int error;
69267+ bool locknext;
69268
69269 if (!(vma->vm_flags & VM_GROWSUP))
69270 return -EFAULT;
69271
69272+ /* Also guard against wrapping around to address 0. */
69273+ if (address < PAGE_ALIGN(address+1))
69274+ address = PAGE_ALIGN(address+1);
69275+ else
69276+ return -ENOMEM;
69277+
69278 /*
69279 * We must make sure the anon_vma is allocated
69280 * so that the anon_vma locking is not a noop.
69281 */
69282 if (unlikely(anon_vma_prepare(vma)))
69283 return -ENOMEM;
69284+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69285+ if (locknext && anon_vma_prepare(vma->vm_next))
69286+ return -ENOMEM;
69287 vma_lock_anon_vma(vma);
69288+ if (locknext)
69289+ vma_lock_anon_vma(vma->vm_next);
69290
69291 /*
69292 * vma->vm_start/vm_end cannot change under us because the caller
69293 * is required to hold the mmap_sem in read mode. We need the
69294- * anon_vma lock to serialize against concurrent expand_stacks.
69295- * Also guard against wrapping around to address 0.
69296+ * anon_vma locks to serialize against concurrent expand_stacks
69297+ * and expand_upwards.
69298 */
69299- if (address < PAGE_ALIGN(address+4))
69300- address = PAGE_ALIGN(address+4);
69301- else {
69302- vma_unlock_anon_vma(vma);
69303- return -ENOMEM;
69304- }
69305 error = 0;
69306
69307 /* Somebody else might have raced and expanded it already */
69308- if (address > vma->vm_end) {
69309+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69310+ error = -ENOMEM;
69311+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69312 unsigned long size, grow;
69313
69314 size = address - vma->vm_start;
69315@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69316 }
69317 }
69318 }
69319+ if (locknext)
69320+ vma_unlock_anon_vma(vma->vm_next);
69321 vma_unlock_anon_vma(vma);
69322 khugepaged_enter_vma_merge(vma);
69323 return error;
69324@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
69325 unsigned long address)
69326 {
69327 int error;
69328+ bool lockprev = false;
69329+ struct vm_area_struct *prev;
69330
69331 /*
69332 * We must make sure the anon_vma is allocated
69333@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
69334 if (error)
69335 return error;
69336
69337+ prev = vma->vm_prev;
69338+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69339+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69340+#endif
69341+ if (lockprev && anon_vma_prepare(prev))
69342+ return -ENOMEM;
69343+ if (lockprev)
69344+ vma_lock_anon_vma(prev);
69345+
69346 vma_lock_anon_vma(vma);
69347
69348 /*
69349@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
69350 */
69351
69352 /* Somebody else might have raced and expanded it already */
69353- if (address < vma->vm_start) {
69354+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69355+ error = -ENOMEM;
69356+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69357 unsigned long size, grow;
69358
69359+#ifdef CONFIG_PAX_SEGMEXEC
69360+ struct vm_area_struct *vma_m;
69361+
69362+ vma_m = pax_find_mirror_vma(vma);
69363+#endif
69364+
69365 size = vma->vm_end - address;
69366 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69367
69368@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
69369 if (!error) {
69370 vma->vm_start = address;
69371 vma->vm_pgoff -= grow;
69372+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69373+
69374+#ifdef CONFIG_PAX_SEGMEXEC
69375+ if (vma_m) {
69376+ vma_m->vm_start -= grow << PAGE_SHIFT;
69377+ vma_m->vm_pgoff -= grow;
69378+ }
69379+#endif
69380+
69381 perf_event_mmap(vma);
69382 }
69383 }
69384 }
69385 vma_unlock_anon_vma(vma);
69386+ if (lockprev)
69387+ vma_unlock_anon_vma(prev);
69388 khugepaged_enter_vma_merge(vma);
69389 return error;
69390 }
69391@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69392 do {
69393 long nrpages = vma_pages(vma);
69394
69395+#ifdef CONFIG_PAX_SEGMEXEC
69396+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69397+ vma = remove_vma(vma);
69398+ continue;
69399+ }
69400+#endif
69401+
69402 mm->total_vm -= nrpages;
69403 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69404 vma = remove_vma(vma);
69405@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69406 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69407 vma->vm_prev = NULL;
69408 do {
69409+
69410+#ifdef CONFIG_PAX_SEGMEXEC
69411+ if (vma->vm_mirror) {
69412+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69413+ vma->vm_mirror->vm_mirror = NULL;
69414+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69415+ vma->vm_mirror = NULL;
69416+ }
69417+#endif
69418+
69419 rb_erase(&vma->vm_rb, &mm->mm_rb);
69420 mm->map_count--;
69421 tail_vma = vma;
69422@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69423 struct vm_area_struct *new;
69424 int err = -ENOMEM;
69425
69426+#ifdef CONFIG_PAX_SEGMEXEC
69427+ struct vm_area_struct *vma_m, *new_m = NULL;
69428+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69429+#endif
69430+
69431 if (is_vm_hugetlb_page(vma) && (addr &
69432 ~(huge_page_mask(hstate_vma(vma)))))
69433 return -EINVAL;
69434
69435+#ifdef CONFIG_PAX_SEGMEXEC
69436+ vma_m = pax_find_mirror_vma(vma);
69437+#endif
69438+
69439 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69440 if (!new)
69441 goto out_err;
69442
69443+#ifdef CONFIG_PAX_SEGMEXEC
69444+ if (vma_m) {
69445+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69446+ if (!new_m) {
69447+ kmem_cache_free(vm_area_cachep, new);
69448+ goto out_err;
69449+ }
69450+ }
69451+#endif
69452+
69453 /* most fields are the same, copy all, and then fixup */
69454 *new = *vma;
69455
69456@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69457 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69458 }
69459
69460+#ifdef CONFIG_PAX_SEGMEXEC
69461+ if (vma_m) {
69462+ *new_m = *vma_m;
69463+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69464+ new_m->vm_mirror = new;
69465+ new->vm_mirror = new_m;
69466+
69467+ if (new_below)
69468+ new_m->vm_end = addr_m;
69469+ else {
69470+ new_m->vm_start = addr_m;
69471+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69472+ }
69473+ }
69474+#endif
69475+
69476 pol = mpol_dup(vma_policy(vma));
69477 if (IS_ERR(pol)) {
69478 err = PTR_ERR(pol);
69479@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69480 else
69481 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69482
69483+#ifdef CONFIG_PAX_SEGMEXEC
69484+ if (!err && vma_m) {
69485+ if (anon_vma_clone(new_m, vma_m))
69486+ goto out_free_mpol;
69487+
69488+ mpol_get(pol);
69489+ vma_set_policy(new_m, pol);
69490+
69491+ if (new_m->vm_file) {
69492+ get_file(new_m->vm_file);
69493+ if (vma_m->vm_flags & VM_EXECUTABLE)
69494+ added_exe_file_vma(mm);
69495+ }
69496+
69497+ if (new_m->vm_ops && new_m->vm_ops->open)
69498+ new_m->vm_ops->open(new_m);
69499+
69500+ if (new_below)
69501+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69502+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69503+ else
69504+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69505+
69506+ if (err) {
69507+ if (new_m->vm_ops && new_m->vm_ops->close)
69508+ new_m->vm_ops->close(new_m);
69509+ if (new_m->vm_file) {
69510+ if (vma_m->vm_flags & VM_EXECUTABLE)
69511+ removed_exe_file_vma(mm);
69512+ fput(new_m->vm_file);
69513+ }
69514+ mpol_put(pol);
69515+ }
69516+ }
69517+#endif
69518+
69519 /* Success. */
69520 if (!err)
69521 return 0;
69522@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69523 removed_exe_file_vma(mm);
69524 fput(new->vm_file);
69525 }
69526- unlink_anon_vmas(new);
69527 out_free_mpol:
69528 mpol_put(pol);
69529 out_free_vma:
69530+
69531+#ifdef CONFIG_PAX_SEGMEXEC
69532+ if (new_m) {
69533+ unlink_anon_vmas(new_m);
69534+ kmem_cache_free(vm_area_cachep, new_m);
69535+ }
69536+#endif
69537+
69538+ unlink_anon_vmas(new);
69539 kmem_cache_free(vm_area_cachep, new);
69540 out_err:
69541 return err;
69542@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69543 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69544 unsigned long addr, int new_below)
69545 {
69546+
69547+#ifdef CONFIG_PAX_SEGMEXEC
69548+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69549+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69550+ if (mm->map_count >= sysctl_max_map_count-1)
69551+ return -ENOMEM;
69552+ } else
69553+#endif
69554+
69555 if (mm->map_count >= sysctl_max_map_count)
69556 return -ENOMEM;
69557
69558@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69559 * work. This now handles partial unmappings.
69560 * Jeremy Fitzhardinge <jeremy@goop.org>
69561 */
69562+#ifdef CONFIG_PAX_SEGMEXEC
69563 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69564 {
69565+ int ret = __do_munmap(mm, start, len);
69566+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69567+ return ret;
69568+
69569+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69570+}
69571+
69572+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69573+#else
69574+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69575+#endif
69576+{
69577 unsigned long end;
69578 struct vm_area_struct *vma, *prev, *last;
69579
69580+ /*
69581+ * mm->mmap_sem is required to protect against another thread
69582+ * changing the mappings in case we sleep.
69583+ */
69584+ verify_mm_writelocked(mm);
69585+
69586 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69587 return -EINVAL;
69588
69589@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69590 /* Fix up all other VM information */
69591 remove_vma_list(mm, vma);
69592
69593+ track_exec_limit(mm, start, end, 0UL);
69594+
69595 return 0;
69596 }
69597
69598@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69599
69600 profile_munmap(addr);
69601
69602+#ifdef CONFIG_PAX_SEGMEXEC
69603+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69604+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69605+ return -EINVAL;
69606+#endif
69607+
69608 down_write(&mm->mmap_sem);
69609 ret = do_munmap(mm, addr, len);
69610 up_write(&mm->mmap_sem);
69611 return ret;
69612 }
69613
69614-static inline void verify_mm_writelocked(struct mm_struct *mm)
69615-{
69616-#ifdef CONFIG_DEBUG_VM
69617- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69618- WARN_ON(1);
69619- up_read(&mm->mmap_sem);
69620- }
69621-#endif
69622-}
69623-
69624 /*
69625 * this is really a simplified "do_mmap". it only handles
69626 * anonymous maps. eventually we may be able to do some
69627@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69628 struct rb_node ** rb_link, * rb_parent;
69629 pgoff_t pgoff = addr >> PAGE_SHIFT;
69630 int error;
69631+ unsigned long charged;
69632
69633 len = PAGE_ALIGN(len);
69634 if (!len)
69635@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69636
69637 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69638
69639+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69640+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69641+ flags &= ~VM_EXEC;
69642+
69643+#ifdef CONFIG_PAX_MPROTECT
69644+ if (mm->pax_flags & MF_PAX_MPROTECT)
69645+ flags &= ~VM_MAYEXEC;
69646+#endif
69647+
69648+ }
69649+#endif
69650+
69651 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69652 if (error & ~PAGE_MASK)
69653 return error;
69654
69655+ charged = len >> PAGE_SHIFT;
69656+
69657 /*
69658 * mlock MCL_FUTURE?
69659 */
69660 if (mm->def_flags & VM_LOCKED) {
69661 unsigned long locked, lock_limit;
69662- locked = len >> PAGE_SHIFT;
69663+ locked = charged;
69664 locked += mm->locked_vm;
69665 lock_limit = rlimit(RLIMIT_MEMLOCK);
69666 lock_limit >>= PAGE_SHIFT;
69667@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69668 /*
69669 * Clear old maps. this also does some error checking for us
69670 */
69671- munmap_back:
69672 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69673 if (vma && vma->vm_start < addr + len) {
69674 if (do_munmap(mm, addr, len))
69675 return -ENOMEM;
69676- goto munmap_back;
69677+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69678+ BUG_ON(vma && vma->vm_start < addr + len);
69679 }
69680
69681 /* Check against address space limits *after* clearing old maps... */
69682- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69683+ if (!may_expand_vm(mm, charged))
69684 return -ENOMEM;
69685
69686 if (mm->map_count > sysctl_max_map_count)
69687 return -ENOMEM;
69688
69689- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69690+ if (security_vm_enough_memory(charged))
69691 return -ENOMEM;
69692
69693 /* Can we just expand an old private anonymous mapping? */
69694@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69695 */
69696 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69697 if (!vma) {
69698- vm_unacct_memory(len >> PAGE_SHIFT);
69699+ vm_unacct_memory(charged);
69700 return -ENOMEM;
69701 }
69702
69703@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69704 vma_link(mm, vma, prev, rb_link, rb_parent);
69705 out:
69706 perf_event_mmap(vma);
69707- mm->total_vm += len >> PAGE_SHIFT;
69708+ mm->total_vm += charged;
69709 if (flags & VM_LOCKED) {
69710 if (!mlock_vma_pages_range(vma, addr, addr + len))
69711- mm->locked_vm += (len >> PAGE_SHIFT);
69712+ mm->locked_vm += charged;
69713 }
69714+ track_exec_limit(mm, addr, addr + len, flags);
69715 return addr;
69716 }
69717
69718@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69719 * Walk the list again, actually closing and freeing it,
69720 * with preemption enabled, without holding any MM locks.
69721 */
69722- while (vma)
69723+ while (vma) {
69724+ vma->vm_mirror = NULL;
69725 vma = remove_vma(vma);
69726+ }
69727
69728 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69729 }
69730@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69731 struct vm_area_struct * __vma, * prev;
69732 struct rb_node ** rb_link, * rb_parent;
69733
69734+#ifdef CONFIG_PAX_SEGMEXEC
69735+ struct vm_area_struct *vma_m = NULL;
69736+#endif
69737+
69738+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69739+ return -EPERM;
69740+
69741 /*
69742 * The vm_pgoff of a purely anonymous vma should be irrelevant
69743 * until its first write fault, when page's anon_vma and index
69744@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69745 if ((vma->vm_flags & VM_ACCOUNT) &&
69746 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69747 return -ENOMEM;
69748+
69749+#ifdef CONFIG_PAX_SEGMEXEC
69750+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69751+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69752+ if (!vma_m)
69753+ return -ENOMEM;
69754+ }
69755+#endif
69756+
69757 vma_link(mm, vma, prev, rb_link, rb_parent);
69758+
69759+#ifdef CONFIG_PAX_SEGMEXEC
69760+ if (vma_m)
69761+ BUG_ON(pax_mirror_vma(vma_m, vma));
69762+#endif
69763+
69764 return 0;
69765 }
69766
69767@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69768 struct rb_node **rb_link, *rb_parent;
69769 struct mempolicy *pol;
69770
69771+ BUG_ON(vma->vm_mirror);
69772+
69773 /*
69774 * If anonymous vma has not yet been faulted, update new pgoff
69775 * to match new location, to increase its chance of merging.
69776@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69777 return NULL;
69778 }
69779
69780+#ifdef CONFIG_PAX_SEGMEXEC
69781+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69782+{
69783+ struct vm_area_struct *prev_m;
69784+ struct rb_node **rb_link_m, *rb_parent_m;
69785+ struct mempolicy *pol_m;
69786+
69787+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69788+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69789+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69790+ *vma_m = *vma;
69791+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69792+ if (anon_vma_clone(vma_m, vma))
69793+ return -ENOMEM;
69794+ pol_m = vma_policy(vma_m);
69795+ mpol_get(pol_m);
69796+ vma_set_policy(vma_m, pol_m);
69797+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69798+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69799+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69800+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69801+ if (vma_m->vm_file)
69802+ get_file(vma_m->vm_file);
69803+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69804+ vma_m->vm_ops->open(vma_m);
69805+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69806+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69807+ vma_m->vm_mirror = vma;
69808+ vma->vm_mirror = vma_m;
69809+ return 0;
69810+}
69811+#endif
69812+
69813 /*
69814 * Return true if the calling process may expand its vm space by the passed
69815 * number of pages
69816@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69817 unsigned long lim;
69818
69819 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69820-
69821+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69822 if (cur + npages > lim)
69823 return 0;
69824 return 1;
69825@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
69826 vma->vm_start = addr;
69827 vma->vm_end = addr + len;
69828
69829+#ifdef CONFIG_PAX_MPROTECT
69830+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69831+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69832+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69833+ return -EPERM;
69834+ if (!(vm_flags & VM_EXEC))
69835+ vm_flags &= ~VM_MAYEXEC;
69836+#else
69837+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69838+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69839+#endif
69840+ else
69841+ vm_flags &= ~VM_MAYWRITE;
69842+ }
69843+#endif
69844+
69845 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69846 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69847
69848diff --git a/mm/mprotect.c b/mm/mprotect.c
69849index 5a688a2..27e031c 100644
69850--- a/mm/mprotect.c
69851+++ b/mm/mprotect.c
69852@@ -23,10 +23,16 @@
69853 #include <linux/mmu_notifier.h>
69854 #include <linux/migrate.h>
69855 #include <linux/perf_event.h>
69856+
69857+#ifdef CONFIG_PAX_MPROTECT
69858+#include <linux/elf.h>
69859+#endif
69860+
69861 #include <asm/uaccess.h>
69862 #include <asm/pgtable.h>
69863 #include <asm/cacheflush.h>
69864 #include <asm/tlbflush.h>
69865+#include <asm/mmu_context.h>
69866
69867 #ifndef pgprot_modify
69868 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69869@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
69870 flush_tlb_range(vma, start, end);
69871 }
69872
69873+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69874+/* called while holding the mmap semaphor for writing except stack expansion */
69875+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69876+{
69877+ unsigned long oldlimit, newlimit = 0UL;
69878+
69879+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69880+ return;
69881+
69882+ spin_lock(&mm->page_table_lock);
69883+ oldlimit = mm->context.user_cs_limit;
69884+ if ((prot & VM_EXEC) && oldlimit < end)
69885+ /* USER_CS limit moved up */
69886+ newlimit = end;
69887+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69888+ /* USER_CS limit moved down */
69889+ newlimit = start;
69890+
69891+ if (newlimit) {
69892+ mm->context.user_cs_limit = newlimit;
69893+
69894+#ifdef CONFIG_SMP
69895+ wmb();
69896+ cpus_clear(mm->context.cpu_user_cs_mask);
69897+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69898+#endif
69899+
69900+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69901+ }
69902+ spin_unlock(&mm->page_table_lock);
69903+ if (newlimit == end) {
69904+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69905+
69906+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69907+ if (is_vm_hugetlb_page(vma))
69908+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69909+ else
69910+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69911+ }
69912+}
69913+#endif
69914+
69915 int
69916 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69917 unsigned long start, unsigned long end, unsigned long newflags)
69918@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69919 int error;
69920 int dirty_accountable = 0;
69921
69922+#ifdef CONFIG_PAX_SEGMEXEC
69923+ struct vm_area_struct *vma_m = NULL;
69924+ unsigned long start_m, end_m;
69925+
69926+ start_m = start + SEGMEXEC_TASK_SIZE;
69927+ end_m = end + SEGMEXEC_TASK_SIZE;
69928+#endif
69929+
69930 if (newflags == oldflags) {
69931 *pprev = vma;
69932 return 0;
69933 }
69934
69935+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69936+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69937+
69938+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69939+ return -ENOMEM;
69940+
69941+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69942+ return -ENOMEM;
69943+ }
69944+
69945 /*
69946 * If we make a private mapping writable we increase our commit;
69947 * but (without finer accounting) cannot reduce our commit if we
69948@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69949 }
69950 }
69951
69952+#ifdef CONFIG_PAX_SEGMEXEC
69953+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69954+ if (start != vma->vm_start) {
69955+ error = split_vma(mm, vma, start, 1);
69956+ if (error)
69957+ goto fail;
69958+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69959+ *pprev = (*pprev)->vm_next;
69960+ }
69961+
69962+ if (end != vma->vm_end) {
69963+ error = split_vma(mm, vma, end, 0);
69964+ if (error)
69965+ goto fail;
69966+ }
69967+
69968+ if (pax_find_mirror_vma(vma)) {
69969+ error = __do_munmap(mm, start_m, end_m - start_m);
69970+ if (error)
69971+ goto fail;
69972+ } else {
69973+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69974+ if (!vma_m) {
69975+ error = -ENOMEM;
69976+ goto fail;
69977+ }
69978+ vma->vm_flags = newflags;
69979+ error = pax_mirror_vma(vma_m, vma);
69980+ if (error) {
69981+ vma->vm_flags = oldflags;
69982+ goto fail;
69983+ }
69984+ }
69985+ }
69986+#endif
69987+
69988 /*
69989 * First try to merge with previous and/or next vma.
69990 */
69991@@ -204,9 +306,21 @@ success:
69992 * vm_flags and vm_page_prot are protected by the mmap_sem
69993 * held in write mode.
69994 */
69995+
69996+#ifdef CONFIG_PAX_SEGMEXEC
69997+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69998+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69999+#endif
70000+
70001 vma->vm_flags = newflags;
70002+
70003+#ifdef CONFIG_PAX_MPROTECT
70004+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70005+ mm->binfmt->handle_mprotect(vma, newflags);
70006+#endif
70007+
70008 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70009- vm_get_page_prot(newflags));
70010+ vm_get_page_prot(vma->vm_flags));
70011
70012 if (vma_wants_writenotify(vma)) {
70013 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70014@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70015 end = start + len;
70016 if (end <= start)
70017 return -ENOMEM;
70018+
70019+#ifdef CONFIG_PAX_SEGMEXEC
70020+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70021+ if (end > SEGMEXEC_TASK_SIZE)
70022+ return -EINVAL;
70023+ } else
70024+#endif
70025+
70026+ if (end > TASK_SIZE)
70027+ return -EINVAL;
70028+
70029 if (!arch_validate_prot(prot))
70030 return -EINVAL;
70031
70032@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70033 /*
70034 * Does the application expect PROT_READ to imply PROT_EXEC:
70035 */
70036- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70037+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70038 prot |= PROT_EXEC;
70039
70040 vm_flags = calc_vm_prot_bits(prot);
70041@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70042 if (start > vma->vm_start)
70043 prev = vma;
70044
70045+#ifdef CONFIG_PAX_MPROTECT
70046+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70047+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70048+#endif
70049+
70050 for (nstart = start ; ; ) {
70051 unsigned long newflags;
70052
70053@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70054
70055 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70056 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70057+ if (prot & (PROT_WRITE | PROT_EXEC))
70058+ gr_log_rwxmprotect(vma->vm_file);
70059+
70060+ error = -EACCES;
70061+ goto out;
70062+ }
70063+
70064+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70065 error = -EACCES;
70066 goto out;
70067 }
70068@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70069 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70070 if (error)
70071 goto out;
70072+
70073+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70074+
70075 nstart = tmp;
70076
70077 if (nstart < prev->vm_end)
70078diff --git a/mm/mremap.c b/mm/mremap.c
70079index d6959cb..18a402a 100644
70080--- a/mm/mremap.c
70081+++ b/mm/mremap.c
70082@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70083 continue;
70084 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70085 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70086+
70087+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70088+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70089+ pte = pte_exprotect(pte);
70090+#endif
70091+
70092 set_pte_at(mm, new_addr, new_pte, pte);
70093 }
70094
70095@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70096 if (is_vm_hugetlb_page(vma))
70097 goto Einval;
70098
70099+#ifdef CONFIG_PAX_SEGMEXEC
70100+ if (pax_find_mirror_vma(vma))
70101+ goto Einval;
70102+#endif
70103+
70104 /* We can't remap across vm area boundaries */
70105 if (old_len > vma->vm_end - addr)
70106 goto Efault;
70107@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70108 unsigned long ret = -EINVAL;
70109 unsigned long charged = 0;
70110 unsigned long map_flags;
70111+ unsigned long pax_task_size = TASK_SIZE;
70112
70113 if (new_addr & ~PAGE_MASK)
70114 goto out;
70115
70116- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70117+#ifdef CONFIG_PAX_SEGMEXEC
70118+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70119+ pax_task_size = SEGMEXEC_TASK_SIZE;
70120+#endif
70121+
70122+ pax_task_size -= PAGE_SIZE;
70123+
70124+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70125 goto out;
70126
70127 /* Check if the location we're moving into overlaps the
70128 * old location at all, and fail if it does.
70129 */
70130- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70131- goto out;
70132-
70133- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70134+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70135 goto out;
70136
70137 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70138@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70139 struct vm_area_struct *vma;
70140 unsigned long ret = -EINVAL;
70141 unsigned long charged = 0;
70142+ unsigned long pax_task_size = TASK_SIZE;
70143
70144 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70145 goto out;
70146@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70147 if (!new_len)
70148 goto out;
70149
70150+#ifdef CONFIG_PAX_SEGMEXEC
70151+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70152+ pax_task_size = SEGMEXEC_TASK_SIZE;
70153+#endif
70154+
70155+ pax_task_size -= PAGE_SIZE;
70156+
70157+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70158+ old_len > pax_task_size || addr > pax_task_size-old_len)
70159+ goto out;
70160+
70161 if (flags & MREMAP_FIXED) {
70162 if (flags & MREMAP_MAYMOVE)
70163 ret = mremap_to(addr, old_len, new_addr, new_len);
70164@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70165 addr + new_len);
70166 }
70167 ret = addr;
70168+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70169 goto out;
70170 }
70171 }
70172@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70173 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70174 if (ret)
70175 goto out;
70176+
70177+ map_flags = vma->vm_flags;
70178 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70179+ if (!(ret & ~PAGE_MASK)) {
70180+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70181+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70182+ }
70183 }
70184 out:
70185 if (ret & ~PAGE_MASK)
70186diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70187index 7fa41b4..6087460 100644
70188--- a/mm/nobootmem.c
70189+++ b/mm/nobootmem.c
70190@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70191 unsigned long __init free_all_memory_core_early(int nodeid)
70192 {
70193 int i;
70194- u64 start, end;
70195+ u64 start, end, startrange, endrange;
70196 unsigned long count = 0;
70197- struct range *range = NULL;
70198+ struct range *range = NULL, rangerange = { 0, 0 };
70199 int nr_range;
70200
70201 nr_range = get_free_all_memory_range(&range, nodeid);
70202+ startrange = __pa(range) >> PAGE_SHIFT;
70203+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70204
70205 for (i = 0; i < nr_range; i++) {
70206 start = range[i].start;
70207 end = range[i].end;
70208+ if (start <= endrange && startrange < end) {
70209+ BUG_ON(rangerange.start | rangerange.end);
70210+ rangerange = range[i];
70211+ continue;
70212+ }
70213 count += end - start;
70214 __free_pages_memory(start, end);
70215 }
70216+ start = rangerange.start;
70217+ end = rangerange.end;
70218+ count += end - start;
70219+ __free_pages_memory(start, end);
70220
70221 return count;
70222 }
70223diff --git a/mm/nommu.c b/mm/nommu.c
70224index b982290..7d73f53 100644
70225--- a/mm/nommu.c
70226+++ b/mm/nommu.c
70227@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70228 int sysctl_overcommit_ratio = 50; /* default is 50% */
70229 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70230 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70231-int heap_stack_gap = 0;
70232
70233 atomic_long_t mmap_pages_allocated;
70234
70235@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70236 EXPORT_SYMBOL(find_vma);
70237
70238 /*
70239- * find a VMA
70240- * - we don't extend stack VMAs under NOMMU conditions
70241- */
70242-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70243-{
70244- return find_vma(mm, addr);
70245-}
70246-
70247-/*
70248 * expand a stack to a given address
70249 * - not supported under NOMMU conditions
70250 */
70251@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70252
70253 /* most fields are the same, copy all, and then fixup */
70254 *new = *vma;
70255+ INIT_LIST_HEAD(&new->anon_vma_chain);
70256 *region = *vma->vm_region;
70257 new->vm_region = region;
70258
70259diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70260index 485be89..c059ad3 100644
70261--- a/mm/page_alloc.c
70262+++ b/mm/page_alloc.c
70263@@ -341,7 +341,7 @@ out:
70264 * This usage means that zero-order pages may not be compound.
70265 */
70266
70267-static void free_compound_page(struct page *page)
70268+void free_compound_page(struct page *page)
70269 {
70270 __free_pages_ok(page, compound_order(page));
70271 }
70272@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70273 int i;
70274 int bad = 0;
70275
70276+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70277+ unsigned long index = 1UL << order;
70278+#endif
70279+
70280 trace_mm_page_free_direct(page, order);
70281 kmemcheck_free_shadow(page, order);
70282
70283@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70284 debug_check_no_obj_freed(page_address(page),
70285 PAGE_SIZE << order);
70286 }
70287+
70288+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70289+ for (; index; --index)
70290+ sanitize_highpage(page + index - 1);
70291+#endif
70292+
70293 arch_free_page(page, order);
70294 kernel_map_pages(page, 1 << order, 0);
70295
70296@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70297 arch_alloc_page(page, order);
70298 kernel_map_pages(page, 1 << order, 1);
70299
70300+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70301 if (gfp_flags & __GFP_ZERO)
70302 prep_zero_page(page, order, gfp_flags);
70303+#endif
70304
70305 if (order && (gfp_flags & __GFP_COMP))
70306 prep_compound_page(page, order);
70307@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70308 unsigned long pfn;
70309
70310 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70311+#ifdef CONFIG_X86_32
70312+ /* boot failures in VMware 8 on 32bit vanilla since
70313+ this change */
70314+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70315+#else
70316 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70317+#endif
70318 return 1;
70319 }
70320 return 0;
70321diff --git a/mm/percpu.c b/mm/percpu.c
70322index 716eb4a..8d10419 100644
70323--- a/mm/percpu.c
70324+++ b/mm/percpu.c
70325@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70326 static unsigned int pcpu_high_unit_cpu __read_mostly;
70327
70328 /* the address of the first chunk which starts with the kernel static area */
70329-void *pcpu_base_addr __read_mostly;
70330+void *pcpu_base_addr __read_only;
70331 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70332
70333 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70334diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70335index e920aa3..137702a 100644
70336--- a/mm/process_vm_access.c
70337+++ b/mm/process_vm_access.c
70338@@ -13,6 +13,7 @@
70339 #include <linux/uio.h>
70340 #include <linux/sched.h>
70341 #include <linux/highmem.h>
70342+#include <linux/security.h>
70343 #include <linux/ptrace.h>
70344 #include <linux/slab.h>
70345 #include <linux/syscalls.h>
70346@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70347 size_t iov_l_curr_offset = 0;
70348 ssize_t iov_len;
70349
70350+ return -ENOSYS; // PaX: until properly audited
70351+
70352 /*
70353 * Work out how many pages of struct pages we're going to need
70354 * when eventually calling get_user_pages
70355 */
70356 for (i = 0; i < riovcnt; i++) {
70357 iov_len = rvec[i].iov_len;
70358- if (iov_len > 0) {
70359- nr_pages_iov = ((unsigned long)rvec[i].iov_base
70360- + iov_len)
70361- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70362- / PAGE_SIZE + 1;
70363- nr_pages = max(nr_pages, nr_pages_iov);
70364- }
70365+ if (iov_len <= 0)
70366+ continue;
70367+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70368+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70369+ nr_pages = max(nr_pages, nr_pages_iov);
70370 }
70371
70372 if (nr_pages == 0)
70373@@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70374 goto free_proc_pages;
70375 }
70376
70377- task_lock(task);
70378- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70379- task_unlock(task);
70380+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70381 rc = -EPERM;
70382 goto put_task_struct;
70383 }
70384- mm = task->mm;
70385
70386- if (!mm || (task->flags & PF_KTHREAD)) {
70387- task_unlock(task);
70388- rc = -EINVAL;
70389+ mm = mm_access(task, PTRACE_MODE_ATTACH);
70390+ if (!mm || IS_ERR(mm)) {
70391+ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70392+ /*
70393+ * Explicitly map EACCES to EPERM as EPERM is a more a
70394+ * appropriate error code for process_vw_readv/writev
70395+ */
70396+ if (rc == -EACCES)
70397+ rc = -EPERM;
70398 goto put_task_struct;
70399 }
70400
70401- atomic_inc(&mm->mm_users);
70402- task_unlock(task);
70403-
70404 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70405 rc = process_vm_rw_single_vec(
70406 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70407diff --git a/mm/rmap.c b/mm/rmap.c
70408index a4fd368..e0ffec7 100644
70409--- a/mm/rmap.c
70410+++ b/mm/rmap.c
70411@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70412 struct anon_vma *anon_vma = vma->anon_vma;
70413 struct anon_vma_chain *avc;
70414
70415+#ifdef CONFIG_PAX_SEGMEXEC
70416+ struct anon_vma_chain *avc_m = NULL;
70417+#endif
70418+
70419 might_sleep();
70420 if (unlikely(!anon_vma)) {
70421 struct mm_struct *mm = vma->vm_mm;
70422@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70423 if (!avc)
70424 goto out_enomem;
70425
70426+#ifdef CONFIG_PAX_SEGMEXEC
70427+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70428+ if (!avc_m)
70429+ goto out_enomem_free_avc;
70430+#endif
70431+
70432 anon_vma = find_mergeable_anon_vma(vma);
70433 allocated = NULL;
70434 if (!anon_vma) {
70435@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70436 /* page_table_lock to protect against threads */
70437 spin_lock(&mm->page_table_lock);
70438 if (likely(!vma->anon_vma)) {
70439+
70440+#ifdef CONFIG_PAX_SEGMEXEC
70441+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70442+
70443+ if (vma_m) {
70444+ BUG_ON(vma_m->anon_vma);
70445+ vma_m->anon_vma = anon_vma;
70446+ avc_m->anon_vma = anon_vma;
70447+ avc_m->vma = vma;
70448+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70449+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70450+ avc_m = NULL;
70451+ }
70452+#endif
70453+
70454 vma->anon_vma = anon_vma;
70455 avc->anon_vma = anon_vma;
70456 avc->vma = vma;
70457@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70458
70459 if (unlikely(allocated))
70460 put_anon_vma(allocated);
70461+
70462+#ifdef CONFIG_PAX_SEGMEXEC
70463+ if (unlikely(avc_m))
70464+ anon_vma_chain_free(avc_m);
70465+#endif
70466+
70467 if (unlikely(avc))
70468 anon_vma_chain_free(avc);
70469 }
70470 return 0;
70471
70472 out_enomem_free_avc:
70473+
70474+#ifdef CONFIG_PAX_SEGMEXEC
70475+ if (avc_m)
70476+ anon_vma_chain_free(avc_m);
70477+#endif
70478+
70479 anon_vma_chain_free(avc);
70480 out_enomem:
70481 return -ENOMEM;
70482@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70483 * Attach the anon_vmas from src to dst.
70484 * Returns 0 on success, -ENOMEM on failure.
70485 */
70486-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70487+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70488 {
70489 struct anon_vma_chain *avc, *pavc;
70490 struct anon_vma *root = NULL;
70491@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70492 * the corresponding VMA in the parent process is attached to.
70493 * Returns 0 on success, non-zero on failure.
70494 */
70495-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70496+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70497 {
70498 struct anon_vma_chain *avc;
70499 struct anon_vma *anon_vma;
70500diff --git a/mm/shmem.c b/mm/shmem.c
70501index 6c253f7..367e20a 100644
70502--- a/mm/shmem.c
70503+++ b/mm/shmem.c
70504@@ -31,7 +31,7 @@
70505 #include <linux/export.h>
70506 #include <linux/swap.h>
70507
70508-static struct vfsmount *shm_mnt;
70509+struct vfsmount *shm_mnt;
70510
70511 #ifdef CONFIG_SHMEM
70512 /*
70513@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70514 #define BOGO_DIRENT_SIZE 20
70515
70516 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70517-#define SHORT_SYMLINK_LEN 128
70518+#define SHORT_SYMLINK_LEN 64
70519
70520 struct shmem_xattr {
70521 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70522@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70523 int err = -ENOMEM;
70524
70525 /* Round up to L1_CACHE_BYTES to resist false sharing */
70526- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70527- L1_CACHE_BYTES), GFP_KERNEL);
70528+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70529 if (!sbinfo)
70530 return -ENOMEM;
70531
70532diff --git a/mm/slab.c b/mm/slab.c
70533index 83311c9a..fcf8f86 100644
70534--- a/mm/slab.c
70535+++ b/mm/slab.c
70536@@ -151,7 +151,7 @@
70537
70538 /* Legal flag mask for kmem_cache_create(). */
70539 #if DEBUG
70540-# define CREATE_MASK (SLAB_RED_ZONE | \
70541+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70542 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70543 SLAB_CACHE_DMA | \
70544 SLAB_STORE_USER | \
70545@@ -159,7 +159,7 @@
70546 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70547 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70548 #else
70549-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70550+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70551 SLAB_CACHE_DMA | \
70552 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70553 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70554@@ -288,7 +288,7 @@ struct kmem_list3 {
70555 * Need this for bootstrapping a per node allocator.
70556 */
70557 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70558-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70559+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70560 #define CACHE_CACHE 0
70561 #define SIZE_AC MAX_NUMNODES
70562 #define SIZE_L3 (2 * MAX_NUMNODES)
70563@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70564 if ((x)->max_freeable < i) \
70565 (x)->max_freeable = i; \
70566 } while (0)
70567-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70568-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70569-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70570-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70571+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70572+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70573+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70574+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70575 #else
70576 #define STATS_INC_ACTIVE(x) do { } while (0)
70577 #define STATS_DEC_ACTIVE(x) do { } while (0)
70578@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70579 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70580 */
70581 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70582- const struct slab *slab, void *obj)
70583+ const struct slab *slab, const void *obj)
70584 {
70585 u32 offset = (obj - slab->s_mem);
70586 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70587@@ -564,7 +564,7 @@ struct cache_names {
70588 static struct cache_names __initdata cache_names[] = {
70589 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70590 #include <linux/kmalloc_sizes.h>
70591- {NULL,}
70592+ {NULL}
70593 #undef CACHE
70594 };
70595
70596@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70597 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70598 sizes[INDEX_AC].cs_size,
70599 ARCH_KMALLOC_MINALIGN,
70600- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70601+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70602 NULL);
70603
70604 if (INDEX_AC != INDEX_L3) {
70605@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70606 kmem_cache_create(names[INDEX_L3].name,
70607 sizes[INDEX_L3].cs_size,
70608 ARCH_KMALLOC_MINALIGN,
70609- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70610+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70611 NULL);
70612 }
70613
70614@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70615 sizes->cs_cachep = kmem_cache_create(names->name,
70616 sizes->cs_size,
70617 ARCH_KMALLOC_MINALIGN,
70618- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70619+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70620 NULL);
70621 }
70622 #ifdef CONFIG_ZONE_DMA
70623@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70624 }
70625 /* cpu stats */
70626 {
70627- unsigned long allochit = atomic_read(&cachep->allochit);
70628- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70629- unsigned long freehit = atomic_read(&cachep->freehit);
70630- unsigned long freemiss = atomic_read(&cachep->freemiss);
70631+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70632+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70633+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70634+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70635
70636 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70637 allochit, allocmiss, freehit, freemiss);
70638@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70639 {
70640 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70641 #ifdef CONFIG_DEBUG_SLAB_LEAK
70642- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70643+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70644 #endif
70645 return 0;
70646 }
70647 module_init(slab_proc_init);
70648 #endif
70649
70650+void check_object_size(const void *ptr, unsigned long n, bool to)
70651+{
70652+
70653+#ifdef CONFIG_PAX_USERCOPY
70654+ struct page *page;
70655+ struct kmem_cache *cachep = NULL;
70656+ struct slab *slabp;
70657+ unsigned int objnr;
70658+ unsigned long offset;
70659+ const char *type;
70660+
70661+ if (!n)
70662+ return;
70663+
70664+ type = "<null>";
70665+ if (ZERO_OR_NULL_PTR(ptr))
70666+ goto report;
70667+
70668+ if (!virt_addr_valid(ptr))
70669+ return;
70670+
70671+ page = virt_to_head_page(ptr);
70672+
70673+ type = "<process stack>";
70674+ if (!PageSlab(page)) {
70675+ if (object_is_on_stack(ptr, n) == -1)
70676+ goto report;
70677+ return;
70678+ }
70679+
70680+ cachep = page_get_cache(page);
70681+ type = cachep->name;
70682+ if (!(cachep->flags & SLAB_USERCOPY))
70683+ goto report;
70684+
70685+ slabp = page_get_slab(page);
70686+ objnr = obj_to_index(cachep, slabp, ptr);
70687+ BUG_ON(objnr >= cachep->num);
70688+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70689+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70690+ return;
70691+
70692+report:
70693+ pax_report_usercopy(ptr, n, to, type);
70694+#endif
70695+
70696+}
70697+EXPORT_SYMBOL(check_object_size);
70698+
70699 /**
70700 * ksize - get the actual amount of memory allocated for a given object
70701 * @objp: Pointer to the object
70702diff --git a/mm/slob.c b/mm/slob.c
70703index 8105be4..579da9d 100644
70704--- a/mm/slob.c
70705+++ b/mm/slob.c
70706@@ -29,7 +29,7 @@
70707 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70708 * alloc_pages() directly, allocating compound pages so the page order
70709 * does not have to be separately tracked, and also stores the exact
70710- * allocation size in page->private so that it can be used to accurately
70711+ * allocation size in slob_page->size so that it can be used to accurately
70712 * provide ksize(). These objects are detected in kfree() because slob_page()
70713 * is false for them.
70714 *
70715@@ -58,6 +58,7 @@
70716 */
70717
70718 #include <linux/kernel.h>
70719+#include <linux/sched.h>
70720 #include <linux/slab.h>
70721 #include <linux/mm.h>
70722 #include <linux/swap.h> /* struct reclaim_state */
70723@@ -102,7 +103,8 @@ struct slob_page {
70724 unsigned long flags; /* mandatory */
70725 atomic_t _count; /* mandatory */
70726 slobidx_t units; /* free units left in page */
70727- unsigned long pad[2];
70728+ unsigned long pad[1];
70729+ unsigned long size; /* size when >=PAGE_SIZE */
70730 slob_t *free; /* first free slob_t in page */
70731 struct list_head list; /* linked list of free pages */
70732 };
70733@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70734 */
70735 static inline int is_slob_page(struct slob_page *sp)
70736 {
70737- return PageSlab((struct page *)sp);
70738+ return PageSlab((struct page *)sp) && !sp->size;
70739 }
70740
70741 static inline void set_slob_page(struct slob_page *sp)
70742@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70743
70744 static inline struct slob_page *slob_page(const void *addr)
70745 {
70746- return (struct slob_page *)virt_to_page(addr);
70747+ return (struct slob_page *)virt_to_head_page(addr);
70748 }
70749
70750 /*
70751@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70752 /*
70753 * Return the size of a slob block.
70754 */
70755-static slobidx_t slob_units(slob_t *s)
70756+static slobidx_t slob_units(const slob_t *s)
70757 {
70758 if (s->units > 0)
70759 return s->units;
70760@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70761 /*
70762 * Return the next free slob block pointer after this one.
70763 */
70764-static slob_t *slob_next(slob_t *s)
70765+static slob_t *slob_next(const slob_t *s)
70766 {
70767 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70768 slobidx_t next;
70769@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70770 /*
70771 * Returns true if s is the last free block in its page.
70772 */
70773-static int slob_last(slob_t *s)
70774+static int slob_last(const slob_t *s)
70775 {
70776 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70777 }
70778@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70779 if (!page)
70780 return NULL;
70781
70782+ set_slob_page(page);
70783 return page_address(page);
70784 }
70785
70786@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70787 if (!b)
70788 return NULL;
70789 sp = slob_page(b);
70790- set_slob_page(sp);
70791
70792 spin_lock_irqsave(&slob_lock, flags);
70793 sp->units = SLOB_UNITS(PAGE_SIZE);
70794 sp->free = b;
70795+ sp->size = 0;
70796 INIT_LIST_HEAD(&sp->list);
70797 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70798 set_slob_page_free(sp, slob_list);
70799@@ -476,10 +479,9 @@ out:
70800 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70801 */
70802
70803-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70804+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70805 {
70806- unsigned int *m;
70807- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70808+ slob_t *m;
70809 void *ret;
70810
70811 gfp &= gfp_allowed_mask;
70812@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70813
70814 if (!m)
70815 return NULL;
70816- *m = size;
70817+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70818+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70819+ m[0].units = size;
70820+ m[1].units = align;
70821 ret = (void *)m + align;
70822
70823 trace_kmalloc_node(_RET_IP_, ret,
70824@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70825 gfp |= __GFP_COMP;
70826 ret = slob_new_pages(gfp, order, node);
70827 if (ret) {
70828- struct page *page;
70829- page = virt_to_page(ret);
70830- page->private = size;
70831+ struct slob_page *sp;
70832+ sp = slob_page(ret);
70833+ sp->size = size;
70834 }
70835
70836 trace_kmalloc_node(_RET_IP_, ret,
70837 size, PAGE_SIZE << order, gfp, node);
70838 }
70839
70840- kmemleak_alloc(ret, size, 1, gfp);
70841+ return ret;
70842+}
70843+
70844+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70845+{
70846+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70847+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70848+
70849+ if (!ZERO_OR_NULL_PTR(ret))
70850+ kmemleak_alloc(ret, size, 1, gfp);
70851 return ret;
70852 }
70853 EXPORT_SYMBOL(__kmalloc_node);
70854@@ -533,13 +547,92 @@ void kfree(const void *block)
70855 sp = slob_page(block);
70856 if (is_slob_page(sp)) {
70857 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70858- unsigned int *m = (unsigned int *)(block - align);
70859- slob_free(m, *m + align);
70860- } else
70861+ slob_t *m = (slob_t *)(block - align);
70862+ slob_free(m, m[0].units + align);
70863+ } else {
70864+ clear_slob_page(sp);
70865+ free_slob_page(sp);
70866+ sp->size = 0;
70867 put_page(&sp->page);
70868+ }
70869 }
70870 EXPORT_SYMBOL(kfree);
70871
70872+void check_object_size(const void *ptr, unsigned long n, bool to)
70873+{
70874+
70875+#ifdef CONFIG_PAX_USERCOPY
70876+ struct slob_page *sp;
70877+ const slob_t *free;
70878+ const void *base;
70879+ unsigned long flags;
70880+ const char *type;
70881+
70882+ if (!n)
70883+ return;
70884+
70885+ type = "<null>";
70886+ if (ZERO_OR_NULL_PTR(ptr))
70887+ goto report;
70888+
70889+ if (!virt_addr_valid(ptr))
70890+ return;
70891+
70892+ type = "<process stack>";
70893+ sp = slob_page(ptr);
70894+ if (!PageSlab((struct page*)sp)) {
70895+ if (object_is_on_stack(ptr, n) == -1)
70896+ goto report;
70897+ return;
70898+ }
70899+
70900+ type = "<slob>";
70901+ if (sp->size) {
70902+ base = page_address(&sp->page);
70903+ if (base <= ptr && n <= sp->size - (ptr - base))
70904+ return;
70905+ goto report;
70906+ }
70907+
70908+ /* some tricky double walking to find the chunk */
70909+ spin_lock_irqsave(&slob_lock, flags);
70910+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70911+ free = sp->free;
70912+
70913+ while (!slob_last(free) && (void *)free <= ptr) {
70914+ base = free + slob_units(free);
70915+ free = slob_next(free);
70916+ }
70917+
70918+ while (base < (void *)free) {
70919+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70920+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70921+ int offset;
70922+
70923+ if (ptr < base + align)
70924+ break;
70925+
70926+ offset = ptr - base - align;
70927+ if (offset >= m) {
70928+ base += size;
70929+ continue;
70930+ }
70931+
70932+ if (n > m - offset)
70933+ break;
70934+
70935+ spin_unlock_irqrestore(&slob_lock, flags);
70936+ return;
70937+ }
70938+
70939+ spin_unlock_irqrestore(&slob_lock, flags);
70940+report:
70941+ pax_report_usercopy(ptr, n, to, type);
70942+#endif
70943+
70944+}
70945+EXPORT_SYMBOL(check_object_size);
70946+
70947 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70948 size_t ksize(const void *block)
70949 {
70950@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70951 sp = slob_page(block);
70952 if (is_slob_page(sp)) {
70953 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70954- unsigned int *m = (unsigned int *)(block - align);
70955- return SLOB_UNITS(*m) * SLOB_UNIT;
70956+ slob_t *m = (slob_t *)(block - align);
70957+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70958 } else
70959- return sp->page.private;
70960+ return sp->size;
70961 }
70962 EXPORT_SYMBOL(ksize);
70963
70964@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
70965 {
70966 struct kmem_cache *c;
70967
70968+#ifdef CONFIG_PAX_USERCOPY
70969+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70970+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70971+#else
70972 c = slob_alloc(sizeof(struct kmem_cache),
70973 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70974+#endif
70975
70976 if (c) {
70977 c->name = name;
70978@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
70979
70980 lockdep_trace_alloc(flags);
70981
70982+#ifdef CONFIG_PAX_USERCOPY
70983+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70984+#else
70985 if (c->size < PAGE_SIZE) {
70986 b = slob_alloc(c->size, flags, c->align, node);
70987 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70988 SLOB_UNITS(c->size) * SLOB_UNIT,
70989 flags, node);
70990 } else {
70991+ struct slob_page *sp;
70992+
70993 b = slob_new_pages(flags, get_order(c->size), node);
70994+ sp = slob_page(b);
70995+ sp->size = c->size;
70996 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70997 PAGE_SIZE << get_order(c->size),
70998 flags, node);
70999 }
71000+#endif
71001
71002 if (c->ctor)
71003 c->ctor(b);
71004@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71005
71006 static void __kmem_cache_free(void *b, int size)
71007 {
71008- if (size < PAGE_SIZE)
71009+ struct slob_page *sp = slob_page(b);
71010+
71011+ if (is_slob_page(sp))
71012 slob_free(b, size);
71013- else
71014+ else {
71015+ clear_slob_page(sp);
71016+ free_slob_page(sp);
71017+ sp->size = 0;
71018 slob_free_pages(b, get_order(size));
71019+ }
71020 }
71021
71022 static void kmem_rcu_free(struct rcu_head *head)
71023@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71024
71025 void kmem_cache_free(struct kmem_cache *c, void *b)
71026 {
71027+ int size = c->size;
71028+
71029+#ifdef CONFIG_PAX_USERCOPY
71030+ if (size + c->align < PAGE_SIZE) {
71031+ size += c->align;
71032+ b -= c->align;
71033+ }
71034+#endif
71035+
71036 kmemleak_free_recursive(b, c->flags);
71037 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71038 struct slob_rcu *slob_rcu;
71039- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71040- slob_rcu->size = c->size;
71041+ slob_rcu = b + (size - sizeof(struct slob_rcu));
71042+ slob_rcu->size = size;
71043 call_rcu(&slob_rcu->head, kmem_rcu_free);
71044 } else {
71045- __kmem_cache_free(b, c->size);
71046+ __kmem_cache_free(b, size);
71047 }
71048
71049+#ifdef CONFIG_PAX_USERCOPY
71050+ trace_kfree(_RET_IP_, b);
71051+#else
71052 trace_kmem_cache_free(_RET_IP_, b);
71053+#endif
71054+
71055 }
71056 EXPORT_SYMBOL(kmem_cache_free);
71057
71058diff --git a/mm/slub.c b/mm/slub.c
71059index 1a919f0..1739c9b 100644
71060--- a/mm/slub.c
71061+++ b/mm/slub.c
71062@@ -208,7 +208,7 @@ struct track {
71063
71064 enum track_item { TRACK_ALLOC, TRACK_FREE };
71065
71066-#ifdef CONFIG_SYSFS
71067+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71068 static int sysfs_slab_add(struct kmem_cache *);
71069 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71070 static void sysfs_slab_remove(struct kmem_cache *);
71071@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71072 if (!t->addr)
71073 return;
71074
71075- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71076+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71077 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71078 #ifdef CONFIG_STACKTRACE
71079 {
71080@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71081
71082 page = virt_to_head_page(x);
71083
71084+ BUG_ON(!PageSlab(page));
71085+
71086 slab_free(s, page, x, _RET_IP_);
71087
71088 trace_kmem_cache_free(_RET_IP_, x);
71089@@ -2592,7 +2594,7 @@ static int slub_min_objects;
71090 * Merge control. If this is set then no merging of slab caches will occur.
71091 * (Could be removed. This was introduced to pacify the merge skeptics.)
71092 */
71093-static int slub_nomerge;
71094+static int slub_nomerge = 1;
71095
71096 /*
71097 * Calculate the order of allocation given an slab object size.
71098@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71099 else
71100 s->cpu_partial = 30;
71101
71102- s->refcount = 1;
71103+ atomic_set(&s->refcount, 1);
71104 #ifdef CONFIG_NUMA
71105 s->remote_node_defrag_ratio = 1000;
71106 #endif
71107@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71108 void kmem_cache_destroy(struct kmem_cache *s)
71109 {
71110 down_write(&slub_lock);
71111- s->refcount--;
71112- if (!s->refcount) {
71113+ if (atomic_dec_and_test(&s->refcount)) {
71114 list_del(&s->list);
71115 up_write(&slub_lock);
71116 if (kmem_cache_close(s)) {
71117@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71118 EXPORT_SYMBOL(__kmalloc_node);
71119 #endif
71120
71121+void check_object_size(const void *ptr, unsigned long n, bool to)
71122+{
71123+
71124+#ifdef CONFIG_PAX_USERCOPY
71125+ struct page *page;
71126+ struct kmem_cache *s = NULL;
71127+ unsigned long offset;
71128+ const char *type;
71129+
71130+ if (!n)
71131+ return;
71132+
71133+ type = "<null>";
71134+ if (ZERO_OR_NULL_PTR(ptr))
71135+ goto report;
71136+
71137+ if (!virt_addr_valid(ptr))
71138+ return;
71139+
71140+ page = virt_to_head_page(ptr);
71141+
71142+ type = "<process stack>";
71143+ if (!PageSlab(page)) {
71144+ if (object_is_on_stack(ptr, n) == -1)
71145+ goto report;
71146+ return;
71147+ }
71148+
71149+ s = page->slab;
71150+ type = s->name;
71151+ if (!(s->flags & SLAB_USERCOPY))
71152+ goto report;
71153+
71154+ offset = (ptr - page_address(page)) % s->size;
71155+ if (offset <= s->objsize && n <= s->objsize - offset)
71156+ return;
71157+
71158+report:
71159+ pax_report_usercopy(ptr, n, to, type);
71160+#endif
71161+
71162+}
71163+EXPORT_SYMBOL(check_object_size);
71164+
71165 size_t ksize(const void *object)
71166 {
71167 struct page *page;
71168@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71169 int node;
71170
71171 list_add(&s->list, &slab_caches);
71172- s->refcount = -1;
71173+ atomic_set(&s->refcount, -1);
71174
71175 for_each_node_state(node, N_NORMAL_MEMORY) {
71176 struct kmem_cache_node *n = get_node(s, node);
71177@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71178
71179 /* Caches that are not of the two-to-the-power-of size */
71180 if (KMALLOC_MIN_SIZE <= 32) {
71181- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71182+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71183 caches++;
71184 }
71185
71186 if (KMALLOC_MIN_SIZE <= 64) {
71187- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71188+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71189 caches++;
71190 }
71191
71192 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71193- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71194+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71195 caches++;
71196 }
71197
71198@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71199 /*
71200 * We may have set a slab to be unmergeable during bootstrap.
71201 */
71202- if (s->refcount < 0)
71203+ if (atomic_read(&s->refcount) < 0)
71204 return 1;
71205
71206 return 0;
71207@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71208 down_write(&slub_lock);
71209 s = find_mergeable(size, align, flags, name, ctor);
71210 if (s) {
71211- s->refcount++;
71212+ atomic_inc(&s->refcount);
71213 /*
71214 * Adjust the object sizes so that we clear
71215 * the complete object on kzalloc.
71216@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71217 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71218
71219 if (sysfs_slab_alias(s, name)) {
71220- s->refcount--;
71221+ atomic_dec(&s->refcount);
71222 goto err;
71223 }
71224 up_write(&slub_lock);
71225@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71226 }
71227 #endif
71228
71229-#ifdef CONFIG_SYSFS
71230+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71231 static int count_inuse(struct page *page)
71232 {
71233 return page->inuse;
71234@@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71235 validate_slab_cache(kmalloc_caches[9]);
71236 }
71237 #else
71238-#ifdef CONFIG_SYSFS
71239+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71240 static void resiliency_test(void) {};
71241 #endif
71242 #endif
71243
71244-#ifdef CONFIG_SYSFS
71245+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71246 enum slab_stat_type {
71247 SL_ALL, /* All slabs */
71248 SL_PARTIAL, /* Only partially allocated slabs */
71249@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71250
71251 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71252 {
71253- return sprintf(buf, "%d\n", s->refcount - 1);
71254+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71255 }
71256 SLAB_ATTR_RO(aliases);
71257
71258@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71259 return name;
71260 }
71261
71262+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71263 static int sysfs_slab_add(struct kmem_cache *s)
71264 {
71265 int err;
71266@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71267 kobject_del(&s->kobj);
71268 kobject_put(&s->kobj);
71269 }
71270+#endif
71271
71272 /*
71273 * Need to buffer aliases during bootup until sysfs becomes
71274@@ -5298,6 +5345,7 @@ struct saved_alias {
71275
71276 static struct saved_alias *alias_list;
71277
71278+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71279 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71280 {
71281 struct saved_alias *al;
71282@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71283 alias_list = al;
71284 return 0;
71285 }
71286+#endif
71287
71288 static int __init slab_sysfs_init(void)
71289 {
71290diff --git a/mm/swap.c b/mm/swap.c
71291index a91caf7..b887e735 100644
71292--- a/mm/swap.c
71293+++ b/mm/swap.c
71294@@ -31,6 +31,7 @@
71295 #include <linux/backing-dev.h>
71296 #include <linux/memcontrol.h>
71297 #include <linux/gfp.h>
71298+#include <linux/hugetlb.h>
71299
71300 #include "internal.h"
71301
71302@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71303
71304 __page_cache_release(page);
71305 dtor = get_compound_page_dtor(page);
71306+ if (!PageHuge(page))
71307+ BUG_ON(dtor != free_compound_page);
71308 (*dtor)(page);
71309 }
71310
71311diff --git a/mm/swapfile.c b/mm/swapfile.c
71312index b1cd120..aaae885 100644
71313--- a/mm/swapfile.c
71314+++ b/mm/swapfile.c
71315@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71316
71317 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71318 /* Activity counter to indicate that a swapon or swapoff has occurred */
71319-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71320+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71321
71322 static inline unsigned char swap_count(unsigned char ent)
71323 {
71324@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71325 }
71326 filp_close(swap_file, NULL);
71327 err = 0;
71328- atomic_inc(&proc_poll_event);
71329+ atomic_inc_unchecked(&proc_poll_event);
71330 wake_up_interruptible(&proc_poll_wait);
71331
71332 out_dput:
71333@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71334
71335 poll_wait(file, &proc_poll_wait, wait);
71336
71337- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71338- seq->poll_event = atomic_read(&proc_poll_event);
71339+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71340+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71341 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71342 }
71343
71344@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71345 return ret;
71346
71347 seq = file->private_data;
71348- seq->poll_event = atomic_read(&proc_poll_event);
71349+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71350 return 0;
71351 }
71352
71353@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71354 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71355
71356 mutex_unlock(&swapon_mutex);
71357- atomic_inc(&proc_poll_event);
71358+ atomic_inc_unchecked(&proc_poll_event);
71359 wake_up_interruptible(&proc_poll_wait);
71360
71361 if (S_ISREG(inode->i_mode))
71362diff --git a/mm/util.c b/mm/util.c
71363index 136ac4f..5117eef 100644
71364--- a/mm/util.c
71365+++ b/mm/util.c
71366@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71367 * allocated buffer. Use this if you don't want to free the buffer immediately
71368 * like, for example, with RCU.
71369 */
71370+#undef __krealloc
71371 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71372 {
71373 void *ret;
71374@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71375 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71376 * %NULL pointer, the object pointed to is freed.
71377 */
71378+#undef krealloc
71379 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71380 {
71381 void *ret;
71382@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71383 void arch_pick_mmap_layout(struct mm_struct *mm)
71384 {
71385 mm->mmap_base = TASK_UNMAPPED_BASE;
71386+
71387+#ifdef CONFIG_PAX_RANDMMAP
71388+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71389+ mm->mmap_base += mm->delta_mmap;
71390+#endif
71391+
71392 mm->get_unmapped_area = arch_get_unmapped_area;
71393 mm->unmap_area = arch_unmap_area;
71394 }
71395diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71396index 27be2f0..0aef2c2 100644
71397--- a/mm/vmalloc.c
71398+++ b/mm/vmalloc.c
71399@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71400
71401 pte = pte_offset_kernel(pmd, addr);
71402 do {
71403- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71404- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71405+
71406+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71407+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71408+ BUG_ON(!pte_exec(*pte));
71409+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71410+ continue;
71411+ }
71412+#endif
71413+
71414+ {
71415+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71416+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71417+ }
71418 } while (pte++, addr += PAGE_SIZE, addr != end);
71419 }
71420
71421@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71422 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71423 {
71424 pte_t *pte;
71425+ int ret = -ENOMEM;
71426
71427 /*
71428 * nr is a running index into the array which helps higher level
71429@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71430 pte = pte_alloc_kernel(pmd, addr);
71431 if (!pte)
71432 return -ENOMEM;
71433+
71434+ pax_open_kernel();
71435 do {
71436 struct page *page = pages[*nr];
71437
71438- if (WARN_ON(!pte_none(*pte)))
71439- return -EBUSY;
71440- if (WARN_ON(!page))
71441- return -ENOMEM;
71442+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71443+ if (pgprot_val(prot) & _PAGE_NX)
71444+#endif
71445+
71446+ if (WARN_ON(!pte_none(*pte))) {
71447+ ret = -EBUSY;
71448+ goto out;
71449+ }
71450+ if (WARN_ON(!page)) {
71451+ ret = -ENOMEM;
71452+ goto out;
71453+ }
71454 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71455 (*nr)++;
71456 } while (pte++, addr += PAGE_SIZE, addr != end);
71457- return 0;
71458+ ret = 0;
71459+out:
71460+ pax_close_kernel();
71461+ return ret;
71462 }
71463
71464 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71465@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71466 * and fall back on vmalloc() if that fails. Others
71467 * just put it in the vmalloc space.
71468 */
71469-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71470+#ifdef CONFIG_MODULES
71471+#ifdef MODULES_VADDR
71472 unsigned long addr = (unsigned long)x;
71473 if (addr >= MODULES_VADDR && addr < MODULES_END)
71474 return 1;
71475 #endif
71476+
71477+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71478+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71479+ return 1;
71480+#endif
71481+
71482+#endif
71483+
71484 return is_vmalloc_addr(x);
71485 }
71486
71487@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71488
71489 if (!pgd_none(*pgd)) {
71490 pud_t *pud = pud_offset(pgd, addr);
71491+#ifdef CONFIG_X86
71492+ if (!pud_large(*pud))
71493+#endif
71494 if (!pud_none(*pud)) {
71495 pmd_t *pmd = pmd_offset(pud, addr);
71496+#ifdef CONFIG_X86
71497+ if (!pmd_large(*pmd))
71498+#endif
71499 if (!pmd_none(*pmd)) {
71500 pte_t *ptep, pte;
71501
71502@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71503 struct vm_struct *area;
71504
71505 BUG_ON(in_interrupt());
71506+
71507+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71508+ if (flags & VM_KERNEXEC) {
71509+ if (start != VMALLOC_START || end != VMALLOC_END)
71510+ return NULL;
71511+ start = (unsigned long)MODULES_EXEC_VADDR;
71512+ end = (unsigned long)MODULES_EXEC_END;
71513+ }
71514+#endif
71515+
71516 if (flags & VM_IOREMAP) {
71517 int bit = fls(size);
71518
71519@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71520 if (count > totalram_pages)
71521 return NULL;
71522
71523+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71524+ if (!(pgprot_val(prot) & _PAGE_NX))
71525+ flags |= VM_KERNEXEC;
71526+#endif
71527+
71528 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71529 __builtin_return_address(0));
71530 if (!area)
71531@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71532 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71533 goto fail;
71534
71535+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71536+ if (!(pgprot_val(prot) & _PAGE_NX))
71537+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71538+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71539+ else
71540+#endif
71541+
71542 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71543 start, end, node, gfp_mask, caller);
71544 if (!area)
71545@@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71546 gfp_mask, prot, node, caller);
71547 }
71548
71549+#undef __vmalloc
71550 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71551 {
71552 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71553@@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71554 * For tight control over page level allocator and protection flags
71555 * use __vmalloc() instead.
71556 */
71557+#undef vmalloc
71558 void *vmalloc(unsigned long size)
71559 {
71560 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71561@@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71562 * For tight control over page level allocator and protection flags
71563 * use __vmalloc() instead.
71564 */
71565+#undef vzalloc
71566 void *vzalloc(unsigned long size)
71567 {
71568 return __vmalloc_node_flags(size, -1,
71569@@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71570 * The resulting memory area is zeroed so it can be mapped to userspace
71571 * without leaking data.
71572 */
71573+#undef vmalloc_user
71574 void *vmalloc_user(unsigned long size)
71575 {
71576 struct vm_struct *area;
71577@@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71578 * For tight control over page level allocator and protection flags
71579 * use __vmalloc() instead.
71580 */
71581+#undef vmalloc_node
71582 void *vmalloc_node(unsigned long size, int node)
71583 {
71584 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71585@@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71586 * For tight control over page level allocator and protection flags
71587 * use __vmalloc_node() instead.
71588 */
71589+#undef vzalloc_node
71590 void *vzalloc_node(unsigned long size, int node)
71591 {
71592 return __vmalloc_node_flags(size, node,
71593@@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71594 * For tight control over page level allocator and protection flags
71595 * use __vmalloc() instead.
71596 */
71597-
71598+#undef vmalloc_exec
71599 void *vmalloc_exec(unsigned long size)
71600 {
71601- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71602+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71603 -1, __builtin_return_address(0));
71604 }
71605
71606@@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71607 * Allocate enough 32bit PA addressable pages to cover @size from the
71608 * page level allocator and map them into contiguous kernel virtual space.
71609 */
71610+#undef vmalloc_32
71611 void *vmalloc_32(unsigned long size)
71612 {
71613 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71614@@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71615 * The resulting memory area is 32bit addressable and zeroed so it can be
71616 * mapped to userspace without leaking data.
71617 */
71618+#undef vmalloc_32_user
71619 void *vmalloc_32_user(unsigned long size)
71620 {
71621 struct vm_struct *area;
71622@@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71623 unsigned long uaddr = vma->vm_start;
71624 unsigned long usize = vma->vm_end - vma->vm_start;
71625
71626+ BUG_ON(vma->vm_mirror);
71627+
71628 if ((PAGE_SIZE-1) & (unsigned long)addr)
71629 return -EINVAL;
71630
71631diff --git a/mm/vmstat.c b/mm/vmstat.c
71632index 8fd603b..cf0d930 100644
71633--- a/mm/vmstat.c
71634+++ b/mm/vmstat.c
71635@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71636 *
71637 * vm_stat contains the global counters
71638 */
71639-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71640+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71641 EXPORT_SYMBOL(vm_stat);
71642
71643 #ifdef CONFIG_SMP
71644@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71645 v = p->vm_stat_diff[i];
71646 p->vm_stat_diff[i] = 0;
71647 local_irq_restore(flags);
71648- atomic_long_add(v, &zone->vm_stat[i]);
71649+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71650 global_diff[i] += v;
71651 #ifdef CONFIG_NUMA
71652 /* 3 seconds idle till flush */
71653@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71654
71655 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71656 if (global_diff[i])
71657- atomic_long_add(global_diff[i], &vm_stat[i]);
71658+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71659 }
71660
71661 #endif
71662@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71663 start_cpu_timer(cpu);
71664 #endif
71665 #ifdef CONFIG_PROC_FS
71666- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71667- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71668- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71669- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71670+ {
71671+ mode_t gr_mode = S_IRUGO;
71672+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71673+ gr_mode = S_IRUSR;
71674+#endif
71675+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71676+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71677+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71678+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71679+#else
71680+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71681+#endif
71682+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71683+ }
71684 #endif
71685 return 0;
71686 }
71687diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71688index 5471628..cef8398 100644
71689--- a/net/8021q/vlan.c
71690+++ b/net/8021q/vlan.c
71691@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71692 err = -EPERM;
71693 if (!capable(CAP_NET_ADMIN))
71694 break;
71695- if ((args.u.name_type >= 0) &&
71696- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71697+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71698 struct vlan_net *vn;
71699
71700 vn = net_generic(net, vlan_net_id);
71701diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71702index fdfdb57..38d368c 100644
71703--- a/net/9p/trans_fd.c
71704+++ b/net/9p/trans_fd.c
71705@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71706 oldfs = get_fs();
71707 set_fs(get_ds());
71708 /* The cast to a user pointer is valid due to the set_fs() */
71709- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71710+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71711 set_fs(oldfs);
71712
71713 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71714diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71715index f41f026..fe76ea8 100644
71716--- a/net/atm/atm_misc.c
71717+++ b/net/atm/atm_misc.c
71718@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71719 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71720 return 1;
71721 atm_return(vcc, truesize);
71722- atomic_inc(&vcc->stats->rx_drop);
71723+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71724 return 0;
71725 }
71726 EXPORT_SYMBOL(atm_charge);
71727@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71728 }
71729 }
71730 atm_return(vcc, guess);
71731- atomic_inc(&vcc->stats->rx_drop);
71732+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71733 return NULL;
71734 }
71735 EXPORT_SYMBOL(atm_alloc_charge);
71736@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71737
71738 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71739 {
71740-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71741+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71742 __SONET_ITEMS
71743 #undef __HANDLE_ITEM
71744 }
71745@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71746
71747 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71748 {
71749-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71750+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71751 __SONET_ITEMS
71752 #undef __HANDLE_ITEM
71753 }
71754diff --git a/net/atm/lec.h b/net/atm/lec.h
71755index dfc0719..47c5322 100644
71756--- a/net/atm/lec.h
71757+++ b/net/atm/lec.h
71758@@ -48,7 +48,7 @@ struct lane2_ops {
71759 const u8 *tlvs, u32 sizeoftlvs);
71760 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71761 const u8 *tlvs, u32 sizeoftlvs);
71762-};
71763+} __no_const;
71764
71765 /*
71766 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71767diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71768index 0919a88..a23d54e 100644
71769--- a/net/atm/mpc.h
71770+++ b/net/atm/mpc.h
71771@@ -33,7 +33,7 @@ struct mpoa_client {
71772 struct mpc_parameters parameters; /* parameters for this client */
71773
71774 const struct net_device_ops *old_ops;
71775- struct net_device_ops new_ops;
71776+ net_device_ops_no_const new_ops;
71777 };
71778
71779
71780diff --git a/net/atm/proc.c b/net/atm/proc.c
71781index 0d020de..011c7bb 100644
71782--- a/net/atm/proc.c
71783+++ b/net/atm/proc.c
71784@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71785 const struct k_atm_aal_stats *stats)
71786 {
71787 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71788- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71789- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71790- atomic_read(&stats->rx_drop));
71791+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71792+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71793+ atomic_read_unchecked(&stats->rx_drop));
71794 }
71795
71796 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71797diff --git a/net/atm/resources.c b/net/atm/resources.c
71798index 23f45ce..c748f1a 100644
71799--- a/net/atm/resources.c
71800+++ b/net/atm/resources.c
71801@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71802 static void copy_aal_stats(struct k_atm_aal_stats *from,
71803 struct atm_aal_stats *to)
71804 {
71805-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71806+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71807 __AAL_STAT_ITEMS
71808 #undef __HANDLE_ITEM
71809 }
71810@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71811 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71812 struct atm_aal_stats *to)
71813 {
71814-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71815+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71816 __AAL_STAT_ITEMS
71817 #undef __HANDLE_ITEM
71818 }
71819diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71820index 3512e25..2b33401 100644
71821--- a/net/batman-adv/bat_iv_ogm.c
71822+++ b/net/batman-adv/bat_iv_ogm.c
71823@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71824
71825 /* change sequence number to network order */
71826 batman_ogm_packet->seqno =
71827- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71828+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71829
71830 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71831 batman_ogm_packet->tt_crc = htons((uint16_t)
71832@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71833 else
71834 batman_ogm_packet->gw_flags = NO_FLAGS;
71835
71836- atomic_inc(&hard_iface->seqno);
71837+ atomic_inc_unchecked(&hard_iface->seqno);
71838
71839 slide_own_bcast_window(hard_iface);
71840 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71841@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71842 return;
71843
71844 /* could be changed by schedule_own_packet() */
71845- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71846+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71847
71848 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71849
71850diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71851index 7704df4..beb4e16 100644
71852--- a/net/batman-adv/hard-interface.c
71853+++ b/net/batman-adv/hard-interface.c
71854@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71855 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71856 dev_add_pack(&hard_iface->batman_adv_ptype);
71857
71858- atomic_set(&hard_iface->seqno, 1);
71859- atomic_set(&hard_iface->frag_seqno, 1);
71860+ atomic_set_unchecked(&hard_iface->seqno, 1);
71861+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71862 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71863 hard_iface->net_dev->name);
71864
71865diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
71866index f9cc957..efd9dae 100644
71867--- a/net/batman-adv/soft-interface.c
71868+++ b/net/batman-adv/soft-interface.c
71869@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
71870
71871 /* set broadcast sequence number */
71872 bcast_packet->seqno =
71873- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71874+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71875
71876 add_bcast_packet_to_list(bat_priv, skb, 1);
71877
71878@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
71879 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71880
71881 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71882- atomic_set(&bat_priv->bcast_seqno, 1);
71883+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71884 atomic_set(&bat_priv->ttvn, 0);
71885 atomic_set(&bat_priv->tt_local_changes, 0);
71886 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71887diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
71888index ab8d0fe..ceba3fd 100644
71889--- a/net/batman-adv/types.h
71890+++ b/net/batman-adv/types.h
71891@@ -38,8 +38,8 @@ struct hard_iface {
71892 int16_t if_num;
71893 char if_status;
71894 struct net_device *net_dev;
71895- atomic_t seqno;
71896- atomic_t frag_seqno;
71897+ atomic_unchecked_t seqno;
71898+ atomic_unchecked_t frag_seqno;
71899 unsigned char *packet_buff;
71900 int packet_len;
71901 struct kobject *hardif_obj;
71902@@ -154,7 +154,7 @@ struct bat_priv {
71903 atomic_t orig_interval; /* uint */
71904 atomic_t hop_penalty; /* uint */
71905 atomic_t log_level; /* uint */
71906- atomic_t bcast_seqno;
71907+ atomic_unchecked_t bcast_seqno;
71908 atomic_t bcast_queue_left;
71909 atomic_t batman_queue_left;
71910 atomic_t ttvn; /* translation table version number */
71911diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
71912index 07d1c1d..7e9bea9 100644
71913--- a/net/batman-adv/unicast.c
71914+++ b/net/batman-adv/unicast.c
71915@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
71916 frag1->flags = UNI_FRAG_HEAD | large_tail;
71917 frag2->flags = large_tail;
71918
71919- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71920+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71921 frag1->seqno = htons(seqno - 1);
71922 frag2->seqno = htons(seqno);
71923
71924diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
71925index c1c597e..05ebb40 100644
71926--- a/net/bluetooth/hci_conn.c
71927+++ b/net/bluetooth/hci_conn.c
71928@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
71929 memset(&cp, 0, sizeof(cp));
71930
71931 cp.handle = cpu_to_le16(conn->handle);
71932- memcpy(cp.ltk, ltk, sizeof(ltk));
71933+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71934
71935 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71936 }
71937diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
71938index 17b5b1c..826d872 100644
71939--- a/net/bluetooth/l2cap_core.c
71940+++ b/net/bluetooth/l2cap_core.c
71941@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
71942 break;
71943
71944 case L2CAP_CONF_RFC:
71945- if (olen == sizeof(rfc))
71946- memcpy(&rfc, (void *)val, olen);
71947+ if (olen != sizeof(rfc))
71948+ break;
71949+
71950+ memcpy(&rfc, (void *)val, olen);
71951
71952 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
71953 rfc.mode != chan->mode)
71954@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
71955
71956 switch (type) {
71957 case L2CAP_CONF_RFC:
71958- if (olen == sizeof(rfc))
71959- memcpy(&rfc, (void *)val, olen);
71960+ if (olen != sizeof(rfc))
71961+ break;
71962+
71963+ memcpy(&rfc, (void *)val, olen);
71964 goto done;
71965 }
71966 }
71967diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
71968index a5f4e57..910ee6d 100644
71969--- a/net/bridge/br_multicast.c
71970+++ b/net/bridge/br_multicast.c
71971@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
71972 nexthdr = ip6h->nexthdr;
71973 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71974
71975- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71976+ if (nexthdr != IPPROTO_ICMPV6)
71977 return 0;
71978
71979 /* Okay, we found ICMPv6 header */
71980diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
71981index 5864cc4..121f3a3 100644
71982--- a/net/bridge/netfilter/ebtables.c
71983+++ b/net/bridge/netfilter/ebtables.c
71984@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
71985 tmp.valid_hooks = t->table->valid_hooks;
71986 }
71987 mutex_unlock(&ebt_mutex);
71988- if (copy_to_user(user, &tmp, *len) != 0){
71989+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71990 BUGPRINT("c2u Didn't work\n");
71991 ret = -EFAULT;
71992 break;
71993diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
71994index a986280..13444a1 100644
71995--- a/net/caif/caif_socket.c
71996+++ b/net/caif/caif_socket.c
71997@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71998 #ifdef CONFIG_DEBUG_FS
71999 struct debug_fs_counter {
72000 atomic_t caif_nr_socks;
72001- atomic_t caif_sock_create;
72002- atomic_t num_connect_req;
72003- atomic_t num_connect_resp;
72004- atomic_t num_connect_fail_resp;
72005- atomic_t num_disconnect;
72006- atomic_t num_remote_shutdown_ind;
72007- atomic_t num_tx_flow_off_ind;
72008- atomic_t num_tx_flow_on_ind;
72009- atomic_t num_rx_flow_off;
72010- atomic_t num_rx_flow_on;
72011+ atomic_unchecked_t caif_sock_create;
72012+ atomic_unchecked_t num_connect_req;
72013+ atomic_unchecked_t num_connect_resp;
72014+ atomic_unchecked_t num_connect_fail_resp;
72015+ atomic_unchecked_t num_disconnect;
72016+ atomic_unchecked_t num_remote_shutdown_ind;
72017+ atomic_unchecked_t num_tx_flow_off_ind;
72018+ atomic_unchecked_t num_tx_flow_on_ind;
72019+ atomic_unchecked_t num_rx_flow_off;
72020+ atomic_unchecked_t num_rx_flow_on;
72021 };
72022 static struct debug_fs_counter cnt;
72023 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72024+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72025 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72026 #else
72027 #define dbfs_atomic_inc(v) 0
72028@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72029 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72030 sk_rcvbuf_lowwater(cf_sk));
72031 set_rx_flow_off(cf_sk);
72032- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72033+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72034 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72035 }
72036
72037@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72038 set_rx_flow_off(cf_sk);
72039 if (net_ratelimit())
72040 pr_debug("sending flow OFF due to rmem_schedule\n");
72041- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72042+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72043 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72044 }
72045 skb->dev = NULL;
72046@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72047 switch (flow) {
72048 case CAIF_CTRLCMD_FLOW_ON_IND:
72049 /* OK from modem to start sending again */
72050- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72051+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72052 set_tx_flow_on(cf_sk);
72053 cf_sk->sk.sk_state_change(&cf_sk->sk);
72054 break;
72055
72056 case CAIF_CTRLCMD_FLOW_OFF_IND:
72057 /* Modem asks us to shut up */
72058- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72059+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72060 set_tx_flow_off(cf_sk);
72061 cf_sk->sk.sk_state_change(&cf_sk->sk);
72062 break;
72063@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72064 /* We're now connected */
72065 caif_client_register_refcnt(&cf_sk->layer,
72066 cfsk_hold, cfsk_put);
72067- dbfs_atomic_inc(&cnt.num_connect_resp);
72068+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72069 cf_sk->sk.sk_state = CAIF_CONNECTED;
72070 set_tx_flow_on(cf_sk);
72071 cf_sk->sk.sk_state_change(&cf_sk->sk);
72072@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72073
72074 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72075 /* Connect request failed */
72076- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72077+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72078 cf_sk->sk.sk_err = ECONNREFUSED;
72079 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72080 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72081@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72082
72083 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72084 /* Modem has closed this connection, or device is down. */
72085- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72086+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72087 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72088 cf_sk->sk.sk_err = ECONNRESET;
72089 set_rx_flow_on(cf_sk);
72090@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72091 return;
72092
72093 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72094- dbfs_atomic_inc(&cnt.num_rx_flow_on);
72095+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72096 set_rx_flow_on(cf_sk);
72097 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72098 }
72099@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72100 /*ifindex = id of the interface.*/
72101 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72102
72103- dbfs_atomic_inc(&cnt.num_connect_req);
72104+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72105 cf_sk->layer.receive = caif_sktrecv_cb;
72106
72107 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72108@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72109 spin_unlock_bh(&sk->sk_receive_queue.lock);
72110 sock->sk = NULL;
72111
72112- dbfs_atomic_inc(&cnt.num_disconnect);
72113+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72114
72115 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72116 if (cf_sk->debugfs_socket_dir != NULL)
72117@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72118 cf_sk->conn_req.protocol = protocol;
72119 /* Increase the number of sockets created. */
72120 dbfs_atomic_inc(&cnt.caif_nr_socks);
72121- num = dbfs_atomic_inc(&cnt.caif_sock_create);
72122+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72123 #ifdef CONFIG_DEBUG_FS
72124 if (!IS_ERR(debugfsdir)) {
72125
72126diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72127index 5cf5222..6f704ad 100644
72128--- a/net/caif/cfctrl.c
72129+++ b/net/caif/cfctrl.c
72130@@ -9,6 +9,7 @@
72131 #include <linux/stddef.h>
72132 #include <linux/spinlock.h>
72133 #include <linux/slab.h>
72134+#include <linux/sched.h>
72135 #include <net/caif/caif_layer.h>
72136 #include <net/caif/cfpkt.h>
72137 #include <net/caif/cfctrl.h>
72138@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72139 memset(&dev_info, 0, sizeof(dev_info));
72140 dev_info.id = 0xff;
72141 cfsrvl_init(&this->serv, 0, &dev_info, false);
72142- atomic_set(&this->req_seq_no, 1);
72143- atomic_set(&this->rsp_seq_no, 1);
72144+ atomic_set_unchecked(&this->req_seq_no, 1);
72145+ atomic_set_unchecked(&this->rsp_seq_no, 1);
72146 this->serv.layer.receive = cfctrl_recv;
72147 sprintf(this->serv.layer.name, "ctrl");
72148 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72149@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72150 struct cfctrl_request_info *req)
72151 {
72152 spin_lock_bh(&ctrl->info_list_lock);
72153- atomic_inc(&ctrl->req_seq_no);
72154- req->sequence_no = atomic_read(&ctrl->req_seq_no);
72155+ atomic_inc_unchecked(&ctrl->req_seq_no);
72156+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72157 list_add_tail(&req->list, &ctrl->list);
72158 spin_unlock_bh(&ctrl->info_list_lock);
72159 }
72160@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72161 if (p != first)
72162 pr_warn("Requests are not received in order\n");
72163
72164- atomic_set(&ctrl->rsp_seq_no,
72165+ atomic_set_unchecked(&ctrl->rsp_seq_no,
72166 p->sequence_no);
72167 list_del(&p->list);
72168 goto out;
72169diff --git a/net/can/gw.c b/net/can/gw.c
72170index 3d79b12..8de85fa 100644
72171--- a/net/can/gw.c
72172+++ b/net/can/gw.c
72173@@ -96,7 +96,7 @@ struct cf_mod {
72174 struct {
72175 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72176 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72177- } csumfunc;
72178+ } __no_const csumfunc;
72179 };
72180
72181
72182diff --git a/net/compat.c b/net/compat.c
72183index 6def90e..c6992fa 100644
72184--- a/net/compat.c
72185+++ b/net/compat.c
72186@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72187 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72188 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72189 return -EFAULT;
72190- kmsg->msg_name = compat_ptr(tmp1);
72191- kmsg->msg_iov = compat_ptr(tmp2);
72192- kmsg->msg_control = compat_ptr(tmp3);
72193+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72194+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72195+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72196 return 0;
72197 }
72198
72199@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72200
72201 if (kern_msg->msg_namelen) {
72202 if (mode == VERIFY_READ) {
72203- int err = move_addr_to_kernel(kern_msg->msg_name,
72204+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72205 kern_msg->msg_namelen,
72206 kern_address);
72207 if (err < 0)
72208@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72209 kern_msg->msg_name = NULL;
72210
72211 tot_len = iov_from_user_compat_to_kern(kern_iov,
72212- (struct compat_iovec __user *)kern_msg->msg_iov,
72213+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
72214 kern_msg->msg_iovlen);
72215 if (tot_len >= 0)
72216 kern_msg->msg_iov = kern_iov;
72217@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72218
72219 #define CMSG_COMPAT_FIRSTHDR(msg) \
72220 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72221- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72222+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72223 (struct compat_cmsghdr __user *)NULL)
72224
72225 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72226 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72227 (ucmlen) <= (unsigned long) \
72228 ((mhdr)->msg_controllen - \
72229- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72230+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72231
72232 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72233 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72234 {
72235 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72236- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72237+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72238 msg->msg_controllen)
72239 return NULL;
72240 return (struct compat_cmsghdr __user *)ptr;
72241@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72242 {
72243 struct compat_timeval ctv;
72244 struct compat_timespec cts[3];
72245- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72246+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72247 struct compat_cmsghdr cmhdr;
72248 int cmlen;
72249
72250@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72251
72252 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72253 {
72254- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72255+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72256 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72257 int fdnum = scm->fp->count;
72258 struct file **fp = scm->fp->fp;
72259@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72260 return -EFAULT;
72261 old_fs = get_fs();
72262 set_fs(KERNEL_DS);
72263- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72264+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72265 set_fs(old_fs);
72266
72267 return err;
72268@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72269 len = sizeof(ktime);
72270 old_fs = get_fs();
72271 set_fs(KERNEL_DS);
72272- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72273+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72274 set_fs(old_fs);
72275
72276 if (!err) {
72277@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72278 case MCAST_JOIN_GROUP:
72279 case MCAST_LEAVE_GROUP:
72280 {
72281- struct compat_group_req __user *gr32 = (void *)optval;
72282+ struct compat_group_req __user *gr32 = (void __user *)optval;
72283 struct group_req __user *kgr =
72284 compat_alloc_user_space(sizeof(struct group_req));
72285 u32 interface;
72286@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72287 case MCAST_BLOCK_SOURCE:
72288 case MCAST_UNBLOCK_SOURCE:
72289 {
72290- struct compat_group_source_req __user *gsr32 = (void *)optval;
72291+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72292 struct group_source_req __user *kgsr = compat_alloc_user_space(
72293 sizeof(struct group_source_req));
72294 u32 interface;
72295@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72296 }
72297 case MCAST_MSFILTER:
72298 {
72299- struct compat_group_filter __user *gf32 = (void *)optval;
72300+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72301 struct group_filter __user *kgf;
72302 u32 interface, fmode, numsrc;
72303
72304@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72305 char __user *optval, int __user *optlen,
72306 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72307 {
72308- struct compat_group_filter __user *gf32 = (void *)optval;
72309+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72310 struct group_filter __user *kgf;
72311 int __user *koptlen;
72312 u32 interface, fmode, numsrc;
72313diff --git a/net/core/datagram.c b/net/core/datagram.c
72314index 68bbf9f..5ef0d12 100644
72315--- a/net/core/datagram.c
72316+++ b/net/core/datagram.c
72317@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72318 }
72319
72320 kfree_skb(skb);
72321- atomic_inc(&sk->sk_drops);
72322+ atomic_inc_unchecked(&sk->sk_drops);
72323 sk_mem_reclaim_partial(sk);
72324
72325 return err;
72326diff --git a/net/core/dev.c b/net/core/dev.c
72327index 5a13edf..a6f2bd2 100644
72328--- a/net/core/dev.c
72329+++ b/net/core/dev.c
72330@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72331 if (no_module && capable(CAP_NET_ADMIN))
72332 no_module = request_module("netdev-%s", name);
72333 if (no_module && capable(CAP_SYS_MODULE)) {
72334+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72335+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72336+#else
72337 if (!request_module("%s", name))
72338 pr_err("Loading kernel module for a network device "
72339 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72340 "instead\n", name);
72341+#endif
72342 }
72343 }
72344 EXPORT_SYMBOL(dev_load);
72345@@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72346 {
72347 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72348 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72349- atomic_long_inc(&dev->rx_dropped);
72350+ atomic_long_inc_unchecked(&dev->rx_dropped);
72351 kfree_skb(skb);
72352 return NET_RX_DROP;
72353 }
72354@@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72355 nf_reset(skb);
72356
72357 if (unlikely(!is_skb_forwardable(dev, skb))) {
72358- atomic_long_inc(&dev->rx_dropped);
72359+ atomic_long_inc_unchecked(&dev->rx_dropped);
72360 kfree_skb(skb);
72361 return NET_RX_DROP;
72362 }
72363@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72364
72365 struct dev_gso_cb {
72366 void (*destructor)(struct sk_buff *skb);
72367-};
72368+} __no_const;
72369
72370 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72371
72372@@ -2970,7 +2974,7 @@ enqueue:
72373
72374 local_irq_restore(flags);
72375
72376- atomic_long_inc(&skb->dev->rx_dropped);
72377+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72378 kfree_skb(skb);
72379 return NET_RX_DROP;
72380 }
72381@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72382 }
72383 EXPORT_SYMBOL(netif_rx_ni);
72384
72385-static void net_tx_action(struct softirq_action *h)
72386+static void net_tx_action(void)
72387 {
72388 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72389
72390@@ -3333,7 +3337,7 @@ ncls:
72391 if (pt_prev) {
72392 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72393 } else {
72394- atomic_long_inc(&skb->dev->rx_dropped);
72395+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72396 kfree_skb(skb);
72397 /* Jamal, now you will not able to escape explaining
72398 * me how you were going to use this. :-)
72399@@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72400 }
72401 EXPORT_SYMBOL(netif_napi_del);
72402
72403-static void net_rx_action(struct softirq_action *h)
72404+static void net_rx_action(void)
72405 {
72406 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72407 unsigned long time_limit = jiffies + 2;
72408@@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72409 } else {
72410 netdev_stats_to_stats64(storage, &dev->stats);
72411 }
72412- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72413+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72414 return storage;
72415 }
72416 EXPORT_SYMBOL(dev_get_stats);
72417diff --git a/net/core/flow.c b/net/core/flow.c
72418index e318c7e..168b1d0 100644
72419--- a/net/core/flow.c
72420+++ b/net/core/flow.c
72421@@ -61,7 +61,7 @@ struct flow_cache {
72422 struct timer_list rnd_timer;
72423 };
72424
72425-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72426+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72427 EXPORT_SYMBOL(flow_cache_genid);
72428 static struct flow_cache flow_cache_global;
72429 static struct kmem_cache *flow_cachep __read_mostly;
72430@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72431
72432 static int flow_entry_valid(struct flow_cache_entry *fle)
72433 {
72434- if (atomic_read(&flow_cache_genid) != fle->genid)
72435+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72436 return 0;
72437 if (fle->object && !fle->object->ops->check(fle->object))
72438 return 0;
72439@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72440 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72441 fcp->hash_count++;
72442 }
72443- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72444+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72445 flo = fle->object;
72446 if (!flo)
72447 goto ret_object;
72448@@ -280,7 +280,7 @@ nocache:
72449 }
72450 flo = resolver(net, key, family, dir, flo, ctx);
72451 if (fle) {
72452- fle->genid = atomic_read(&flow_cache_genid);
72453+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72454 if (!IS_ERR(flo))
72455 fle->object = flo;
72456 else
72457diff --git a/net/core/iovec.c b/net/core/iovec.c
72458index c40f27e..7f49254 100644
72459--- a/net/core/iovec.c
72460+++ b/net/core/iovec.c
72461@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72462 if (m->msg_namelen) {
72463 if (mode == VERIFY_READ) {
72464 void __user *namep;
72465- namep = (void __user __force *) m->msg_name;
72466+ namep = (void __force_user *) m->msg_name;
72467 err = move_addr_to_kernel(namep, m->msg_namelen,
72468 address);
72469 if (err < 0)
72470@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72471 }
72472
72473 size = m->msg_iovlen * sizeof(struct iovec);
72474- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72475+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72476 return -EFAULT;
72477
72478 m->msg_iov = iov;
72479diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72480index 9083e82..1673203 100644
72481--- a/net/core/rtnetlink.c
72482+++ b/net/core/rtnetlink.c
72483@@ -57,7 +57,7 @@ struct rtnl_link {
72484 rtnl_doit_func doit;
72485 rtnl_dumpit_func dumpit;
72486 rtnl_calcit_func calcit;
72487-};
72488+} __no_const;
72489
72490 static DEFINE_MUTEX(rtnl_mutex);
72491 static u16 min_ifinfo_dump_size;
72492diff --git a/net/core/scm.c b/net/core/scm.c
72493index ff52ad0..aff1c0f 100644
72494--- a/net/core/scm.c
72495+++ b/net/core/scm.c
72496@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72497 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72498 {
72499 struct cmsghdr __user *cm
72500- = (__force struct cmsghdr __user *)msg->msg_control;
72501+ = (struct cmsghdr __force_user *)msg->msg_control;
72502 struct cmsghdr cmhdr;
72503 int cmlen = CMSG_LEN(len);
72504 int err;
72505@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72506 err = -EFAULT;
72507 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72508 goto out;
72509- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72510+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72511 goto out;
72512 cmlen = CMSG_SPACE(len);
72513 if (msg->msg_controllen < cmlen)
72514@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72515 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72516 {
72517 struct cmsghdr __user *cm
72518- = (__force struct cmsghdr __user*)msg->msg_control;
72519+ = (struct cmsghdr __force_user *)msg->msg_control;
72520
72521 int fdmax = 0;
72522 int fdnum = scm->fp->count;
72523@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72524 if (fdnum < fdmax)
72525 fdmax = fdnum;
72526
72527- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72528+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72529 i++, cmfptr++)
72530 {
72531 int new_fd;
72532diff --git a/net/core/sock.c b/net/core/sock.c
72533index b23f174..b9a0d26 100644
72534--- a/net/core/sock.c
72535+++ b/net/core/sock.c
72536@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72537 struct sk_buff_head *list = &sk->sk_receive_queue;
72538
72539 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72540- atomic_inc(&sk->sk_drops);
72541+ atomic_inc_unchecked(&sk->sk_drops);
72542 trace_sock_rcvqueue_full(sk, skb);
72543 return -ENOMEM;
72544 }
72545@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72546 return err;
72547
72548 if (!sk_rmem_schedule(sk, skb->truesize)) {
72549- atomic_inc(&sk->sk_drops);
72550+ atomic_inc_unchecked(&sk->sk_drops);
72551 return -ENOBUFS;
72552 }
72553
72554@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72555 skb_dst_force(skb);
72556
72557 spin_lock_irqsave(&list->lock, flags);
72558- skb->dropcount = atomic_read(&sk->sk_drops);
72559+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72560 __skb_queue_tail(list, skb);
72561 spin_unlock_irqrestore(&list->lock, flags);
72562
72563@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72564 skb->dev = NULL;
72565
72566 if (sk_rcvqueues_full(sk, skb)) {
72567- atomic_inc(&sk->sk_drops);
72568+ atomic_inc_unchecked(&sk->sk_drops);
72569 goto discard_and_relse;
72570 }
72571 if (nested)
72572@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72573 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72574 } else if (sk_add_backlog(sk, skb)) {
72575 bh_unlock_sock(sk);
72576- atomic_inc(&sk->sk_drops);
72577+ atomic_inc_unchecked(&sk->sk_drops);
72578 goto discard_and_relse;
72579 }
72580
72581@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72582 if (len > sizeof(peercred))
72583 len = sizeof(peercred);
72584 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72585- if (copy_to_user(optval, &peercred, len))
72586+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72587 return -EFAULT;
72588 goto lenout;
72589 }
72590@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72591 return -ENOTCONN;
72592 if (lv < len)
72593 return -EINVAL;
72594- if (copy_to_user(optval, address, len))
72595+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72596 return -EFAULT;
72597 goto lenout;
72598 }
72599@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72600
72601 if (len > lv)
72602 len = lv;
72603- if (copy_to_user(optval, &v, len))
72604+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72605 return -EFAULT;
72606 lenout:
72607 if (put_user(len, optlen))
72608@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72609 */
72610 smp_wmb();
72611 atomic_set(&sk->sk_refcnt, 1);
72612- atomic_set(&sk->sk_drops, 0);
72613+ atomic_set_unchecked(&sk->sk_drops, 0);
72614 }
72615 EXPORT_SYMBOL(sock_init_data);
72616
72617diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72618index 02e75d1..9a57a7c 100644
72619--- a/net/decnet/sysctl_net_decnet.c
72620+++ b/net/decnet/sysctl_net_decnet.c
72621@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72622
72623 if (len > *lenp) len = *lenp;
72624
72625- if (copy_to_user(buffer, addr, len))
72626+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72627 return -EFAULT;
72628
72629 *lenp = len;
72630@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72631
72632 if (len > *lenp) len = *lenp;
72633
72634- if (copy_to_user(buffer, devname, len))
72635+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72636 return -EFAULT;
72637
72638 *lenp = len;
72639diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72640index 39a2d29..f39c0fe 100644
72641--- a/net/econet/Kconfig
72642+++ b/net/econet/Kconfig
72643@@ -4,7 +4,7 @@
72644
72645 config ECONET
72646 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72647- depends on EXPERIMENTAL && INET
72648+ depends on EXPERIMENTAL && INET && BROKEN
72649 ---help---
72650 Econet is a fairly old and slow networking protocol mainly used by
72651 Acorn computers to access file and print servers. It uses native
72652diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72653index 92fc5f6..b790d91 100644
72654--- a/net/ipv4/fib_frontend.c
72655+++ b/net/ipv4/fib_frontend.c
72656@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72657 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72658 fib_sync_up(dev);
72659 #endif
72660- atomic_inc(&net->ipv4.dev_addr_genid);
72661+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72662 rt_cache_flush(dev_net(dev), -1);
72663 break;
72664 case NETDEV_DOWN:
72665 fib_del_ifaddr(ifa, NULL);
72666- atomic_inc(&net->ipv4.dev_addr_genid);
72667+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72668 if (ifa->ifa_dev->ifa_list == NULL) {
72669 /* Last address was deleted from this interface.
72670 * Disable IP.
72671@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72672 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72673 fib_sync_up(dev);
72674 #endif
72675- atomic_inc(&net->ipv4.dev_addr_genid);
72676+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72677 rt_cache_flush(dev_net(dev), -1);
72678 break;
72679 case NETDEV_DOWN:
72680diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72681index 80106d8..232e898 100644
72682--- a/net/ipv4/fib_semantics.c
72683+++ b/net/ipv4/fib_semantics.c
72684@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72685 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72686 nh->nh_gw,
72687 nh->nh_parent->fib_scope);
72688- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72689+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72690
72691 return nh->nh_saddr;
72692 }
72693diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72694index ccee270..db23c3c 100644
72695--- a/net/ipv4/inet_diag.c
72696+++ b/net/ipv4/inet_diag.c
72697@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72698 r->idiag_retrans = 0;
72699
72700 r->id.idiag_if = sk->sk_bound_dev_if;
72701+
72702+#ifdef CONFIG_GRKERNSEC_HIDESYM
72703+ r->id.idiag_cookie[0] = 0;
72704+ r->id.idiag_cookie[1] = 0;
72705+#else
72706 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72707 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72708+#endif
72709
72710 r->id.idiag_sport = inet->inet_sport;
72711 r->id.idiag_dport = inet->inet_dport;
72712@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72713 r->idiag_family = tw->tw_family;
72714 r->idiag_retrans = 0;
72715 r->id.idiag_if = tw->tw_bound_dev_if;
72716+
72717+#ifdef CONFIG_GRKERNSEC_HIDESYM
72718+ r->id.idiag_cookie[0] = 0;
72719+ r->id.idiag_cookie[1] = 0;
72720+#else
72721 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72722 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72723+#endif
72724+
72725 r->id.idiag_sport = tw->tw_sport;
72726 r->id.idiag_dport = tw->tw_dport;
72727 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72728@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72729 if (sk == NULL)
72730 goto unlock;
72731
72732+#ifndef CONFIG_GRKERNSEC_HIDESYM
72733 err = -ESTALE;
72734 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72735 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72736 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72737 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72738 goto out;
72739+#endif
72740
72741 err = -ENOMEM;
72742 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72743@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72744 r->idiag_retrans = req->retrans;
72745
72746 r->id.idiag_if = sk->sk_bound_dev_if;
72747+
72748+#ifdef CONFIG_GRKERNSEC_HIDESYM
72749+ r->id.idiag_cookie[0] = 0;
72750+ r->id.idiag_cookie[1] = 0;
72751+#else
72752 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72753 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72754+#endif
72755
72756 tmo = req->expires - jiffies;
72757 if (tmo < 0)
72758diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72759index 984ec65..97ac518 100644
72760--- a/net/ipv4/inet_hashtables.c
72761+++ b/net/ipv4/inet_hashtables.c
72762@@ -18,12 +18,15 @@
72763 #include <linux/sched.h>
72764 #include <linux/slab.h>
72765 #include <linux/wait.h>
72766+#include <linux/security.h>
72767
72768 #include <net/inet_connection_sock.h>
72769 #include <net/inet_hashtables.h>
72770 #include <net/secure_seq.h>
72771 #include <net/ip.h>
72772
72773+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72774+
72775 /*
72776 * Allocate and initialize a new local port bind bucket.
72777 * The bindhash mutex for snum's hash chain must be held here.
72778@@ -530,6 +533,8 @@ ok:
72779 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72780 spin_unlock(&head->lock);
72781
72782+ gr_update_task_in_ip_table(current, inet_sk(sk));
72783+
72784 if (tw) {
72785 inet_twsk_deschedule(tw, death_row);
72786 while (twrefcnt) {
72787diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72788index 86f13c67..59a35b5 100644
72789--- a/net/ipv4/inetpeer.c
72790+++ b/net/ipv4/inetpeer.c
72791@@ -436,8 +436,8 @@ relookup:
72792 if (p) {
72793 p->daddr = *daddr;
72794 atomic_set(&p->refcnt, 1);
72795- atomic_set(&p->rid, 0);
72796- atomic_set(&p->ip_id_count,
72797+ atomic_set_unchecked(&p->rid, 0);
72798+ atomic_set_unchecked(&p->ip_id_count,
72799 (daddr->family == AF_INET) ?
72800 secure_ip_id(daddr->addr.a4) :
72801 secure_ipv6_id(daddr->addr.a6));
72802diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72803index fdaabf2..0ec3205 100644
72804--- a/net/ipv4/ip_fragment.c
72805+++ b/net/ipv4/ip_fragment.c
72806@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72807 return 0;
72808
72809 start = qp->rid;
72810- end = atomic_inc_return(&peer->rid);
72811+ end = atomic_inc_return_unchecked(&peer->rid);
72812 qp->rid = end;
72813
72814 rc = qp->q.fragments && (end - start) > max;
72815diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72816index 09ff51b..d3968eb 100644
72817--- a/net/ipv4/ip_sockglue.c
72818+++ b/net/ipv4/ip_sockglue.c
72819@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72820 len = min_t(unsigned int, len, opt->optlen);
72821 if (put_user(len, optlen))
72822 return -EFAULT;
72823- if (copy_to_user(optval, opt->__data, len))
72824+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72825+ copy_to_user(optval, opt->__data, len))
72826 return -EFAULT;
72827 return 0;
72828 }
72829@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72830 if (sk->sk_type != SOCK_STREAM)
72831 return -ENOPROTOOPT;
72832
72833- msg.msg_control = optval;
72834+ msg.msg_control = (void __force_kernel *)optval;
72835 msg.msg_controllen = len;
72836 msg.msg_flags = flags;
72837
72838diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72839index 99ec116..c5628fe 100644
72840--- a/net/ipv4/ipconfig.c
72841+++ b/net/ipv4/ipconfig.c
72842@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72843
72844 mm_segment_t oldfs = get_fs();
72845 set_fs(get_ds());
72846- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72847+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72848 set_fs(oldfs);
72849 return res;
72850 }
72851@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72852
72853 mm_segment_t oldfs = get_fs();
72854 set_fs(get_ds());
72855- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72856+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72857 set_fs(oldfs);
72858 return res;
72859 }
72860@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72861
72862 mm_segment_t oldfs = get_fs();
72863 set_fs(get_ds());
72864- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72865+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72866 set_fs(oldfs);
72867 return res;
72868 }
72869diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72870index 2133c30..5c4b40b 100644
72871--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
72872+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72873@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
72874
72875 *len = 0;
72876
72877- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72878+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72879 if (*octets == NULL)
72880 return 0;
72881
72882diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
72883index 43d4c3b..1914409 100644
72884--- a/net/ipv4/ping.c
72885+++ b/net/ipv4/ping.c
72886@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
72887 sk_rmem_alloc_get(sp),
72888 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72889 atomic_read(&sp->sk_refcnt), sp,
72890- atomic_read(&sp->sk_drops), len);
72891+ atomic_read_unchecked(&sp->sk_drops), len);
72892 }
72893
72894 static int ping_seq_show(struct seq_file *seq, void *v)
72895diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
72896index 007e2eb..85a18a0 100644
72897--- a/net/ipv4/raw.c
72898+++ b/net/ipv4/raw.c
72899@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
72900 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72901 {
72902 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72903- atomic_inc(&sk->sk_drops);
72904+ atomic_inc_unchecked(&sk->sk_drops);
72905 kfree_skb(skb);
72906 return NET_RX_DROP;
72907 }
72908@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
72909
72910 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72911 {
72912+ struct icmp_filter filter;
72913+
72914 if (optlen > sizeof(struct icmp_filter))
72915 optlen = sizeof(struct icmp_filter);
72916- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72917+ if (copy_from_user(&filter, optval, optlen))
72918 return -EFAULT;
72919+ raw_sk(sk)->filter = filter;
72920 return 0;
72921 }
72922
72923 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72924 {
72925 int len, ret = -EFAULT;
72926+ struct icmp_filter filter;
72927
72928 if (get_user(len, optlen))
72929 goto out;
72930@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
72931 if (len > sizeof(struct icmp_filter))
72932 len = sizeof(struct icmp_filter);
72933 ret = -EFAULT;
72934- if (put_user(len, optlen) ||
72935- copy_to_user(optval, &raw_sk(sk)->filter, len))
72936+ filter = raw_sk(sk)->filter;
72937+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72938 goto out;
72939 ret = 0;
72940 out: return ret;
72941@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
72942 sk_wmem_alloc_get(sp),
72943 sk_rmem_alloc_get(sp),
72944 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72945- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72946+ atomic_read(&sp->sk_refcnt),
72947+#ifdef CONFIG_GRKERNSEC_HIDESYM
72948+ NULL,
72949+#else
72950+ sp,
72951+#endif
72952+ atomic_read_unchecked(&sp->sk_drops));
72953 }
72954
72955 static int raw_seq_show(struct seq_file *seq, void *v)
72956diff --git a/net/ipv4/route.c b/net/ipv4/route.c
72957index 94cdbc5..0cb0063 100644
72958--- a/net/ipv4/route.c
72959+++ b/net/ipv4/route.c
72960@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
72961
72962 static inline int rt_genid(struct net *net)
72963 {
72964- return atomic_read(&net->ipv4.rt_genid);
72965+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72966 }
72967
72968 #ifdef CONFIG_PROC_FS
72969@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
72970 unsigned char shuffle;
72971
72972 get_random_bytes(&shuffle, sizeof(shuffle));
72973- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72974+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72975 redirect_genid++;
72976 }
72977
72978@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
72979 error = rt->dst.error;
72980 if (peer) {
72981 inet_peer_refcheck(rt->peer);
72982- id = atomic_read(&peer->ip_id_count) & 0xffff;
72983+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72984 if (peer->tcp_ts_stamp) {
72985 ts = peer->tcp_ts;
72986 tsage = get_seconds() - peer->tcp_ts_stamp;
72987diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
72988index c89e354..8bd55c8 100644
72989--- a/net/ipv4/tcp_ipv4.c
72990+++ b/net/ipv4/tcp_ipv4.c
72991@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72992 int sysctl_tcp_low_latency __read_mostly;
72993 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72994
72995+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72996+extern int grsec_enable_blackhole;
72997+#endif
72998
72999 #ifdef CONFIG_TCP_MD5SIG
73000 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73001@@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73002 return 0;
73003
73004 reset:
73005+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73006+ if (!grsec_enable_blackhole)
73007+#endif
73008 tcp_v4_send_reset(rsk, skb);
73009 discard:
73010 kfree_skb(skb);
73011@@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73012 TCP_SKB_CB(skb)->sacked = 0;
73013
73014 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73015- if (!sk)
73016+ if (!sk) {
73017+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73018+ ret = 1;
73019+#endif
73020 goto no_tcp_socket;
73021-
73022+ }
73023 process:
73024- if (sk->sk_state == TCP_TIME_WAIT)
73025+ if (sk->sk_state == TCP_TIME_WAIT) {
73026+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73027+ ret = 2;
73028+#endif
73029 goto do_time_wait;
73030+ }
73031
73032 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73033 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73034@@ -1744,6 +1757,10 @@ no_tcp_socket:
73035 bad_packet:
73036 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73037 } else {
73038+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73039+ if (!grsec_enable_blackhole || (ret == 1 &&
73040+ (skb->dev->flags & IFF_LOOPBACK)))
73041+#endif
73042 tcp_v4_send_reset(NULL, skb);
73043 }
73044
73045@@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73046 0, /* non standard timer */
73047 0, /* open_requests have no inode */
73048 atomic_read(&sk->sk_refcnt),
73049+#ifdef CONFIG_GRKERNSEC_HIDESYM
73050+ NULL,
73051+#else
73052 req,
73053+#endif
73054 len);
73055 }
73056
73057@@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73058 sock_i_uid(sk),
73059 icsk->icsk_probes_out,
73060 sock_i_ino(sk),
73061- atomic_read(&sk->sk_refcnt), sk,
73062+ atomic_read(&sk->sk_refcnt),
73063+#ifdef CONFIG_GRKERNSEC_HIDESYM
73064+ NULL,
73065+#else
73066+ sk,
73067+#endif
73068 jiffies_to_clock_t(icsk->icsk_rto),
73069 jiffies_to_clock_t(icsk->icsk_ack.ato),
73070 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73071@@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73072 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73073 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73074 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73075- atomic_read(&tw->tw_refcnt), tw, len);
73076+ atomic_read(&tw->tw_refcnt),
73077+#ifdef CONFIG_GRKERNSEC_HIDESYM
73078+ NULL,
73079+#else
73080+ tw,
73081+#endif
73082+ len);
73083 }
73084
73085 #define TMPSZ 150
73086diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73087index 66363b6..b0654a3 100644
73088--- a/net/ipv4/tcp_minisocks.c
73089+++ b/net/ipv4/tcp_minisocks.c
73090@@ -27,6 +27,10 @@
73091 #include <net/inet_common.h>
73092 #include <net/xfrm.h>
73093
73094+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73095+extern int grsec_enable_blackhole;
73096+#endif
73097+
73098 int sysctl_tcp_syncookies __read_mostly = 1;
73099 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73100
73101@@ -751,6 +755,10 @@ listen_overflow:
73102
73103 embryonic_reset:
73104 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73105+
73106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73107+ if (!grsec_enable_blackhole)
73108+#endif
73109 if (!(flg & TCP_FLAG_RST))
73110 req->rsk_ops->send_reset(sk, skb);
73111
73112diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73113index 85ee7eb..53277ab 100644
73114--- a/net/ipv4/tcp_probe.c
73115+++ b/net/ipv4/tcp_probe.c
73116@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73117 if (cnt + width >= len)
73118 break;
73119
73120- if (copy_to_user(buf + cnt, tbuf, width))
73121+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73122 return -EFAULT;
73123 cnt += width;
73124 }
73125diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73126index 2e0f0af..e2948bf 100644
73127--- a/net/ipv4/tcp_timer.c
73128+++ b/net/ipv4/tcp_timer.c
73129@@ -22,6 +22,10 @@
73130 #include <linux/gfp.h>
73131 #include <net/tcp.h>
73132
73133+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73134+extern int grsec_lastack_retries;
73135+#endif
73136+
73137 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73138 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73139 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73140@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73141 }
73142 }
73143
73144+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73145+ if ((sk->sk_state == TCP_LAST_ACK) &&
73146+ (grsec_lastack_retries > 0) &&
73147+ (grsec_lastack_retries < retry_until))
73148+ retry_until = grsec_lastack_retries;
73149+#endif
73150+
73151 if (retransmits_timed_out(sk, retry_until,
73152 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73153 /* Has it gone just too far? */
73154diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73155index 5a65eea..bd913a1 100644
73156--- a/net/ipv4/udp.c
73157+++ b/net/ipv4/udp.c
73158@@ -86,6 +86,7 @@
73159 #include <linux/types.h>
73160 #include <linux/fcntl.h>
73161 #include <linux/module.h>
73162+#include <linux/security.h>
73163 #include <linux/socket.h>
73164 #include <linux/sockios.h>
73165 #include <linux/igmp.h>
73166@@ -108,6 +109,10 @@
73167 #include <trace/events/udp.h>
73168 #include "udp_impl.h"
73169
73170+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73171+extern int grsec_enable_blackhole;
73172+#endif
73173+
73174 struct udp_table udp_table __read_mostly;
73175 EXPORT_SYMBOL(udp_table);
73176
73177@@ -565,6 +570,9 @@ found:
73178 return s;
73179 }
73180
73181+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73182+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73183+
73184 /*
73185 * This routine is called by the ICMP module when it gets some
73186 * sort of error condition. If err < 0 then the socket should
73187@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73188 dport = usin->sin_port;
73189 if (dport == 0)
73190 return -EINVAL;
73191+
73192+ err = gr_search_udp_sendmsg(sk, usin);
73193+ if (err)
73194+ return err;
73195 } else {
73196 if (sk->sk_state != TCP_ESTABLISHED)
73197 return -EDESTADDRREQ;
73198+
73199+ err = gr_search_udp_sendmsg(sk, NULL);
73200+ if (err)
73201+ return err;
73202+
73203 daddr = inet->inet_daddr;
73204 dport = inet->inet_dport;
73205 /* Open fast path for connected socket.
73206@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73207 udp_lib_checksum_complete(skb)) {
73208 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73209 IS_UDPLITE(sk));
73210- atomic_inc(&sk->sk_drops);
73211+ atomic_inc_unchecked(&sk->sk_drops);
73212 __skb_unlink(skb, rcvq);
73213 __skb_queue_tail(&list_kill, skb);
73214 }
73215@@ -1185,6 +1202,10 @@ try_again:
73216 if (!skb)
73217 goto out;
73218
73219+ err = gr_search_udp_recvmsg(sk, skb);
73220+ if (err)
73221+ goto out_free;
73222+
73223 ulen = skb->len - sizeof(struct udphdr);
73224 copied = len;
73225 if (copied > ulen)
73226@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73227
73228 drop:
73229 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73230- atomic_inc(&sk->sk_drops);
73231+ atomic_inc_unchecked(&sk->sk_drops);
73232 kfree_skb(skb);
73233 return -1;
73234 }
73235@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73236 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73237
73238 if (!skb1) {
73239- atomic_inc(&sk->sk_drops);
73240+ atomic_inc_unchecked(&sk->sk_drops);
73241 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73242 IS_UDPLITE(sk));
73243 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73244@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73245 goto csum_error;
73246
73247 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73248+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73249+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73250+#endif
73251 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73252
73253 /*
73254@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73255 sk_wmem_alloc_get(sp),
73256 sk_rmem_alloc_get(sp),
73257 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73258- atomic_read(&sp->sk_refcnt), sp,
73259- atomic_read(&sp->sk_drops), len);
73260+ atomic_read(&sp->sk_refcnt),
73261+#ifdef CONFIG_GRKERNSEC_HIDESYM
73262+ NULL,
73263+#else
73264+ sp,
73265+#endif
73266+ atomic_read_unchecked(&sp->sk_drops), len);
73267 }
73268
73269 int udp4_seq_show(struct seq_file *seq, void *v)
73270diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73271index 836c4ea..cbb74dc 100644
73272--- a/net/ipv6/addrconf.c
73273+++ b/net/ipv6/addrconf.c
73274@@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73275 p.iph.ihl = 5;
73276 p.iph.protocol = IPPROTO_IPV6;
73277 p.iph.ttl = 64;
73278- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73279+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73280
73281 if (ops->ndo_do_ioctl) {
73282 mm_segment_t oldfs = get_fs();
73283diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73284index 1567fb1..29af910 100644
73285--- a/net/ipv6/inet6_connection_sock.c
73286+++ b/net/ipv6/inet6_connection_sock.c
73287@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73288 #ifdef CONFIG_XFRM
73289 {
73290 struct rt6_info *rt = (struct rt6_info *)dst;
73291- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73292+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73293 }
73294 #endif
73295 }
73296@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73297 #ifdef CONFIG_XFRM
73298 if (dst) {
73299 struct rt6_info *rt = (struct rt6_info *)dst;
73300- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73301+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73302 __sk_dst_reset(sk);
73303 dst = NULL;
73304 }
73305diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73306index 26cb08c..8af9877 100644
73307--- a/net/ipv6/ipv6_sockglue.c
73308+++ b/net/ipv6/ipv6_sockglue.c
73309@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73310 if (sk->sk_type != SOCK_STREAM)
73311 return -ENOPROTOOPT;
73312
73313- msg.msg_control = optval;
73314+ msg.msg_control = (void __force_kernel *)optval;
73315 msg.msg_controllen = len;
73316 msg.msg_flags = flags;
73317
73318diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73319index 361ebf3..d5628fb 100644
73320--- a/net/ipv6/raw.c
73321+++ b/net/ipv6/raw.c
73322@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73323 {
73324 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73325 skb_checksum_complete(skb)) {
73326- atomic_inc(&sk->sk_drops);
73327+ atomic_inc_unchecked(&sk->sk_drops);
73328 kfree_skb(skb);
73329 return NET_RX_DROP;
73330 }
73331@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73332 struct raw6_sock *rp = raw6_sk(sk);
73333
73334 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73335- atomic_inc(&sk->sk_drops);
73336+ atomic_inc_unchecked(&sk->sk_drops);
73337 kfree_skb(skb);
73338 return NET_RX_DROP;
73339 }
73340@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73341
73342 if (inet->hdrincl) {
73343 if (skb_checksum_complete(skb)) {
73344- atomic_inc(&sk->sk_drops);
73345+ atomic_inc_unchecked(&sk->sk_drops);
73346 kfree_skb(skb);
73347 return NET_RX_DROP;
73348 }
73349@@ -601,7 +601,7 @@ out:
73350 return err;
73351 }
73352
73353-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73354+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73355 struct flowi6 *fl6, struct dst_entry **dstp,
73356 unsigned int flags)
73357 {
73358@@ -909,12 +909,15 @@ do_confirm:
73359 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73360 char __user *optval, int optlen)
73361 {
73362+ struct icmp6_filter filter;
73363+
73364 switch (optname) {
73365 case ICMPV6_FILTER:
73366 if (optlen > sizeof(struct icmp6_filter))
73367 optlen = sizeof(struct icmp6_filter);
73368- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73369+ if (copy_from_user(&filter, optval, optlen))
73370 return -EFAULT;
73371+ raw6_sk(sk)->filter = filter;
73372 return 0;
73373 default:
73374 return -ENOPROTOOPT;
73375@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73376 char __user *optval, int __user *optlen)
73377 {
73378 int len;
73379+ struct icmp6_filter filter;
73380
73381 switch (optname) {
73382 case ICMPV6_FILTER:
73383@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73384 len = sizeof(struct icmp6_filter);
73385 if (put_user(len, optlen))
73386 return -EFAULT;
73387- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73388+ filter = raw6_sk(sk)->filter;
73389+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73390 return -EFAULT;
73391 return 0;
73392 default:
73393@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73394 0, 0L, 0,
73395 sock_i_uid(sp), 0,
73396 sock_i_ino(sp),
73397- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73398+ atomic_read(&sp->sk_refcnt),
73399+#ifdef CONFIG_GRKERNSEC_HIDESYM
73400+ NULL,
73401+#else
73402+ sp,
73403+#endif
73404+ atomic_read_unchecked(&sp->sk_drops));
73405 }
73406
73407 static int raw6_seq_show(struct seq_file *seq, void *v)
73408diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73409index b859e4a..f9d1589 100644
73410--- a/net/ipv6/tcp_ipv6.c
73411+++ b/net/ipv6/tcp_ipv6.c
73412@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73413 }
73414 #endif
73415
73416+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73417+extern int grsec_enable_blackhole;
73418+#endif
73419+
73420 static void tcp_v6_hash(struct sock *sk)
73421 {
73422 if (sk->sk_state != TCP_CLOSE) {
73423@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73424 return 0;
73425
73426 reset:
73427+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73428+ if (!grsec_enable_blackhole)
73429+#endif
73430 tcp_v6_send_reset(sk, skb);
73431 discard:
73432 if (opt_skb)
73433@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73434 TCP_SKB_CB(skb)->sacked = 0;
73435
73436 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73437- if (!sk)
73438+ if (!sk) {
73439+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73440+ ret = 1;
73441+#endif
73442 goto no_tcp_socket;
73443+ }
73444
73445 process:
73446- if (sk->sk_state == TCP_TIME_WAIT)
73447+ if (sk->sk_state == TCP_TIME_WAIT) {
73448+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73449+ ret = 2;
73450+#endif
73451 goto do_time_wait;
73452+ }
73453
73454 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73455 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73456@@ -1783,6 +1798,10 @@ no_tcp_socket:
73457 bad_packet:
73458 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73459 } else {
73460+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73461+ if (!grsec_enable_blackhole || (ret == 1 &&
73462+ (skb->dev->flags & IFF_LOOPBACK)))
73463+#endif
73464 tcp_v6_send_reset(NULL, skb);
73465 }
73466
73467@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73468 uid,
73469 0, /* non standard timer */
73470 0, /* open_requests have no inode */
73471- 0, req);
73472+ 0,
73473+#ifdef CONFIG_GRKERNSEC_HIDESYM
73474+ NULL
73475+#else
73476+ req
73477+#endif
73478+ );
73479 }
73480
73481 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73482@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73483 sock_i_uid(sp),
73484 icsk->icsk_probes_out,
73485 sock_i_ino(sp),
73486- atomic_read(&sp->sk_refcnt), sp,
73487+ atomic_read(&sp->sk_refcnt),
73488+#ifdef CONFIG_GRKERNSEC_HIDESYM
73489+ NULL,
73490+#else
73491+ sp,
73492+#endif
73493 jiffies_to_clock_t(icsk->icsk_rto),
73494 jiffies_to_clock_t(icsk->icsk_ack.ato),
73495 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73496@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73497 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73498 tw->tw_substate, 0, 0,
73499 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73500- atomic_read(&tw->tw_refcnt), tw);
73501+ atomic_read(&tw->tw_refcnt),
73502+#ifdef CONFIG_GRKERNSEC_HIDESYM
73503+ NULL
73504+#else
73505+ tw
73506+#endif
73507+ );
73508 }
73509
73510 static int tcp6_seq_show(struct seq_file *seq, void *v)
73511diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73512index 8c25419..47a51ae 100644
73513--- a/net/ipv6/udp.c
73514+++ b/net/ipv6/udp.c
73515@@ -50,6 +50,10 @@
73516 #include <linux/seq_file.h>
73517 #include "udp_impl.h"
73518
73519+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73520+extern int grsec_enable_blackhole;
73521+#endif
73522+
73523 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73524 {
73525 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73526@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73527
73528 return 0;
73529 drop:
73530- atomic_inc(&sk->sk_drops);
73531+ atomic_inc_unchecked(&sk->sk_drops);
73532 drop_no_sk_drops_inc:
73533 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73534 kfree_skb(skb);
73535@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73536 continue;
73537 }
73538 drop:
73539- atomic_inc(&sk->sk_drops);
73540+ atomic_inc_unchecked(&sk->sk_drops);
73541 UDP6_INC_STATS_BH(sock_net(sk),
73542 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73543 UDP6_INC_STATS_BH(sock_net(sk),
73544@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73545 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73546 proto == IPPROTO_UDPLITE);
73547
73548+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73549+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73550+#endif
73551 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73552
73553 kfree_skb(skb);
73554@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73555 if (!sock_owned_by_user(sk))
73556 udpv6_queue_rcv_skb(sk, skb);
73557 else if (sk_add_backlog(sk, skb)) {
73558- atomic_inc(&sk->sk_drops);
73559+ atomic_inc_unchecked(&sk->sk_drops);
73560 bh_unlock_sock(sk);
73561 sock_put(sk);
73562 goto discard;
73563@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73564 0, 0L, 0,
73565 sock_i_uid(sp), 0,
73566 sock_i_ino(sp),
73567- atomic_read(&sp->sk_refcnt), sp,
73568- atomic_read(&sp->sk_drops));
73569+ atomic_read(&sp->sk_refcnt),
73570+#ifdef CONFIG_GRKERNSEC_HIDESYM
73571+ NULL,
73572+#else
73573+ sp,
73574+#endif
73575+ atomic_read_unchecked(&sp->sk_drops));
73576 }
73577
73578 int udp6_seq_show(struct seq_file *seq, void *v)
73579diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73580index 253695d..9481ce8 100644
73581--- a/net/irda/ircomm/ircomm_tty.c
73582+++ b/net/irda/ircomm/ircomm_tty.c
73583@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73584 add_wait_queue(&self->open_wait, &wait);
73585
73586 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73587- __FILE__,__LINE__, tty->driver->name, self->open_count );
73588+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73589
73590 /* As far as I can see, we protect open_count - Jean II */
73591 spin_lock_irqsave(&self->spinlock, flags);
73592 if (!tty_hung_up_p(filp)) {
73593 extra_count = 1;
73594- self->open_count--;
73595+ local_dec(&self->open_count);
73596 }
73597 spin_unlock_irqrestore(&self->spinlock, flags);
73598- self->blocked_open++;
73599+ local_inc(&self->blocked_open);
73600
73601 while (1) {
73602 if (tty->termios->c_cflag & CBAUD) {
73603@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73604 }
73605
73606 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73607- __FILE__,__LINE__, tty->driver->name, self->open_count );
73608+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73609
73610 schedule();
73611 }
73612@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73613 if (extra_count) {
73614 /* ++ is not atomic, so this should be protected - Jean II */
73615 spin_lock_irqsave(&self->spinlock, flags);
73616- self->open_count++;
73617+ local_inc(&self->open_count);
73618 spin_unlock_irqrestore(&self->spinlock, flags);
73619 }
73620- self->blocked_open--;
73621+ local_dec(&self->blocked_open);
73622
73623 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73624- __FILE__,__LINE__, tty->driver->name, self->open_count);
73625+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73626
73627 if (!retval)
73628 self->flags |= ASYNC_NORMAL_ACTIVE;
73629@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73630 }
73631 /* ++ is not atomic, so this should be protected - Jean II */
73632 spin_lock_irqsave(&self->spinlock, flags);
73633- self->open_count++;
73634+ local_inc(&self->open_count);
73635
73636 tty->driver_data = self;
73637 self->tty = tty;
73638 spin_unlock_irqrestore(&self->spinlock, flags);
73639
73640 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73641- self->line, self->open_count);
73642+ self->line, local_read(&self->open_count));
73643
73644 /* Not really used by us, but lets do it anyway */
73645 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73646@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73647 return;
73648 }
73649
73650- if ((tty->count == 1) && (self->open_count != 1)) {
73651+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73652 /*
73653 * Uh, oh. tty->count is 1, which means that the tty
73654 * structure will be freed. state->count should always
73655@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73656 */
73657 IRDA_DEBUG(0, "%s(), bad serial port count; "
73658 "tty->count is 1, state->count is %d\n", __func__ ,
73659- self->open_count);
73660- self->open_count = 1;
73661+ local_read(&self->open_count));
73662+ local_set(&self->open_count, 1);
73663 }
73664
73665- if (--self->open_count < 0) {
73666+ if (local_dec_return(&self->open_count) < 0) {
73667 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73668- __func__, self->line, self->open_count);
73669- self->open_count = 0;
73670+ __func__, self->line, local_read(&self->open_count));
73671+ local_set(&self->open_count, 0);
73672 }
73673- if (self->open_count) {
73674+ if (local_read(&self->open_count)) {
73675 spin_unlock_irqrestore(&self->spinlock, flags);
73676
73677 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73678@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73679 tty->closing = 0;
73680 self->tty = NULL;
73681
73682- if (self->blocked_open) {
73683+ if (local_read(&self->blocked_open)) {
73684 if (self->close_delay)
73685 schedule_timeout_interruptible(self->close_delay);
73686 wake_up_interruptible(&self->open_wait);
73687@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73688 spin_lock_irqsave(&self->spinlock, flags);
73689 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73690 self->tty = NULL;
73691- self->open_count = 0;
73692+ local_set(&self->open_count, 0);
73693 spin_unlock_irqrestore(&self->spinlock, flags);
73694
73695 wake_up_interruptible(&self->open_wait);
73696@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73697 seq_putc(m, '\n');
73698
73699 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73700- seq_printf(m, "Open count: %d\n", self->open_count);
73701+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73702 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73703 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73704
73705diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73706index 274d150..656a144 100644
73707--- a/net/iucv/af_iucv.c
73708+++ b/net/iucv/af_iucv.c
73709@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73710
73711 write_lock_bh(&iucv_sk_list.lock);
73712
73713- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73714+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73715 while (__iucv_get_sock_by_name(name)) {
73716 sprintf(name, "%08x",
73717- atomic_inc_return(&iucv_sk_list.autobind_name));
73718+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73719 }
73720
73721 write_unlock_bh(&iucv_sk_list.lock);
73722diff --git a/net/key/af_key.c b/net/key/af_key.c
73723index 1e733e9..3d73c9f 100644
73724--- a/net/key/af_key.c
73725+++ b/net/key/af_key.c
73726@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73727 static u32 get_acqseq(void)
73728 {
73729 u32 res;
73730- static atomic_t acqseq;
73731+ static atomic_unchecked_t acqseq;
73732
73733 do {
73734- res = atomic_inc_return(&acqseq);
73735+ res = atomic_inc_return_unchecked(&acqseq);
73736 } while (!res);
73737 return res;
73738 }
73739diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73740index 73495f1..ad51356 100644
73741--- a/net/mac80211/ieee80211_i.h
73742+++ b/net/mac80211/ieee80211_i.h
73743@@ -27,6 +27,7 @@
73744 #include <net/ieee80211_radiotap.h>
73745 #include <net/cfg80211.h>
73746 #include <net/mac80211.h>
73747+#include <asm/local.h>
73748 #include "key.h"
73749 #include "sta_info.h"
73750
73751@@ -764,7 +765,7 @@ struct ieee80211_local {
73752 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73753 spinlock_t queue_stop_reason_lock;
73754
73755- int open_count;
73756+ local_t open_count;
73757 int monitors, cooked_mntrs;
73758 /* number of interfaces with corresponding FIF_ flags */
73759 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73760diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73761index 30d7355..e260095 100644
73762--- a/net/mac80211/iface.c
73763+++ b/net/mac80211/iface.c
73764@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73765 break;
73766 }
73767
73768- if (local->open_count == 0) {
73769+ if (local_read(&local->open_count) == 0) {
73770 res = drv_start(local);
73771 if (res)
73772 goto err_del_bss;
73773@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73774 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73775
73776 if (!is_valid_ether_addr(dev->dev_addr)) {
73777- if (!local->open_count)
73778+ if (!local_read(&local->open_count))
73779 drv_stop(local);
73780 return -EADDRNOTAVAIL;
73781 }
73782@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73783 mutex_unlock(&local->mtx);
73784
73785 if (coming_up)
73786- local->open_count++;
73787+ local_inc(&local->open_count);
73788
73789 if (hw_reconf_flags) {
73790 ieee80211_hw_config(local, hw_reconf_flags);
73791@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73792 err_del_interface:
73793 drv_remove_interface(local, &sdata->vif);
73794 err_stop:
73795- if (!local->open_count)
73796+ if (!local_read(&local->open_count))
73797 drv_stop(local);
73798 err_del_bss:
73799 sdata->bss = NULL;
73800@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73801 }
73802
73803 if (going_down)
73804- local->open_count--;
73805+ local_dec(&local->open_count);
73806
73807 switch (sdata->vif.type) {
73808 case NL80211_IFTYPE_AP_VLAN:
73809@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73810
73811 ieee80211_recalc_ps(local, -1);
73812
73813- if (local->open_count == 0) {
73814+ if (local_read(&local->open_count) == 0) {
73815 if (local->ops->napi_poll)
73816 napi_disable(&local->napi);
73817 ieee80211_clear_tx_pending(local);
73818diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73819index a7536fd..4039cc0 100644
73820--- a/net/mac80211/main.c
73821+++ b/net/mac80211/main.c
73822@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73823 local->hw.conf.power_level = power;
73824 }
73825
73826- if (changed && local->open_count) {
73827+ if (changed && local_read(&local->open_count)) {
73828 ret = drv_config(local, changed);
73829 /*
73830 * Goal:
73831diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73832index 9ee7164..56c5061 100644
73833--- a/net/mac80211/pm.c
73834+++ b/net/mac80211/pm.c
73835@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73836 struct ieee80211_sub_if_data *sdata;
73837 struct sta_info *sta;
73838
73839- if (!local->open_count)
73840+ if (!local_read(&local->open_count))
73841 goto suspend;
73842
73843 ieee80211_scan_cancel(local);
73844@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73845 cancel_work_sync(&local->dynamic_ps_enable_work);
73846 del_timer_sync(&local->dynamic_ps_timer);
73847
73848- local->wowlan = wowlan && local->open_count;
73849+ local->wowlan = wowlan && local_read(&local->open_count);
73850 if (local->wowlan) {
73851 int err = drv_suspend(local, wowlan);
73852 if (err < 0) {
73853@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73854 }
73855
73856 /* stop hardware - this must stop RX */
73857- if (local->open_count)
73858+ if (local_read(&local->open_count))
73859 ieee80211_stop_device(local);
73860
73861 suspend:
73862diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73863index 5a5a776..9600b11 100644
73864--- a/net/mac80211/rate.c
73865+++ b/net/mac80211/rate.c
73866@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
73867
73868 ASSERT_RTNL();
73869
73870- if (local->open_count)
73871+ if (local_read(&local->open_count))
73872 return -EBUSY;
73873
73874 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73875diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
73876index c97a065..ff61928 100644
73877--- a/net/mac80211/rc80211_pid_debugfs.c
73878+++ b/net/mac80211/rc80211_pid_debugfs.c
73879@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
73880
73881 spin_unlock_irqrestore(&events->lock, status);
73882
73883- if (copy_to_user(buf, pb, p))
73884+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73885 return -EFAULT;
73886
73887 return p;
73888diff --git a/net/mac80211/util.c b/net/mac80211/util.c
73889index d5230ec..c604b21 100644
73890--- a/net/mac80211/util.c
73891+++ b/net/mac80211/util.c
73892@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
73893 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73894
73895 /* everything else happens only if HW was up & running */
73896- if (!local->open_count)
73897+ if (!local_read(&local->open_count))
73898 goto wake_up;
73899
73900 /*
73901diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
73902index d5597b7..ab6d39c 100644
73903--- a/net/netfilter/Kconfig
73904+++ b/net/netfilter/Kconfig
73905@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
73906
73907 To compile it as a module, choose M here. If unsure, say N.
73908
73909+config NETFILTER_XT_MATCH_GRADM
73910+ tristate '"gradm" match support'
73911+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73912+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73913+ ---help---
73914+ The gradm match allows to match on grsecurity RBAC being enabled.
73915+ It is useful when iptables rules are applied early on bootup to
73916+ prevent connections to the machine (except from a trusted host)
73917+ while the RBAC system is disabled.
73918+
73919 config NETFILTER_XT_MATCH_HASHLIMIT
73920 tristate '"hashlimit" match support'
73921 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73922diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
73923index 1a02853..5d8c22e 100644
73924--- a/net/netfilter/Makefile
73925+++ b/net/netfilter/Makefile
73926@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73927 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73928 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73929 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73930+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73931 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73932 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73933 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73934diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
73935index 29fa5ba..8debc79 100644
73936--- a/net/netfilter/ipvs/ip_vs_conn.c
73937+++ b/net/netfilter/ipvs/ip_vs_conn.c
73938@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
73939 /* Increase the refcnt counter of the dest */
73940 atomic_inc(&dest->refcnt);
73941
73942- conn_flags = atomic_read(&dest->conn_flags);
73943+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73944 if (cp->protocol != IPPROTO_UDP)
73945 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73946 /* Bind with the destination and its corresponding transmitter */
73947@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
73948 atomic_set(&cp->refcnt, 1);
73949
73950 atomic_set(&cp->n_control, 0);
73951- atomic_set(&cp->in_pkts, 0);
73952+ atomic_set_unchecked(&cp->in_pkts, 0);
73953
73954 atomic_inc(&ipvs->conn_count);
73955 if (flags & IP_VS_CONN_F_NO_CPORT)
73956@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
73957
73958 /* Don't drop the entry if its number of incoming packets is not
73959 located in [0, 8] */
73960- i = atomic_read(&cp->in_pkts);
73961+ i = atomic_read_unchecked(&cp->in_pkts);
73962 if (i > 8 || i < 0) return 0;
73963
73964 if (!todrop_rate[i]) return 0;
73965diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
73966index 093cc32..9209ae1 100644
73967--- a/net/netfilter/ipvs/ip_vs_core.c
73968+++ b/net/netfilter/ipvs/ip_vs_core.c
73969@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
73970 ret = cp->packet_xmit(skb, cp, pd->pp);
73971 /* do not touch skb anymore */
73972
73973- atomic_inc(&cp->in_pkts);
73974+ atomic_inc_unchecked(&cp->in_pkts);
73975 ip_vs_conn_put(cp);
73976 return ret;
73977 }
73978@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
73979 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73980 pkts = sysctl_sync_threshold(ipvs);
73981 else
73982- pkts = atomic_add_return(1, &cp->in_pkts);
73983+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73984
73985 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73986 cp->protocol == IPPROTO_SCTP) {
73987diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
73988index e1a66cf..0910076 100644
73989--- a/net/netfilter/ipvs/ip_vs_ctl.c
73990+++ b/net/netfilter/ipvs/ip_vs_ctl.c
73991@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
73992 ip_vs_rs_hash(ipvs, dest);
73993 write_unlock_bh(&ipvs->rs_lock);
73994 }
73995- atomic_set(&dest->conn_flags, conn_flags);
73996+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73997
73998 /* bind the service */
73999 if (!dest->svc) {
74000@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74001 " %-7s %-6d %-10d %-10d\n",
74002 &dest->addr.in6,
74003 ntohs(dest->port),
74004- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74005+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74006 atomic_read(&dest->weight),
74007 atomic_read(&dest->activeconns),
74008 atomic_read(&dest->inactconns));
74009@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74010 "%-7s %-6d %-10d %-10d\n",
74011 ntohl(dest->addr.ip),
74012 ntohs(dest->port),
74013- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74014+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74015 atomic_read(&dest->weight),
74016 atomic_read(&dest->activeconns),
74017 atomic_read(&dest->inactconns));
74018@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74019
74020 entry.addr = dest->addr.ip;
74021 entry.port = dest->port;
74022- entry.conn_flags = atomic_read(&dest->conn_flags);
74023+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74024 entry.weight = atomic_read(&dest->weight);
74025 entry.u_threshold = dest->u_threshold;
74026 entry.l_threshold = dest->l_threshold;
74027@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74028 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74029
74030 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74031- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74032+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74033 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74034 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74035 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74036diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74037index 2b6678c0..aaa41fc 100644
74038--- a/net/netfilter/ipvs/ip_vs_sync.c
74039+++ b/net/netfilter/ipvs/ip_vs_sync.c
74040@@ -649,7 +649,7 @@ control:
74041 * i.e only increment in_pkts for Templates.
74042 */
74043 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74044- int pkts = atomic_add_return(1, &cp->in_pkts);
74045+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74046
74047 if (pkts % sysctl_sync_period(ipvs) != 1)
74048 return;
74049@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74050
74051 if (opt)
74052 memcpy(&cp->in_seq, opt, sizeof(*opt));
74053- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74054+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74055 cp->state = state;
74056 cp->old_state = cp->state;
74057 /*
74058diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74059index aa2d720..d8aa111 100644
74060--- a/net/netfilter/ipvs/ip_vs_xmit.c
74061+++ b/net/netfilter/ipvs/ip_vs_xmit.c
74062@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74063 else
74064 rc = NF_ACCEPT;
74065 /* do not touch skb anymore */
74066- atomic_inc(&cp->in_pkts);
74067+ atomic_inc_unchecked(&cp->in_pkts);
74068 goto out;
74069 }
74070
74071@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74072 else
74073 rc = NF_ACCEPT;
74074 /* do not touch skb anymore */
74075- atomic_inc(&cp->in_pkts);
74076+ atomic_inc_unchecked(&cp->in_pkts);
74077 goto out;
74078 }
74079
74080diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74081index 66b2c54..c7884e3 100644
74082--- a/net/netfilter/nfnetlink_log.c
74083+++ b/net/netfilter/nfnetlink_log.c
74084@@ -70,7 +70,7 @@ struct nfulnl_instance {
74085 };
74086
74087 static DEFINE_SPINLOCK(instances_lock);
74088-static atomic_t global_seq;
74089+static atomic_unchecked_t global_seq;
74090
74091 #define INSTANCE_BUCKETS 16
74092 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74093@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74094 /* global sequence number */
74095 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74096 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74097- htonl(atomic_inc_return(&global_seq)));
74098+ htonl(atomic_inc_return_unchecked(&global_seq)));
74099
74100 if (data_len) {
74101 struct nlattr *nla;
74102diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74103new file mode 100644
74104index 0000000..6905327
74105--- /dev/null
74106+++ b/net/netfilter/xt_gradm.c
74107@@ -0,0 +1,51 @@
74108+/*
74109+ * gradm match for netfilter
74110