]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.2.7-201202202005.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.7-201202202005.patch
CommitLineData
ef577b6f
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index 81c287f..d456d02 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index d1bdc90..e95fe1a 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+ifeq ($(KBUILD_EXTMOD),)
243+gcc-plugins:
244+ $(Q)$(MAKE) $(build)=tools/gcc
245+else
246+gcc-plugins: ;
247+endif
248+else
249+gcc-plugins:
250+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252+else
253+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254+endif
255+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256+endif
257+endif
258+
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262@@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283-$(vmlinux-dirs): prepare scripts
284+$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288@@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296@@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304@@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308-modules_prepare: prepare scripts
309+modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313@@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317+ -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321@@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329@@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333-%.s: %.c prepare scripts FORCE
334+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335+%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339-%.o: %.c prepare scripts FORCE
340+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341+%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.s: %.S prepare scripts FORCE
346+%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348-%.o: %.S prepare scripts FORCE
349+%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353@@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357-%/: prepare scripts FORCE
358+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359+%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363-%.ko: prepare scripts FORCE
364+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365+%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370index da5449e..7418343 100644
371--- a/arch/alpha/include/asm/elf.h
372+++ b/arch/alpha/include/asm/elf.h
373@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377+#ifdef CONFIG_PAX_ASLR
378+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379+
380+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382+#endif
383+
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388index de98a73..bd4f1f8 100644
389--- a/arch/alpha/include/asm/pgtable.h
390+++ b/arch/alpha/include/asm/pgtable.h
391@@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395+
396+#ifdef CONFIG_PAX_PAGEEXEC
397+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400+#else
401+# define PAGE_SHARED_NOEXEC PAGE_SHARED
402+# define PAGE_COPY_NOEXEC PAGE_COPY
403+# define PAGE_READONLY_NOEXEC PAGE_READONLY
404+#endif
405+
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410index 2fd00b7..cfd5069 100644
411--- a/arch/alpha/kernel/module.c
412+++ b/arch/alpha/kernel/module.c
413@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417- gp = (u64)me->module_core + me->core_size - 0x8000;
418+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423index 01e8715..be0e80f 100644
424--- a/arch/alpha/kernel/osf_sys.c
425+++ b/arch/alpha/kernel/osf_sys.c
426@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430- if (!vma || addr + len <= vma->vm_start)
431+ if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439+#ifdef CONFIG_PAX_RANDMMAP
440+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441+#endif
442+
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451- len, limit);
452+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453+
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458index fadd5f8..904e73a 100644
459--- a/arch/alpha/mm/fault.c
460+++ b/arch/alpha/mm/fault.c
461@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465+#ifdef CONFIG_PAX_PAGEEXEC
466+/*
467+ * PaX: decide what to do with offenders (regs->pc = fault address)
468+ *
469+ * returns 1 when task should be killed
470+ * 2 when patched PLT trampoline was detected
471+ * 3 when unpatched PLT trampoline was detected
472+ */
473+static int pax_handle_fetch_fault(struct pt_regs *regs)
474+{
475+
476+#ifdef CONFIG_PAX_EMUPLT
477+ int err;
478+
479+ do { /* PaX: patched PLT emulation #1 */
480+ unsigned int ldah, ldq, jmp;
481+
482+ err = get_user(ldah, (unsigned int *)regs->pc);
483+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485+
486+ if (err)
487+ break;
488+
489+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491+ jmp == 0x6BFB0000U)
492+ {
493+ unsigned long r27, addr;
494+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496+
497+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498+ err = get_user(r27, (unsigned long *)addr);
499+ if (err)
500+ break;
501+
502+ regs->r27 = r27;
503+ regs->pc = r27;
504+ return 2;
505+ }
506+ } while (0);
507+
508+ do { /* PaX: patched PLT emulation #2 */
509+ unsigned int ldah, lda, br;
510+
511+ err = get_user(ldah, (unsigned int *)regs->pc);
512+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
513+ err |= get_user(br, (unsigned int *)(regs->pc+8));
514+
515+ if (err)
516+ break;
517+
518+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
520+ (br & 0xFFE00000U) == 0xC3E00000U)
521+ {
522+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525+
526+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528+ return 2;
529+ }
530+ } while (0);
531+
532+ do { /* PaX: unpatched PLT emulation */
533+ unsigned int br;
534+
535+ err = get_user(br, (unsigned int *)regs->pc);
536+
537+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538+ unsigned int br2, ldq, nop, jmp;
539+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540+
541+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542+ err = get_user(br2, (unsigned int *)addr);
543+ err |= get_user(ldq, (unsigned int *)(addr+4));
544+ err |= get_user(nop, (unsigned int *)(addr+8));
545+ err |= get_user(jmp, (unsigned int *)(addr+12));
546+ err |= get_user(resolver, (unsigned long *)(addr+16));
547+
548+ if (err)
549+ break;
550+
551+ if (br2 == 0xC3600000U &&
552+ ldq == 0xA77B000CU &&
553+ nop == 0x47FF041FU &&
554+ jmp == 0x6B7B0000U)
555+ {
556+ regs->r28 = regs->pc+4;
557+ regs->r27 = addr+16;
558+ regs->pc = resolver;
559+ return 3;
560+ }
561+ }
562+ } while (0);
563+#endif
564+
565+ return 1;
566+}
567+
568+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569+{
570+ unsigned long i;
571+
572+ printk(KERN_ERR "PAX: bytes at PC: ");
573+ for (i = 0; i < 5; i++) {
574+ unsigned int c;
575+ if (get_user(c, (unsigned int *)pc+i))
576+ printk(KERN_CONT "???????? ");
577+ else
578+ printk(KERN_CONT "%08x ", c);
579+ }
580+ printk("\n");
581+}
582+#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590- if (!(vma->vm_flags & VM_EXEC))
591+ if (!(vma->vm_flags & VM_EXEC)) {
592+
593+#ifdef CONFIG_PAX_PAGEEXEC
594+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595+ goto bad_area;
596+
597+ up_read(&mm->mmap_sem);
598+ switch (pax_handle_fetch_fault(regs)) {
599+
600+#ifdef CONFIG_PAX_EMUPLT
601+ case 2:
602+ case 3:
603+ return;
604+#endif
605+
606+ }
607+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608+ do_group_exit(SIGKILL);
609+#else
610 goto bad_area;
611+#endif
612+
613+ }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618index 86976d0..8a57797 100644
619--- a/arch/arm/include/asm/atomic.h
620+++ b/arch/arm/include/asm/atomic.h
621@@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625+#ifdef CONFIG_PAX_REFCOUNT
626+typedef struct {
627+ u64 __aligned(8) counter;
628+} atomic64_unchecked_t;
629+#else
630+typedef atomic64_t atomic64_unchecked_t;
631+#endif
632+
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637index 0e9ce8d..6ef1e03 100644
638--- a/arch/arm/include/asm/elf.h
639+++ b/arch/arm/include/asm/elf.h
640@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646+
647+#ifdef CONFIG_PAX_ASLR
648+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649+
650+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652+#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660-struct mm_struct;
661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662-#define arch_randomize_brk arch_randomize_brk
663-
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668index e51b1e8..32a3113 100644
669--- a/arch/arm/include/asm/kmap_types.h
670+++ b/arch/arm/include/asm/kmap_types.h
671@@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675+ KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680index b293616..96310e5 100644
681--- a/arch/arm/include/asm/uaccess.h
682+++ b/arch/arm/include/asm/uaccess.h
683@@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687+extern void check_object_size(const void *ptr, unsigned long n, bool to);
688+
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692@@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700+
701+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702+{
703+ if (!__builtin_constant_p(n))
704+ check_object_size(to, n, false);
705+ return ___copy_from_user(to, from, n);
706+}
707+
708+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709+{
710+ if (!__builtin_constant_p(n))
711+ check_object_size(from, n, true);
712+ return ___copy_to_user(to, from, n);
713+}
714+
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722+ if ((long)n < 0)
723+ return n;
724+
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732+ if ((long)n < 0)
733+ return n;
734+
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739index 5b0bce6..becd81c 100644
740--- a/arch/arm/kernel/armksyms.c
741+++ b/arch/arm/kernel/armksyms.c
742@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746-EXPORT_SYMBOL(__copy_from_user);
747-EXPORT_SYMBOL(__copy_to_user);
748+EXPORT_SYMBOL(___copy_from_user);
749+EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754index 3d0c6fb..3dcae52 100644
755--- a/arch/arm/kernel/process.c
756+++ b/arch/arm/kernel/process.c
757@@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761-#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769-unsigned long arch_randomize_brk(struct mm_struct *mm)
770-{
771- unsigned long range_end = mm->brk + 0x02000000;
772- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773-}
774-
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779index 99a5727..a3d5bb1 100644
780--- a/arch/arm/kernel/traps.c
781+++ b/arch/arm/kernel/traps.c
782@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786+extern void gr_handle_kernel_exploit(void);
787+
788 /*
789 * This function is protected against re-entrancy.
790 */
791@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795+
796+ gr_handle_kernel_exploit();
797+
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802index 66a477a..bee61d3 100644
803--- a/arch/arm/lib/copy_from_user.S
804+++ b/arch/arm/lib/copy_from_user.S
805@@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809- * size_t __copy_from_user(void *to, const void *from, size_t n)
810+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814@@ -84,11 +84,11 @@
815
816 .text
817
818-ENTRY(__copy_from_user)
819+ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823-ENDPROC(__copy_from_user)
824+ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829index d066df6..df28194 100644
830--- a/arch/arm/lib/copy_to_user.S
831+++ b/arch/arm/lib/copy_to_user.S
832@@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836- * size_t __copy_to_user(void *to, const void *from, size_t n)
837+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841@@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845-WEAK(__copy_to_user)
846+WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850-ENDPROC(__copy_to_user)
851+ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856index d0ece2a..5ae2f39 100644
857--- a/arch/arm/lib/uaccess.S
858+++ b/arch/arm/lib/uaccess.S
859@@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872-ENTRY(__copy_to_user)
873+ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881-ENDPROC(__copy_to_user)
882+ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898-ENTRY(__copy_from_user)
899+ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907-ENDPROC(__copy_from_user)
908+ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913index 025f742..8432b08 100644
914--- a/arch/arm/lib/uaccess_with_memcpy.c
915+++ b/arch/arm/lib/uaccess_with_memcpy.c
916@@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920-__copy_to_user(void __user *to, const void *from, unsigned long n)
921+___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926index 2b2d51c..0127490 100644
927--- a/arch/arm/mach-ux500/mbox-db5500.c
928+++ b/arch/arm/mach-ux500/mbox-db5500.c
929@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939index aa33949..b242a2f 100644
940--- a/arch/arm/mm/fault.c
941+++ b/arch/arm/mm/fault.c
942@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946+#ifdef CONFIG_PAX_PAGEEXEC
947+ if (fsr & FSR_LNX_PF) {
948+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949+ do_group_exit(SIGKILL);
950+ }
951+#endif
952+
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960+#ifdef CONFIG_PAX_PAGEEXEC
961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962+{
963+ long i;
964+
965+ printk(KERN_ERR "PAX: bytes at PC: ");
966+ for (i = 0; i < 20; i++) {
967+ unsigned char c;
968+ if (get_user(c, (__force unsigned char __user *)pc+i))
969+ printk(KERN_CONT "?? ");
970+ else
971+ printk(KERN_CONT "%02x ", c);
972+ }
973+ printk("\n");
974+
975+ printk(KERN_ERR "PAX: bytes at SP-4: ");
976+ for (i = -1; i < 20; i++) {
977+ unsigned long c;
978+ if (get_user(c, (__force unsigned long __user *)sp+i))
979+ printk(KERN_CONT "???????? ");
980+ else
981+ printk(KERN_CONT "%08lx ", c);
982+ }
983+ printk("\n");
984+}
985+#endif
986+
987 /*
988 * First Level Translation Fault Handler
989 *
990diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991index 44b628e..623ee2a 100644
992--- a/arch/arm/mm/mmap.c
993+++ b/arch/arm/mm/mmap.c
994@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998+#ifdef CONFIG_PAX_RANDMMAP
999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000+#endif
1001+
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009- if (TASK_SIZE - len >= addr &&
1010- (!vma || addr + len <= vma->vm_start))
1011+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015- start_addr = addr = mm->free_area_cache;
1016+ start_addr = addr = mm->free_area_cache;
1017 } else {
1018- start_addr = addr = TASK_UNMAPPED_BASE;
1019- mm->cached_hole_size = 0;
1020+ start_addr = addr = mm->mmap_base;
1021+ mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025@@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029- if (start_addr != TASK_UNMAPPED_BASE) {
1030- start_addr = addr = TASK_UNMAPPED_BASE;
1031+ if (start_addr != mm->mmap_base) {
1032+ start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038- if (!vma || addr + len <= vma->vm_start) {
1039+ if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044index 3b3159b..425ea94 100644
1045--- a/arch/avr32/include/asm/elf.h
1046+++ b/arch/avr32/include/asm/elf.h
1047@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054+#ifdef CONFIG_PAX_ASLR
1055+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056+
1057+#define PAX_DELTA_MMAP_LEN 15
1058+#define PAX_DELTA_STACK_LEN 15
1059+#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064index b7f5c68..556135c 100644
1065--- a/arch/avr32/include/asm/kmap_types.h
1066+++ b/arch/avr32/include/asm/kmap_types.h
1067@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071-D(14) KM_TYPE_NR
1072+D(14) KM_CLEARPAGE,
1073+D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078index f7040a1..db9f300 100644
1079--- a/arch/avr32/mm/fault.c
1080+++ b/arch/avr32/mm/fault.c
1081@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085+#ifdef CONFIG_PAX_PAGEEXEC
1086+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087+{
1088+ unsigned long i;
1089+
1090+ printk(KERN_ERR "PAX: bytes at PC: ");
1091+ for (i = 0; i < 20; i++) {
1092+ unsigned char c;
1093+ if (get_user(c, (unsigned char *)pc+i))
1094+ printk(KERN_CONT "???????? ");
1095+ else
1096+ printk(KERN_CONT "%02x ", c);
1097+ }
1098+ printk("\n");
1099+}
1100+#endif
1101+
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105@@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109+
1110+#ifdef CONFIG_PAX_PAGEEXEC
1111+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114+ do_group_exit(SIGKILL);
1115+ }
1116+ }
1117+#endif
1118+
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123index f8e16b2..c73ff79 100644
1124--- a/arch/frv/include/asm/kmap_types.h
1125+++ b/arch/frv/include/asm/kmap_types.h
1126@@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130+ KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135index 385fd30..6c3d97e 100644
1136--- a/arch/frv/mm/elf-fdpic.c
1137+++ b/arch/frv/mm/elf-fdpic.c
1138@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142- if (TASK_SIZE - len >= addr &&
1143- (!vma || addr + len <= vma->vm_start))
1144+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152- if (addr + len <= vma->vm_start)
1153+ if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161- if (addr + len <= vma->vm_start)
1162+ if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167index b5298eb..67c6e62 100644
1168--- a/arch/ia64/include/asm/elf.h
1169+++ b/arch/ia64/include/asm/elf.h
1170@@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174+#ifdef CONFIG_PAX_ASLR
1175+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176+
1177+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179+#endif
1180+
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185index 1a97af3..7529d31 100644
1186--- a/arch/ia64/include/asm/pgtable.h
1187+++ b/arch/ia64/include/asm/pgtable.h
1188@@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192-
1193+#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197@@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201+
1202+#ifdef CONFIG_PAX_PAGEEXEC
1203+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206+#else
1207+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209+# define PAGE_COPY_NOEXEC PAGE_COPY
1210+#endif
1211+
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216index b77768d..e0795eb 100644
1217--- a/arch/ia64/include/asm/spinlock.h
1218+++ b/arch/ia64/include/asm/spinlock.h
1219@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229index 449c8c0..432a3d2 100644
1230--- a/arch/ia64/include/asm/uaccess.h
1231+++ b/arch/ia64/include/asm/uaccess.h
1232@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251index 24603be..948052d 100644
1252--- a/arch/ia64/kernel/module.c
1253+++ b/arch/ia64/kernel/module.c
1254@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258- if (mod && mod->arch.init_unw_table &&
1259- module_region == mod->module_init) {
1260+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268+in_init_rx (const struct module *mod, uint64_t addr)
1269+{
1270+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271+}
1272+
1273+static inline int
1274+in_init_rw (const struct module *mod, uint64_t addr)
1275+{
1276+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277+}
1278+
1279+static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282- return addr - (uint64_t) mod->module_init < mod->init_size;
1283+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284+}
1285+
1286+static inline int
1287+in_core_rx (const struct module *mod, uint64_t addr)
1288+{
1289+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290+}
1291+
1292+static inline int
1293+in_core_rw (const struct module *mod, uint64_t addr)
1294+{
1295+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301- return addr - (uint64_t) mod->module_core < mod->core_size;
1302+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311+ if (in_init_rx(mod, val))
1312+ val -= (uint64_t) mod->module_init_rx;
1313+ else if (in_init_rw(mod, val))
1314+ val -= (uint64_t) mod->module_init_rw;
1315+ else if (in_core_rx(mod, val))
1316+ val -= (uint64_t) mod->module_core_rx;
1317+ else if (in_core_rw(mod, val))
1318+ val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326- if (mod->core_size > MAX_LTOFF)
1327+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332- gp = mod->core_size - MAX_LTOFF / 2;
1333+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335- gp = mod->core_size / 2;
1336- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343index 609d500..7dde2a8 100644
1344--- a/arch/ia64/kernel/sys_ia64.c
1345+++ b/arch/ia64/kernel/sys_ia64.c
1346@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350+
1351+#ifdef CONFIG_PAX_RANDMMAP
1352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1353+ addr = mm->free_area_cache;
1354+ else
1355+#endif
1356+
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364- if (start_addr != TASK_UNMAPPED_BASE) {
1365+ if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367- addr = TASK_UNMAPPED_BASE;
1368+ addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373- if (!vma || addr + len <= vma->vm_start) {
1374+ if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379index 53c0ba0..2accdde 100644
1380--- a/arch/ia64/kernel/vmlinux.lds.S
1381+++ b/arch/ia64/kernel/vmlinux.lds.S
1382@@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386- __phys_per_cpu_start = __per_cpu_load;
1387+ __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392index 20b3593..1ce77f0 100644
1393--- a/arch/ia64/mm/fault.c
1394+++ b/arch/ia64/mm/fault.c
1395@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399+#ifdef CONFIG_PAX_PAGEEXEC
1400+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401+{
1402+ unsigned long i;
1403+
1404+ printk(KERN_ERR "PAX: bytes at PC: ");
1405+ for (i = 0; i < 8; i++) {
1406+ unsigned int c;
1407+ if (get_user(c, (unsigned int *)pc+i))
1408+ printk(KERN_CONT "???????? ");
1409+ else
1410+ printk(KERN_CONT "%08x ", c);
1411+ }
1412+ printk("\n");
1413+}
1414+#endif
1415+
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423- if ((vma->vm_flags & mask) != mask)
1424+ if ((vma->vm_flags & mask) != mask) {
1425+
1426+#ifdef CONFIG_PAX_PAGEEXEC
1427+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429+ goto bad_area;
1430+
1431+ up_read(&mm->mmap_sem);
1432+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433+ do_group_exit(SIGKILL);
1434+ }
1435+#endif
1436+
1437 goto bad_area;
1438
1439+ }
1440+
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445index 5ca674b..e0e1b70 100644
1446--- a/arch/ia64/mm/hugetlbpage.c
1447+++ b/arch/ia64/mm/hugetlbpage.c
1448@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452- if (!vmm || (addr + len) <= vmm->vm_start)
1453+ if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458index 00cb0e2..2ad8024 100644
1459--- a/arch/ia64/mm/init.c
1460+++ b/arch/ia64/mm/init.c
1461@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465+
1466+#ifdef CONFIG_PAX_PAGEEXEC
1467+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468+ vma->vm_flags &= ~VM_EXEC;
1469+
1470+#ifdef CONFIG_PAX_MPROTECT
1471+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472+ vma->vm_flags &= ~VM_MAYEXEC;
1473+#endif
1474+
1475+ }
1476+#endif
1477+
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482index 82abd15..d95ae5d 100644
1483--- a/arch/m32r/lib/usercopy.c
1484+++ b/arch/m32r/lib/usercopy.c
1485@@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489+ if ((long)n < 0)
1490+ return n;
1491+
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499+ if ((long)n < 0)
1500+ return n;
1501+
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506index 455c0ac..ad65fbe 100644
1507--- a/arch/mips/include/asm/elf.h
1508+++ b/arch/mips/include/asm/elf.h
1509@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513+#ifdef CONFIG_PAX_ASLR
1514+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515+
1516+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518+#endif
1519+
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525-struct mm_struct;
1526-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527-#define arch_randomize_brk arch_randomize_brk
1528-
1529 #endif /* _ASM_ELF_H */
1530diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531index e59cd1a..8e329d6 100644
1532--- a/arch/mips/include/asm/page.h
1533+++ b/arch/mips/include/asm/page.h
1534@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544index 6018c80..7c37203 100644
1545--- a/arch/mips/include/asm/system.h
1546+++ b/arch/mips/include/asm/system.h
1547@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551-extern unsigned long arch_align_stack(unsigned long sp);
1552+#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556index 9fdd8bc..4bd7f1a 100644
1557--- a/arch/mips/kernel/binfmt_elfn32.c
1558+++ b/arch/mips/kernel/binfmt_elfn32.c
1559@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563+#ifdef CONFIG_PAX_ASLR
1564+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565+
1566+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568+#endif
1569+
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574index ff44823..97f8906 100644
1575--- a/arch/mips/kernel/binfmt_elfo32.c
1576+++ b/arch/mips/kernel/binfmt_elfo32.c
1577@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581+#ifdef CONFIG_PAX_ASLR
1582+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583+
1584+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586+#endif
1587+
1588 #include <asm/processor.h>
1589
1590 /*
1591diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592index c47f96e..661d418 100644
1593--- a/arch/mips/kernel/process.c
1594+++ b/arch/mips/kernel/process.c
1595@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599-
1600-/*
1601- * Don't forget that the stack pointer must be aligned on a 8 bytes
1602- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603- */
1604-unsigned long arch_align_stack(unsigned long sp)
1605-{
1606- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607- sp -= get_random_int() & ~PAGE_MASK;
1608-
1609- return sp & ALMASK;
1610-}
1611diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612index 937cf33..adb39bb 100644
1613--- a/arch/mips/mm/fault.c
1614+++ b/arch/mips/mm/fault.c
1615@@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619+#ifdef CONFIG_PAX_PAGEEXEC
1620+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621+{
1622+ unsigned long i;
1623+
1624+ printk(KERN_ERR "PAX: bytes at PC: ");
1625+ for (i = 0; i < 5; i++) {
1626+ unsigned int c;
1627+ if (get_user(c, (unsigned int *)pc+i))
1628+ printk(KERN_CONT "???????? ");
1629+ else
1630+ printk(KERN_CONT "%08x ", c);
1631+ }
1632+ printk("\n");
1633+}
1634+#endif
1635+
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640index 302d779..7d35bf8 100644
1641--- a/arch/mips/mm/mmap.c
1642+++ b/arch/mips/mm/mmap.c
1643@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647+
1648+#ifdef CONFIG_PAX_RANDMMAP
1649+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650+#endif
1651+
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659- if (TASK_SIZE - len >= addr &&
1660- (!vma || addr + len <= vma->vm_start))
1661+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1662 return addr;
1663 }
1664
1665@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669- if (!vma || addr + len <= vma->vm_start)
1670+ if (check_heap_stack_gap(vmm, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678- if (!vma || addr <= vma->vm_start) {
1679+ if (check_heap_stack_gap(vmm, addr - len, len))
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687- if (likely(!vma || addr + len <= vma->vm_start)) {
1688+ if (check_heap_stack_gap(vmm, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696-
1697-static inline unsigned long brk_rnd(void)
1698-{
1699- unsigned long rnd = get_random_int();
1700-
1701- rnd = rnd << PAGE_SHIFT;
1702- /* 8MB for 32bit, 256MB for 64bit */
1703- if (TASK_IS_32BIT_ADDR)
1704- rnd = rnd & 0x7ffffful;
1705- else
1706- rnd = rnd & 0xffffffful;
1707-
1708- return rnd;
1709-}
1710-
1711-unsigned long arch_randomize_brk(struct mm_struct *mm)
1712-{
1713- unsigned long base = mm->brk;
1714- unsigned long ret;
1715-
1716- ret = PAGE_ALIGN(base + brk_rnd());
1717-
1718- if (ret < mm->brk)
1719- return mm->brk;
1720-
1721- return ret;
1722-}
1723diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724index 19f6cb1..6c78cf2 100644
1725--- a/arch/parisc/include/asm/elf.h
1726+++ b/arch/parisc/include/asm/elf.h
1727@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731+#ifdef CONFIG_PAX_ASLR
1732+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733+
1734+#define PAX_DELTA_MMAP_LEN 16
1735+#define PAX_DELTA_STACK_LEN 16
1736+#endif
1737+
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742index 22dadeb..f6c2be4 100644
1743--- a/arch/parisc/include/asm/pgtable.h
1744+++ b/arch/parisc/include/asm/pgtable.h
1745@@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749+
1750+#ifdef CONFIG_PAX_PAGEEXEC
1751+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754+#else
1755+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756+# define PAGE_COPY_NOEXEC PAGE_COPY
1757+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758+#endif
1759+
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764index 5e34ccf..672bc9c 100644
1765--- a/arch/parisc/kernel/module.c
1766+++ b/arch/parisc/kernel/module.c
1767@@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771+static inline int in_init_rx(struct module *me, void *loc)
1772+{
1773+ return (loc >= me->module_init_rx &&
1774+ loc < (me->module_init_rx + me->init_size_rx));
1775+}
1776+
1777+static inline int in_init_rw(struct module *me, void *loc)
1778+{
1779+ return (loc >= me->module_init_rw &&
1780+ loc < (me->module_init_rw + me->init_size_rw));
1781+}
1782+
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785- return (loc >= me->module_init &&
1786- loc <= (me->module_init + me->init_size));
1787+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1788+}
1789+
1790+static inline int in_core_rx(struct module *me, void *loc)
1791+{
1792+ return (loc >= me->module_core_rx &&
1793+ loc < (me->module_core_rx + me->core_size_rx));
1794+}
1795+
1796+static inline int in_core_rw(struct module *me, void *loc)
1797+{
1798+ return (loc >= me->module_core_rw &&
1799+ loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804- return (loc >= me->module_core &&
1805- loc <= (me->module_core + me->core_size));
1806+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814- me->core_size = ALIGN(me->core_size, 16);
1815- me->arch.got_offset = me->core_size;
1816- me->core_size += gots * sizeof(struct got_entry);
1817+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818+ me->arch.got_offset = me->core_size_rw;
1819+ me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821- me->core_size = ALIGN(me->core_size, 16);
1822- me->arch.fdesc_offset = me->core_size;
1823- me->core_size += fdescs * sizeof(Elf_Fdesc);
1824+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825+ me->arch.fdesc_offset = me->core_size_rw;
1826+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834- got = me->module_core + me->arch.got_offset;
1835+ got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867index c9b9322..02d8940 100644
1868--- a/arch/parisc/kernel/sys_parisc.c
1869+++ b/arch/parisc/kernel/sys_parisc.c
1870@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874- if (!vma || addr + len <= vma->vm_start)
1875+ if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883- if (!vma || addr + len <= vma->vm_start)
1884+ if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892- addr = TASK_UNMAPPED_BASE;
1893+ addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898index f19e660..414fe24 100644
1899--- a/arch/parisc/kernel/traps.c
1900+++ b/arch/parisc/kernel/traps.c
1901@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905- if (vma && (regs->iaoq[0] >= vma->vm_start)
1906- && (vma->vm_flags & VM_EXEC)) {
1907-
1908+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913index 18162ce..94de376 100644
1914--- a/arch/parisc/mm/fault.c
1915+++ b/arch/parisc/mm/fault.c
1916@@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920+#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928- if (code == 6 || code == 16)
1929+ if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937+#ifdef CONFIG_PAX_PAGEEXEC
1938+/*
1939+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940+ *
1941+ * returns 1 when task should be killed
1942+ * 2 when rt_sigreturn trampoline was detected
1943+ * 3 when unpatched PLT trampoline was detected
1944+ */
1945+static int pax_handle_fetch_fault(struct pt_regs *regs)
1946+{
1947+
1948+#ifdef CONFIG_PAX_EMUPLT
1949+ int err;
1950+
1951+ do { /* PaX: unpatched PLT emulation */
1952+ unsigned int bl, depwi;
1953+
1954+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962+
1963+ err = get_user(ldw, (unsigned int *)addr);
1964+ err |= get_user(bv, (unsigned int *)(addr+4));
1965+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1966+
1967+ if (err)
1968+ break;
1969+
1970+ if (ldw == 0x0E801096U &&
1971+ bv == 0xEAC0C000U &&
1972+ ldw2 == 0x0E881095U)
1973+ {
1974+ unsigned int resolver, map;
1975+
1976+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978+ if (err)
1979+ break;
1980+
1981+ regs->gr[20] = instruction_pointer(regs)+8;
1982+ regs->gr[21] = map;
1983+ regs->gr[22] = resolver;
1984+ regs->iaoq[0] = resolver | 3UL;
1985+ regs->iaoq[1] = regs->iaoq[0] + 4;
1986+ return 3;
1987+ }
1988+ }
1989+ } while (0);
1990+#endif
1991+
1992+#ifdef CONFIG_PAX_EMUTRAMP
1993+
1994+#ifndef CONFIG_PAX_EMUSIGRT
1995+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996+ return 1;
1997+#endif
1998+
1999+ do { /* PaX: rt_sigreturn emulation */
2000+ unsigned int ldi1, ldi2, bel, nop;
2001+
2002+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006+
2007+ if (err)
2008+ break;
2009+
2010+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011+ ldi2 == 0x3414015AU &&
2012+ bel == 0xE4008200U &&
2013+ nop == 0x08000240U)
2014+ {
2015+ regs->gr[25] = (ldi1 & 2) >> 1;
2016+ regs->gr[20] = __NR_rt_sigreturn;
2017+ regs->gr[31] = regs->iaoq[1] + 16;
2018+ regs->sr[0] = regs->iasq[1];
2019+ regs->iaoq[0] = 0x100UL;
2020+ regs->iaoq[1] = regs->iaoq[0] + 4;
2021+ regs->iasq[0] = regs->sr[2];
2022+ regs->iasq[1] = regs->sr[2];
2023+ return 2;
2024+ }
2025+ } while (0);
2026+#endif
2027+
2028+ return 1;
2029+}
2030+
2031+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032+{
2033+ unsigned long i;
2034+
2035+ printk(KERN_ERR "PAX: bytes at PC: ");
2036+ for (i = 0; i < 5; i++) {
2037+ unsigned int c;
2038+ if (get_user(c, (unsigned int *)pc+i))
2039+ printk(KERN_CONT "???????? ");
2040+ else
2041+ printk(KERN_CONT "%08x ", c);
2042+ }
2043+ printk("\n");
2044+}
2045+#endif
2046+
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050@@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054- if ((vma->vm_flags & acc_type) != acc_type)
2055+ if ((vma->vm_flags & acc_type) != acc_type) {
2056+
2057+#ifdef CONFIG_PAX_PAGEEXEC
2058+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059+ (address & ~3UL) == instruction_pointer(regs))
2060+ {
2061+ up_read(&mm->mmap_sem);
2062+ switch (pax_handle_fetch_fault(regs)) {
2063+
2064+#ifdef CONFIG_PAX_EMUPLT
2065+ case 3:
2066+ return;
2067+#endif
2068+
2069+#ifdef CONFIG_PAX_EMUTRAMP
2070+ case 2:
2071+ return;
2072+#endif
2073+
2074+ }
2075+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076+ do_group_exit(SIGKILL);
2077+ }
2078+#endif
2079+
2080 goto bad_area;
2081+ }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086index 3bf9cca..e7457d0 100644
2087--- a/arch/powerpc/include/asm/elf.h
2088+++ b/arch/powerpc/include/asm/elf.h
2089@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093-extern unsigned long randomize_et_dyn(unsigned long base);
2094-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095+#define ELF_ET_DYN_BASE (0x20000000)
2096+
2097+#ifdef CONFIG_PAX_ASLR
2098+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099+
2100+#ifdef __powerpc64__
2101+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103+#else
2104+#define PAX_DELTA_MMAP_LEN 15
2105+#define PAX_DELTA_STACK_LEN 15
2106+#endif
2107+#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116-#define arch_randomize_brk arch_randomize_brk
2117-
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122index bca8fdc..61e9580 100644
2123--- a/arch/powerpc/include/asm/kmap_types.h
2124+++ b/arch/powerpc/include/asm/kmap_types.h
2125@@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129+ KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134index d4a7f64..451de1c 100644
2135--- a/arch/powerpc/include/asm/mman.h
2136+++ b/arch/powerpc/include/asm/mman.h
2137@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147index dd9c4fd..a2ced87 100644
2148--- a/arch/powerpc/include/asm/page.h
2149+++ b/arch/powerpc/include/asm/page.h
2150@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156+#define VM_DATA_DEFAULT_FLAGS32 \
2157+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166+#define ktla_ktva(addr) (addr)
2167+#define ktva_ktla(addr) (addr)
2168+
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173index fb40ede..d3ce956 100644
2174--- a/arch/powerpc/include/asm/page_64.h
2175+++ b/arch/powerpc/include/asm/page_64.h
2176@@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182+#define VM_STACK_DEFAULT_FLAGS32 \
2183+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189+#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193+#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198index 88b0bd9..e32bc67 100644
2199--- a/arch/powerpc/include/asm/pgtable.h
2200+++ b/arch/powerpc/include/asm/pgtable.h
2201@@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205+#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210index 4aad413..85d86bf 100644
2211--- a/arch/powerpc/include/asm/pte-hash32.h
2212+++ b/arch/powerpc/include/asm/pte-hash32.h
2213@@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217+#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222index 559da19..7e5835c 100644
2223--- a/arch/powerpc/include/asm/reg.h
2224+++ b/arch/powerpc/include/asm/reg.h
2225@@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234index e30a13d..2b7d994 100644
2235--- a/arch/powerpc/include/asm/system.h
2236+++ b/arch/powerpc/include/asm/system.h
2237@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241-extern unsigned long arch_align_stack(unsigned long sp);
2242+#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247index bd0fb84..a42a14b 100644
2248--- a/arch/powerpc/include/asm/uaccess.h
2249+++ b/arch/powerpc/include/asm/uaccess.h
2250@@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255+
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259@@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263-#ifndef __powerpc64__
2264-
2265-static inline unsigned long copy_from_user(void *to,
2266- const void __user *from, unsigned long n)
2267-{
2268- unsigned long over;
2269-
2270- if (access_ok(VERIFY_READ, from, n))
2271- return __copy_tofrom_user((__force void __user *)to, from, n);
2272- if ((unsigned long)from < TASK_SIZE) {
2273- over = (unsigned long)from + n - TASK_SIZE;
2274- return __copy_tofrom_user((__force void __user *)to, from,
2275- n - over) + over;
2276- }
2277- return n;
2278-}
2279-
2280-static inline unsigned long copy_to_user(void __user *to,
2281- const void *from, unsigned long n)
2282-{
2283- unsigned long over;
2284-
2285- if (access_ok(VERIFY_WRITE, to, n))
2286- return __copy_tofrom_user(to, (__force void __user *)from, n);
2287- if ((unsigned long)to < TASK_SIZE) {
2288- over = (unsigned long)to + n - TASK_SIZE;
2289- return __copy_tofrom_user(to, (__force void __user *)from,
2290- n - over) + over;
2291- }
2292- return n;
2293-}
2294-
2295-#else /* __powerpc64__ */
2296-
2297-#define __copy_in_user(to, from, size) \
2298- __copy_tofrom_user((to), (from), (size))
2299-
2300-extern unsigned long copy_from_user(void *to, const void __user *from,
2301- unsigned long n);
2302-extern unsigned long copy_to_user(void __user *to, const void *from,
2303- unsigned long n);
2304-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305- unsigned long n);
2306-
2307-#endif /* __powerpc64__ */
2308-
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316+
2317+ if (!__builtin_constant_p(n))
2318+ check_object_size(to, n, false);
2319+
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327+
2328+ if (!__builtin_constant_p(n))
2329+ check_object_size(from, n, true);
2330+
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338+#ifndef __powerpc64__
2339+
2340+static inline unsigned long __must_check copy_from_user(void *to,
2341+ const void __user *from, unsigned long n)
2342+{
2343+ unsigned long over;
2344+
2345+ if ((long)n < 0)
2346+ return n;
2347+
2348+ if (access_ok(VERIFY_READ, from, n)) {
2349+ if (!__builtin_constant_p(n))
2350+ check_object_size(to, n, false);
2351+ return __copy_tofrom_user((__force void __user *)to, from, n);
2352+ }
2353+ if ((unsigned long)from < TASK_SIZE) {
2354+ over = (unsigned long)from + n - TASK_SIZE;
2355+ if (!__builtin_constant_p(n - over))
2356+ check_object_size(to, n - over, false);
2357+ return __copy_tofrom_user((__force void __user *)to, from,
2358+ n - over) + over;
2359+ }
2360+ return n;
2361+}
2362+
2363+static inline unsigned long __must_check copy_to_user(void __user *to,
2364+ const void *from, unsigned long n)
2365+{
2366+ unsigned long over;
2367+
2368+ if ((long)n < 0)
2369+ return n;
2370+
2371+ if (access_ok(VERIFY_WRITE, to, n)) {
2372+ if (!__builtin_constant_p(n))
2373+ check_object_size(from, n, true);
2374+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2375+ }
2376+ if ((unsigned long)to < TASK_SIZE) {
2377+ over = (unsigned long)to + n - TASK_SIZE;
2378+ if (!__builtin_constant_p(n))
2379+ check_object_size(from, n - over, true);
2380+ return __copy_tofrom_user(to, (__force void __user *)from,
2381+ n - over) + over;
2382+ }
2383+ return n;
2384+}
2385+
2386+#else /* __powerpc64__ */
2387+
2388+#define __copy_in_user(to, from, size) \
2389+ __copy_tofrom_user((to), (from), (size))
2390+
2391+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392+{
2393+ if ((long)n < 0 || n > INT_MAX)
2394+ return n;
2395+
2396+ if (!__builtin_constant_p(n))
2397+ check_object_size(to, n, false);
2398+
2399+ if (likely(access_ok(VERIFY_READ, from, n)))
2400+ n = __copy_from_user(to, from, n);
2401+ else
2402+ memset(to, 0, n);
2403+ return n;
2404+}
2405+
2406+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407+{
2408+ if ((long)n < 0 || n > INT_MAX)
2409+ return n;
2410+
2411+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412+ if (!__builtin_constant_p(n))
2413+ check_object_size(from, n, true);
2414+ n = __copy_to_user(to, from, n);
2415+ }
2416+ return n;
2417+}
2418+
2419+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420+ unsigned long n);
2421+
2422+#endif /* __powerpc64__ */
2423+
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428index 429983c..7af363b 100644
2429--- a/arch/powerpc/kernel/exceptions-64e.S
2430+++ b/arch/powerpc/kernel/exceptions-64e.S
2431@@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435+ bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439@@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443-1: bl .save_nvgprs
2444- mr r5,r3
2445+1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450index cf9c69b..ebc9640 100644
2451--- a/arch/powerpc/kernel/exceptions-64s.S
2452+++ b/arch/powerpc/kernel/exceptions-64s.S
2453@@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457+ bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461- bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466index 0b6d796..d760ddb 100644
2467--- a/arch/powerpc/kernel/module_32.c
2468+++ b/arch/powerpc/kernel/module_32.c
2469@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473- printk("Module doesn't contain .plt or .init.plt sections.\n");
2474+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482- if (location >= mod->module_core
2483- && location < mod->module_core + mod->core_size)
2484+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487- else
2488+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491+ else {
2492+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493+ return ~0UL;
2494+ }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499index 6457574..08b28d3 100644
2500--- a/arch/powerpc/kernel/process.c
2501+++ b/arch/powerpc/kernel/process.c
2502@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521- printk(" (%pS)",
2522+ printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539-
2540-unsigned long arch_align_stack(unsigned long sp)
2541-{
2542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543- sp -= get_random_int() & ~PAGE_MASK;
2544- return sp & ~0xf;
2545-}
2546-
2547-static inline unsigned long brk_rnd(void)
2548-{
2549- unsigned long rnd = 0;
2550-
2551- /* 8MB for 32bit, 1GB for 64bit */
2552- if (is_32bit_task())
2553- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554- else
2555- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556-
2557- return rnd << PAGE_SHIFT;
2558-}
2559-
2560-unsigned long arch_randomize_brk(struct mm_struct *mm)
2561-{
2562- unsigned long base = mm->brk;
2563- unsigned long ret;
2564-
2565-#ifdef CONFIG_PPC_STD_MMU_64
2566- /*
2567- * If we are using 1TB segments and we are allowed to randomise
2568- * the heap, we can put it above 1TB so it is backed by a 1TB
2569- * segment. Otherwise the heap will be in the bottom 1TB
2570- * which always uses 256MB segments and this may result in a
2571- * performance penalty.
2572- */
2573- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575-#endif
2576-
2577- ret = PAGE_ALIGN(base + brk_rnd());
2578-
2579- if (ret < mm->brk)
2580- return mm->brk;
2581-
2582- return ret;
2583-}
2584-
2585-unsigned long randomize_et_dyn(unsigned long base)
2586-{
2587- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588-
2589- if (ret < base)
2590- return base;
2591-
2592- return ret;
2593-}
2594diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595index 836a5a1..27289a3 100644
2596--- a/arch/powerpc/kernel/signal_32.c
2597+++ b/arch/powerpc/kernel/signal_32.c
2598@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608index a50b5ec..547078a 100644
2609--- a/arch/powerpc/kernel/signal_64.c
2610+++ b/arch/powerpc/kernel/signal_64.c
2611@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621index 5459d14..10f8070 100644
2622--- a/arch/powerpc/kernel/traps.c
2623+++ b/arch/powerpc/kernel/traps.c
2624@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628+extern void gr_handle_kernel_exploit(void);
2629+
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637+ gr_handle_kernel_exploit();
2638+
2639 oops_exit();
2640 do_exit(err);
2641
2642diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643index 7d14bb6..1305601 100644
2644--- a/arch/powerpc/kernel/vdso.c
2645+++ b/arch/powerpc/kernel/vdso.c
2646@@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650+#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658- current->mm->context.vdso_base = 0;
2659+ current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667- 0, 0);
2668+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673index 5eea6f3..5d10396 100644
2674--- a/arch/powerpc/lib/usercopy_64.c
2675+++ b/arch/powerpc/lib/usercopy_64.c
2676@@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681-{
2682- if (likely(access_ok(VERIFY_READ, from, n)))
2683- n = __copy_from_user(to, from, n);
2684- else
2685- memset(to, 0, n);
2686- return n;
2687-}
2688-
2689-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690-{
2691- if (likely(access_ok(VERIFY_WRITE, to, n)))
2692- n = __copy_to_user(to, from, n);
2693- return n;
2694-}
2695-
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703-EXPORT_SYMBOL(copy_from_user);
2704-EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708index 5efe8c9..db9ceef 100644
2709--- a/arch/powerpc/mm/fault.c
2710+++ b/arch/powerpc/mm/fault.c
2711@@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715+#include <linux/slab.h>
2716+#include <linux/pagemap.h>
2717+#include <linux/compiler.h>
2718+#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722@@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726+#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734+#ifdef CONFIG_PAX_PAGEEXEC
2735+/*
2736+ * PaX: decide what to do with offenders (regs->nip = fault address)
2737+ *
2738+ * returns 1 when task should be killed
2739+ */
2740+static int pax_handle_fetch_fault(struct pt_regs *regs)
2741+{
2742+ return 1;
2743+}
2744+
2745+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746+{
2747+ unsigned long i;
2748+
2749+ printk(KERN_ERR "PAX: bytes at PC: ");
2750+ for (i = 0; i < 5; i++) {
2751+ unsigned int c;
2752+ if (get_user(c, (unsigned int __user *)pc+i))
2753+ printk(KERN_CONT "???????? ");
2754+ else
2755+ printk(KERN_CONT "%08x ", c);
2756+ }
2757+ printk("\n");
2758+}
2759+#endif
2760+
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768- error_code &= 0x48200000;
2769+ error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773@@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777- if (error_code & 0x10000000)
2778+ if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782@@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786- if (error_code & DSISR_PROTFAULT)
2787+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791@@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795+
2796+#ifdef CONFIG_PAX_PAGEEXEC
2797+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798+#ifdef CONFIG_PPC_STD_MMU
2799+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800+#else
2801+ if (is_exec && regs->nip == address) {
2802+#endif
2803+ switch (pax_handle_fetch_fault(regs)) {
2804+ }
2805+
2806+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807+ do_group_exit(SIGKILL);
2808+ }
2809+ }
2810+#endif
2811+
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816index 5a783d8..c23e14b 100644
2817--- a/arch/powerpc/mm/mmap_64.c
2818+++ b/arch/powerpc/mm/mmap_64.c
2819@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823+
2824+#ifdef CONFIG_PAX_RANDMMAP
2825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2826+ mm->mmap_base += mm->delta_mmap;
2827+#endif
2828+
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833+
2834+#ifdef CONFIG_PAX_RANDMMAP
2835+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2836+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837+#endif
2838+
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843index 73709f7..6b90313 100644
2844--- a/arch/powerpc/mm/slice.c
2845+++ b/arch/powerpc/mm/slice.c
2846@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850- return (!vma || (addr + len) <= vma->vm_start);
2851+ return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855@@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859- if (!vma || addr + len <= vma->vm_start) {
2860+ if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868- addr = mm->mmap_base;
2869- while (addr > len) {
2870+ if (mm->mmap_base < len)
2871+ addr = -ENOMEM;
2872+ else
2873+ addr = mm->mmap_base - len;
2874+
2875+ while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886- if (!vma || (addr + len) <= vma->vm_start) {
2887+ if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895- addr = vma->vm_start;
2896+ addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904+#ifdef CONFIG_PAX_RANDMMAP
2905+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906+ addr = 0;
2907+#endif
2908+
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913index 547f1a6..0b22b53 100644
2914--- a/arch/s390/include/asm/elf.h
2915+++ b/arch/s390/include/asm/elf.h
2916@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920-extern unsigned long randomize_et_dyn(unsigned long base);
2921-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923+
2924+#ifdef CONFIG_PAX_ASLR
2925+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926+
2927+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
2928+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
2929+#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933@@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938-#define arch_randomize_brk arch_randomize_brk
2939-
2940 #endif
2941diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942index ef573c1..75a1ce6 100644
2943--- a/arch/s390/include/asm/system.h
2944+++ b/arch/s390/include/asm/system.h
2945@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949-extern unsigned long arch_align_stack(unsigned long sp);
2950+#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955index 2b23885..e136e31 100644
2956--- a/arch/s390/include/asm/uaccess.h
2957+++ b/arch/s390/include/asm/uaccess.h
2958@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962+
2963+ if ((long)n < 0)
2964+ return n;
2965+
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973+ if ((long)n < 0)
2974+ return n;
2975+
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983+
2984+ if ((long)n < 0)
2985+ return n;
2986+
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991index dfcb343..eda788a 100644
2992--- a/arch/s390/kernel/module.c
2993+++ b/arch/s390/kernel/module.c
2994@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998- me->core_size = ALIGN(me->core_size, 4);
2999- me->arch.got_offset = me->core_size;
3000- me->core_size += me->arch.got_size;
3001- me->arch.plt_offset = me->core_size;
3002- me->core_size += me->arch.plt_size;
3003+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004+ me->arch.got_offset = me->core_size_rw;
3005+ me->core_size_rw += me->arch.got_size;
3006+ me->arch.plt_offset = me->core_size_rx;
3007+ me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015- gotent = me->module_core + me->arch.got_offset +
3016+ gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024- (val + (Elf_Addr) me->module_core - loc) >> 1;
3025+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033- ip = me->module_core + me->arch.plt_offset +
3034+ ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042- val = (Elf_Addr) me->module_core +
3043+ val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051- ((Elf_Addr) me->module_core + me->arch.got_offset);
3052+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066index 9451b21..ed8956f 100644
3067--- a/arch/s390/kernel/process.c
3068+++ b/arch/s390/kernel/process.c
3069@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073-
3074-unsigned long arch_align_stack(unsigned long sp)
3075-{
3076- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077- sp -= get_random_int() & ~PAGE_MASK;
3078- return sp & ~0xf;
3079-}
3080-
3081-static inline unsigned long brk_rnd(void)
3082-{
3083- /* 8MB for 32bit, 1GB for 64bit */
3084- if (is_32bit_task())
3085- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086- else
3087- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088-}
3089-
3090-unsigned long arch_randomize_brk(struct mm_struct *mm)
3091-{
3092- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093-
3094- if (ret < mm->brk)
3095- return mm->brk;
3096- return ret;
3097-}
3098-
3099-unsigned long randomize_et_dyn(unsigned long base)
3100-{
3101- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102-
3103- if (!(current->flags & PF_RANDOMIZE))
3104- return base;
3105- if (ret < base)
3106- return base;
3107- return ret;
3108-}
3109diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110index f09c748..cf9ec1d 100644
3111--- a/arch/s390/mm/mmap.c
3112+++ b/arch/s390/mm/mmap.c
3113@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117+
3118+#ifdef CONFIG_PAX_RANDMMAP
3119+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3120+ mm->mmap_base += mm->delta_mmap;
3121+#endif
3122+
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127+
3128+#ifdef CONFIG_PAX_RANDMMAP
3129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3130+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131+#endif
3132+
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140+
3141+#ifdef CONFIG_PAX_RANDMMAP
3142+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3143+ mm->mmap_base += mm->delta_mmap;
3144+#endif
3145+
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150+
3151+#ifdef CONFIG_PAX_RANDMMAP
3152+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3153+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154+#endif
3155+
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160index 589d5c7..669e274 100644
3161--- a/arch/score/include/asm/system.h
3162+++ b/arch/score/include/asm/system.h
3163@@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167-extern unsigned long arch_align_stack(unsigned long sp);
3168+#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173index 25d0803..d6c8e36 100644
3174--- a/arch/score/kernel/process.c
3175+++ b/arch/score/kernel/process.c
3176@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180-
3181-unsigned long arch_align_stack(unsigned long sp)
3182-{
3183- return sp;
3184-}
3185diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186index afeb710..d1d1289 100644
3187--- a/arch/sh/mm/mmap.c
3188+++ b/arch/sh/mm/mmap.c
3189@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193- if (TASK_SIZE - len >= addr &&
3194- (!vma || addr + len <= vma->vm_start))
3195+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199@@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203- if (likely(!vma || addr + len <= vma->vm_start)) {
3204+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212- if (TASK_SIZE - len >= addr &&
3213- (!vma || addr + len <= vma->vm_start))
3214+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222- if (!vma || addr <= vma->vm_start) {
3223+ if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231- addr = mm->mmap_base-len;
3232- if (do_colour_align)
3233- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234+ addr = mm->mmap_base - len;
3235
3236 do {
3237+ if (do_colour_align)
3238+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245- if (likely(!vma || addr+len <= vma->vm_start)) {
3246+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254- addr = vma->vm_start-len;
3255- if (do_colour_align)
3256- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257- } while (likely(len < vma->vm_start));
3258+ addr = skip_heap_stack_gap(vma, len);
3259+ } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264index ad1fb5d..fc5315b 100644
3265--- a/arch/sparc/Makefile
3266+++ b/arch/sparc/Makefile
3267@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277index 9f421df..b81fc12 100644
3278--- a/arch/sparc/include/asm/atomic_64.h
3279+++ b/arch/sparc/include/asm/atomic_64.h
3280@@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285+{
3286+ return v->counter;
3287+}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290+{
3291+ return v->counter;
3292+}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296+{
3297+ v->counter = i;
3298+}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301+{
3302+ v->counter = i;
3303+}
3304
3305 extern void atomic_add(int, atomic_t *);
3306+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326+{
3327+ return atomic_add_ret_unchecked(1, v);
3328+}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331+{
3332+ return atomic64_add_ret_unchecked(1, v);
3333+}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340+{
3341+ return atomic_add_ret_unchecked(i, v);
3342+}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345+{
3346+ return atomic64_add_ret_unchecked(i, v);
3347+}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356+{
3357+ return atomic_inc_return_unchecked(v) == 0;
3358+}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367+{
3368+ atomic_add_unchecked(1, v);
3369+}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372+{
3373+ atomic64_add_unchecked(1, v);
3374+}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378+{
3379+ atomic_sub_unchecked(1, v);
3380+}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383+{
3384+ atomic64_sub_unchecked(1, v);
3385+}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392+{
3393+ return cmpxchg(&v->counter, old, new);
3394+}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397+{
3398+ return xchg(&v->counter, new);
3399+}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403- int c, old;
3404+ int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407- if (unlikely(c == (u)))
3408+ if (unlikely(c == u))
3409 break;
3410- old = atomic_cmpxchg((v), c, c + (a));
3411+
3412+ asm volatile("addcc %2, %0, %0\n"
3413+
3414+#ifdef CONFIG_PAX_REFCOUNT
3415+ "tvs %%icc, 6\n"
3416+#endif
3417+
3418+ : "=r" (new)
3419+ : "0" (c), "ir" (a)
3420+ : "cc");
3421+
3422+ old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431+{
3432+ return xchg(&v->counter, new);
3433+}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437- long c, old;
3438+ long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441- if (unlikely(c == (u)))
3442+ if (unlikely(c == u))
3443 break;
3444- old = atomic64_cmpxchg((v), c, c + (a));
3445+
3446+ asm volatile("addcc %2, %0, %0\n"
3447+
3448+#ifdef CONFIG_PAX_REFCOUNT
3449+ "tvs %%xcc, 6\n"
3450+#endif
3451+
3452+ : "=r" (new)
3453+ : "0" (c), "ir" (a)
3454+ : "cc");
3455+
3456+ old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461- return c != (u);
3462+ return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467index 69358b5..17b4745 100644
3468--- a/arch/sparc/include/asm/cache.h
3469+++ b/arch/sparc/include/asm/cache.h
3470@@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474-#define L1_CACHE_BYTES 32
3475+#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480index 4269ca6..e3da77f 100644
3481--- a/arch/sparc/include/asm/elf_32.h
3482+++ b/arch/sparc/include/asm/elf_32.h
3483@@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487+#ifdef CONFIG_PAX_ASLR
3488+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489+
3490+#define PAX_DELTA_MMAP_LEN 16
3491+#define PAX_DELTA_STACK_LEN 16
3492+#endif
3493+
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498index 7df8b7f..4946269 100644
3499--- a/arch/sparc/include/asm/elf_64.h
3500+++ b/arch/sparc/include/asm/elf_64.h
3501@@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505+#ifdef CONFIG_PAX_ASLR
3506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507+
3508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510+#endif
3511+
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516index a790cc6..091ed94 100644
3517--- a/arch/sparc/include/asm/pgtable_32.h
3518+++ b/arch/sparc/include/asm/pgtable_32.h
3519@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523+
3524+#ifdef CONFIG_PAX_PAGEEXEC
3525+BTFIXUPDEF_INT(page_shared_noexec)
3526+BTFIXUPDEF_INT(page_copy_noexec)
3527+BTFIXUPDEF_INT(page_readonly_noexec)
3528+#endif
3529+
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537+#ifdef CONFIG_PAX_PAGEEXEC
3538+extern pgprot_t PAGE_SHARED_NOEXEC;
3539+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541+#else
3542+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543+# define PAGE_COPY_NOEXEC PAGE_COPY
3544+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545+#endif
3546+
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551index f6ae2b2..b03ffc7 100644
3552--- a/arch/sparc/include/asm/pgtsrmmu.h
3553+++ b/arch/sparc/include/asm/pgtsrmmu.h
3554@@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558+
3559+#ifdef CONFIG_PAX_PAGEEXEC
3560+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563+#endif
3564+
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569index 9689176..63c18ea 100644
3570--- a/arch/sparc/include/asm/spinlock_64.h
3571+++ b/arch/sparc/include/asm/spinlock_64.h
3572@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576-static void inline arch_read_lock(arch_rwlock_t *lock)
3577+static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584-"4: add %0, 1, %1\n"
3585+"4: addcc %0, 1, %1\n"
3586+
3587+#ifdef CONFIG_PAX_REFCOUNT
3588+" tvs %%icc, 6\n"
3589+#endif
3590+
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598- : "memory");
3599+ : "memory", "cc");
3600 }
3601
3602-static int inline arch_read_trylock(arch_rwlock_t *lock)
3603+static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611-" add %0, 1, %1\n"
3612+" addcc %0, 1, %1\n"
3613+
3614+#ifdef CONFIG_PAX_REFCOUNT
3615+" tvs %%icc, 6\n"
3616+#endif
3617+
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625-static void inline arch_read_unlock(arch_rwlock_t *lock)
3626+static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632-" sub %0, 1, %1\n"
3633+" subcc %0, 1, %1\n"
3634+
3635+#ifdef CONFIG_PAX_REFCOUNT
3636+" tvs %%icc, 6\n"
3637+#endif
3638+
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646-static void inline arch_write_lock(arch_rwlock_t *lock)
3647+static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655-static void inline arch_write_unlock(arch_rwlock_t *lock)
3656+static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664-static int inline arch_write_trylock(arch_rwlock_t *lock)
3665+static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670index fa57532..e1a4c53 100644
3671--- a/arch/sparc/include/asm/thread_info_32.h
3672+++ b/arch/sparc/include/asm/thread_info_32.h
3673@@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677+
3678+ unsigned long lowest_stack;
3679 };
3680
3681 /*
3682diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683index 60d86be..952dea1 100644
3684--- a/arch/sparc/include/asm/thread_info_64.h
3685+++ b/arch/sparc/include/asm/thread_info_64.h
3686@@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690+ unsigned long lowest_stack;
3691+
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696index e88fbe5..96b0ce5 100644
3697--- a/arch/sparc/include/asm/uaccess.h
3698+++ b/arch/sparc/include/asm/uaccess.h
3699@@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702+
3703+#ifdef __KERNEL__
3704+#ifndef __ASSEMBLY__
3705+#include <linux/types.h>
3706+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707+#endif
3708+#endif
3709+
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714index 8303ac4..07f333d 100644
3715--- a/arch/sparc/include/asm/uaccess_32.h
3716+++ b/arch/sparc/include/asm/uaccess_32.h
3717@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721- if (n && __access_ok((unsigned long) to, n))
3722+ if ((long)n < 0)
3723+ return n;
3724+
3725+ if (n && __access_ok((unsigned long) to, n)) {
3726+ if (!__builtin_constant_p(n))
3727+ check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729- else
3730+ } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736+ if ((long)n < 0)
3737+ return n;
3738+
3739+ if (!__builtin_constant_p(n))
3740+ check_object_size(from, n, true);
3741+
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747- if (n && __access_ok((unsigned long) from, n))
3748+ if ((long)n < 0)
3749+ return n;
3750+
3751+ if (n && __access_ok((unsigned long) from, n)) {
3752+ if (!__builtin_constant_p(n))
3753+ check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755- else
3756+ } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762+ if ((long)n < 0)
3763+ return n;
3764+
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769index 3e1449f..5293a0e 100644
3770--- a/arch/sparc/include/asm/uaccess_64.h
3771+++ b/arch/sparc/include/asm/uaccess_64.h
3772@@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776+#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784- unsigned long ret = ___copy_from_user(to, from, size);
3785+ unsigned long ret;
3786
3787+ if ((long)size < 0 || size > INT_MAX)
3788+ return size;
3789+
3790+ if (!__builtin_constant_p(size))
3791+ check_object_size(to, size, false);
3792+
3793+ ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801- unsigned long ret = ___copy_to_user(to, from, size);
3802+ unsigned long ret;
3803
3804+ if ((long)size < 0 || size > INT_MAX)
3805+ return size;
3806+
3807+ if (!__builtin_constant_p(size))
3808+ check_object_size(from, size, true);
3809+
3810+ ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815index cb85458..e063f17 100644
3816--- a/arch/sparc/kernel/Makefile
3817+++ b/arch/sparc/kernel/Makefile
3818@@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822-ccflags-y := -Werror
3823+#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828index f793742..4d880af 100644
3829--- a/arch/sparc/kernel/process_32.c
3830+++ b/arch/sparc/kernel/process_32.c
3831@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835- printk("%pS\n", (void *) rw->ins[7]);
3836+ printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844- printk("PC: <%pS>\n", (void *) r->pc);
3845+ printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861- printk("%pS ] ", (void *) pc);
3862+ printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867index 3739a06..48b2ff0 100644
3868--- a/arch/sparc/kernel/process_64.c
3869+++ b/arch/sparc/kernel/process_64.c
3870@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882- printk("TPC: <%pS>\n", (void *) regs->tpc);
3883+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906index 42b282f..28ce9f2 100644
3907--- a/arch/sparc/kernel/sys_sparc_32.c
3908+++ b/arch/sparc/kernel/sys_sparc_32.c
3909@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913- addr = TASK_UNMAPPED_BASE;
3914+ addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922- if (!vmm || addr + len <= vmm->vm_start)
3923+ if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928index 441521a..b767073 100644
3929--- a/arch/sparc/kernel/sys_sparc_64.c
3930+++ b/arch/sparc/kernel/sys_sparc_64.c
3931@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935- if ((flags & MAP_SHARED) &&
3936+ if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944+#ifdef CONFIG_PAX_RANDMMAP
3945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946+#endif
3947+
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955- if (task_size - len >= addr &&
3956- (!vma || addr + len <= vma->vm_start))
3957+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962- start_addr = addr = mm->free_area_cache;
3963+ start_addr = addr = mm->free_area_cache;
3964 } else {
3965- start_addr = addr = TASK_UNMAPPED_BASE;
3966+ start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970@@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974- if (start_addr != TASK_UNMAPPED_BASE) {
3975- start_addr = addr = TASK_UNMAPPED_BASE;
3976+ if (start_addr != mm->mmap_base) {
3977+ start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983- if (likely(!vma || addr + len <= vma->vm_start)) {
3984+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992- if ((flags & MAP_SHARED) &&
3993+ if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001- if (task_size - len >= addr &&
4002- (!vma || addr + len <= vma->vm_start))
4003+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011- if (!vma || addr <= vma->vm_start) {
4012+ if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020- addr = mm->mmap_base-len;
4021- if (do_color_align)
4022- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023+ addr = mm->mmap_base - len;
4024
4025 do {
4026+ if (do_color_align)
4027+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034- if (likely(!vma || addr+len <= vma->vm_start)) {
4035+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043- addr = vma->vm_start-len;
4044- if (do_color_align)
4045- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046- } while (likely(len < vma->vm_start));
4047+ addr = skip_heap_stack_gap(vma, len);
4048+ } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069+
4070+#ifdef CONFIG_PAX_RANDMMAP
4071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4072+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073+#endif
4074+
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079index 591f20c..0f1b925 100644
4080--- a/arch/sparc/kernel/traps_32.c
4081+++ b/arch/sparc/kernel/traps_32.c
4082@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086+extern void gr_handle_kernel_exploit(void);
4087+
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103- if(regs->psr & PSR_PS)
4104+ if(regs->psr & PSR_PS) {
4105+ gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107+ }
4108 do_exit(SIGSEGV);
4109 }
4110
4111diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112index 0cbdaa4..438e4c9 100644
4113--- a/arch/sparc/kernel/traps_64.c
4114+++ b/arch/sparc/kernel/traps_64.c
4115@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128+
4129+#ifdef CONFIG_PAX_REFCOUNT
4130+ if (lvl == 6)
4131+ pax_report_refcount_overflow(regs);
4132+#endif
4133+
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141-
4142+
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147+#ifdef CONFIG_PAX_REFCOUNT
4148+ if (lvl == 6)
4149+ pax_report_refcount_overflow(regs);
4150+#endif
4151+
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159- printk("TPC<%pS>\n", (void *) regs->tpc);
4160+ printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210- printk(" [%016lx] %pS\n", pc, (void *) pc);
4211+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217- printk(" [%016lx] %pS\n", pc, (void *) pc);
4218+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226+extern void gr_handle_kernel_exploit(void);
4227+
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244- if (regs->tstate & TSTATE_PRIV)
4245+ if (regs->tstate & TSTATE_PRIV) {
4246+ gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248+ }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253index 76e4ac1..78f8bb1 100644
4254--- a/arch/sparc/kernel/unaligned_64.c
4255+++ b/arch/sparc/kernel/unaligned_64.c
4256@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266index a3fc437..fea9957 100644
4267--- a/arch/sparc/lib/Makefile
4268+++ b/arch/sparc/lib/Makefile
4269@@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273-ccflags-y := -Werror
4274+#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279index 59186e0..f747d7a 100644
4280--- a/arch/sparc/lib/atomic_64.S
4281+++ b/arch/sparc/lib/atomic_64.S
4282@@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286- add %g1, %o0, %g7
4287+ addcc %g1, %o0, %g7
4288+
4289+#ifdef CONFIG_PAX_REFCOUNT
4290+ tvs %icc, 6
4291+#endif
4292+
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300+ .globl atomic_add_unchecked
4301+ .type atomic_add_unchecked,#function
4302+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303+ BACKOFF_SETUP(%o2)
4304+1: lduw [%o1], %g1
4305+ add %g1, %o0, %g7
4306+ cas [%o1], %g1, %g7
4307+ cmp %g1, %g7
4308+ bne,pn %icc, 2f
4309+ nop
4310+ retl
4311+ nop
4312+2: BACKOFF_SPIN(%o2, %o3, 1b)
4313+ .size atomic_add_unchecked, .-atomic_add_unchecked
4314+
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320- sub %g1, %o0, %g7
4321+ subcc %g1, %o0, %g7
4322+
4323+#ifdef CONFIG_PAX_REFCOUNT
4324+ tvs %icc, 6
4325+#endif
4326+
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334+ .globl atomic_sub_unchecked
4335+ .type atomic_sub_unchecked,#function
4336+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337+ BACKOFF_SETUP(%o2)
4338+1: lduw [%o1], %g1
4339+ sub %g1, %o0, %g7
4340+ cas [%o1], %g1, %g7
4341+ cmp %g1, %g7
4342+ bne,pn %icc, 2f
4343+ nop
4344+ retl
4345+ nop
4346+2: BACKOFF_SPIN(%o2, %o3, 1b)
4347+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348+
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354- add %g1, %o0, %g7
4355+ addcc %g1, %o0, %g7
4356+
4357+#ifdef CONFIG_PAX_REFCOUNT
4358+ tvs %icc, 6
4359+#endif
4360+
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368+ .globl atomic_add_ret_unchecked
4369+ .type atomic_add_ret_unchecked,#function
4370+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371+ BACKOFF_SETUP(%o2)
4372+1: lduw [%o1], %g1
4373+ addcc %g1, %o0, %g7
4374+ cas [%o1], %g1, %g7
4375+ cmp %g1, %g7
4376+ bne,pn %icc, 2f
4377+ add %g7, %o0, %g7
4378+ sra %g7, 0, %o0
4379+ retl
4380+ nop
4381+2: BACKOFF_SPIN(%o2, %o3, 1b)
4382+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383+
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389- sub %g1, %o0, %g7
4390+ subcc %g1, %o0, %g7
4391+
4392+#ifdef CONFIG_PAX_REFCOUNT
4393+ tvs %icc, 6
4394+#endif
4395+
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403- add %g1, %o0, %g7
4404+ addcc %g1, %o0, %g7
4405+
4406+#ifdef CONFIG_PAX_REFCOUNT
4407+ tvs %xcc, 6
4408+#endif
4409+
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417+ .globl atomic64_add_unchecked
4418+ .type atomic64_add_unchecked,#function
4419+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420+ BACKOFF_SETUP(%o2)
4421+1: ldx [%o1], %g1
4422+ addcc %g1, %o0, %g7
4423+ casx [%o1], %g1, %g7
4424+ cmp %g1, %g7
4425+ bne,pn %xcc, 2f
4426+ nop
4427+ retl
4428+ nop
4429+2: BACKOFF_SPIN(%o2, %o3, 1b)
4430+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431+
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437- sub %g1, %o0, %g7
4438+ subcc %g1, %o0, %g7
4439+
4440+#ifdef CONFIG_PAX_REFCOUNT
4441+ tvs %xcc, 6
4442+#endif
4443+
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451+ .globl atomic64_sub_unchecked
4452+ .type atomic64_sub_unchecked,#function
4453+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454+ BACKOFF_SETUP(%o2)
4455+1: ldx [%o1], %g1
4456+ subcc %g1, %o0, %g7
4457+ casx [%o1], %g1, %g7
4458+ cmp %g1, %g7
4459+ bne,pn %xcc, 2f
4460+ nop
4461+ retl
4462+ nop
4463+2: BACKOFF_SPIN(%o2, %o3, 1b)
4464+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465+
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471- add %g1, %o0, %g7
4472+ addcc %g1, %o0, %g7
4473+
4474+#ifdef CONFIG_PAX_REFCOUNT
4475+ tvs %xcc, 6
4476+#endif
4477+
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485+ .globl atomic64_add_ret_unchecked
4486+ .type atomic64_add_ret_unchecked,#function
4487+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488+ BACKOFF_SETUP(%o2)
4489+1: ldx [%o1], %g1
4490+ addcc %g1, %o0, %g7
4491+ casx [%o1], %g1, %g7
4492+ cmp %g1, %g7
4493+ bne,pn %xcc, 2f
4494+ add %g7, %o0, %g7
4495+ mov %g7, %o0
4496+ retl
4497+ nop
4498+2: BACKOFF_SPIN(%o2, %o3, 1b)
4499+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500+
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506- sub %g1, %o0, %g7
4507+ subcc %g1, %o0, %g7
4508+
4509+#ifdef CONFIG_PAX_REFCOUNT
4510+ tvs %xcc, 6
4511+#endif
4512+
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517index 1b30bb3..b4a16c7 100644
4518--- a/arch/sparc/lib/ksyms.c
4519+++ b/arch/sparc/lib/ksyms.c
4520@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524+EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528+EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531+EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535+EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540index 301421c..e2535d1 100644
4541--- a/arch/sparc/mm/Makefile
4542+++ b/arch/sparc/mm/Makefile
4543@@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547-ccflags-y := -Werror
4548+#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553index 8023fd7..c8e89e9 100644
4554--- a/arch/sparc/mm/fault_32.c
4555+++ b/arch/sparc/mm/fault_32.c
4556@@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560+#include <linux/slab.h>
4561+#include <linux/pagemap.h>
4562+#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570+#ifdef CONFIG_PAX_PAGEEXEC
4571+#ifdef CONFIG_PAX_DLRESOLVE
4572+static void pax_emuplt_close(struct vm_area_struct *vma)
4573+{
4574+ vma->vm_mm->call_dl_resolve = 0UL;
4575+}
4576+
4577+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578+{
4579+ unsigned int *kaddr;
4580+
4581+ vmf->page = alloc_page(GFP_HIGHUSER);
4582+ if (!vmf->page)
4583+ return VM_FAULT_OOM;
4584+
4585+ kaddr = kmap(vmf->page);
4586+ memset(kaddr, 0, PAGE_SIZE);
4587+ kaddr[0] = 0x9DE3BFA8U; /* save */
4588+ flush_dcache_page(vmf->page);
4589+ kunmap(vmf->page);
4590+ return VM_FAULT_MAJOR;
4591+}
4592+
4593+static const struct vm_operations_struct pax_vm_ops = {
4594+ .close = pax_emuplt_close,
4595+ .fault = pax_emuplt_fault
4596+};
4597+
4598+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599+{
4600+ int ret;
4601+
4602+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4603+ vma->vm_mm = current->mm;
4604+ vma->vm_start = addr;
4605+ vma->vm_end = addr + PAGE_SIZE;
4606+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608+ vma->vm_ops = &pax_vm_ops;
4609+
4610+ ret = insert_vm_struct(current->mm, vma);
4611+ if (ret)
4612+ return ret;
4613+
4614+ ++current->mm->total_vm;
4615+ return 0;
4616+}
4617+#endif
4618+
4619+/*
4620+ * PaX: decide what to do with offenders (regs->pc = fault address)
4621+ *
4622+ * returns 1 when task should be killed
4623+ * 2 when patched PLT trampoline was detected
4624+ * 3 when unpatched PLT trampoline was detected
4625+ */
4626+static int pax_handle_fetch_fault(struct pt_regs *regs)
4627+{
4628+
4629+#ifdef CONFIG_PAX_EMUPLT
4630+ int err;
4631+
4632+ do { /* PaX: patched PLT emulation #1 */
4633+ unsigned int sethi1, sethi2, jmpl;
4634+
4635+ err = get_user(sethi1, (unsigned int *)regs->pc);
4636+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638+
4639+ if (err)
4640+ break;
4641+
4642+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645+ {
4646+ unsigned int addr;
4647+
4648+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649+ addr = regs->u_regs[UREG_G1];
4650+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651+ regs->pc = addr;
4652+ regs->npc = addr+4;
4653+ return 2;
4654+ }
4655+ } while (0);
4656+
4657+ { /* PaX: patched PLT emulation #2 */
4658+ unsigned int ba;
4659+
4660+ err = get_user(ba, (unsigned int *)regs->pc);
4661+
4662+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663+ unsigned int addr;
4664+
4665+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666+ regs->pc = addr;
4667+ regs->npc = addr+4;
4668+ return 2;
4669+ }
4670+ }
4671+
4672+ do { /* PaX: patched PLT emulation #3 */
4673+ unsigned int sethi, jmpl, nop;
4674+
4675+ err = get_user(sethi, (unsigned int *)regs->pc);
4676+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678+
4679+ if (err)
4680+ break;
4681+
4682+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684+ nop == 0x01000000U)
4685+ {
4686+ unsigned int addr;
4687+
4688+ addr = (sethi & 0x003FFFFFU) << 10;
4689+ regs->u_regs[UREG_G1] = addr;
4690+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691+ regs->pc = addr;
4692+ regs->npc = addr+4;
4693+ return 2;
4694+ }
4695+ } while (0);
4696+
4697+ do { /* PaX: unpatched PLT emulation step 1 */
4698+ unsigned int sethi, ba, nop;
4699+
4700+ err = get_user(sethi, (unsigned int *)regs->pc);
4701+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703+
4704+ if (err)
4705+ break;
4706+
4707+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709+ nop == 0x01000000U)
4710+ {
4711+ unsigned int addr, save, call;
4712+
4713+ if ((ba & 0xFFC00000U) == 0x30800000U)
4714+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715+ else
4716+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717+
4718+ err = get_user(save, (unsigned int *)addr);
4719+ err |= get_user(call, (unsigned int *)(addr+4));
4720+ err |= get_user(nop, (unsigned int *)(addr+8));
4721+ if (err)
4722+ break;
4723+
4724+#ifdef CONFIG_PAX_DLRESOLVE
4725+ if (save == 0x9DE3BFA8U &&
4726+ (call & 0xC0000000U) == 0x40000000U &&
4727+ nop == 0x01000000U)
4728+ {
4729+ struct vm_area_struct *vma;
4730+ unsigned long call_dl_resolve;
4731+
4732+ down_read(&current->mm->mmap_sem);
4733+ call_dl_resolve = current->mm->call_dl_resolve;
4734+ up_read(&current->mm->mmap_sem);
4735+ if (likely(call_dl_resolve))
4736+ goto emulate;
4737+
4738+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739+
4740+ down_write(&current->mm->mmap_sem);
4741+ if (current->mm->call_dl_resolve) {
4742+ call_dl_resolve = current->mm->call_dl_resolve;
4743+ up_write(&current->mm->mmap_sem);
4744+ if (vma)
4745+ kmem_cache_free(vm_area_cachep, vma);
4746+ goto emulate;
4747+ }
4748+
4749+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751+ up_write(&current->mm->mmap_sem);
4752+ if (vma)
4753+ kmem_cache_free(vm_area_cachep, vma);
4754+ return 1;
4755+ }
4756+
4757+ if (pax_insert_vma(vma, call_dl_resolve)) {
4758+ up_write(&current->mm->mmap_sem);
4759+ kmem_cache_free(vm_area_cachep, vma);
4760+ return 1;
4761+ }
4762+
4763+ current->mm->call_dl_resolve = call_dl_resolve;
4764+ up_write(&current->mm->mmap_sem);
4765+
4766+emulate:
4767+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768+ regs->pc = call_dl_resolve;
4769+ regs->npc = addr+4;
4770+ return 3;
4771+ }
4772+#endif
4773+
4774+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775+ if ((save & 0xFFC00000U) == 0x05000000U &&
4776+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4777+ nop == 0x01000000U)
4778+ {
4779+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780+ regs->u_regs[UREG_G2] = addr + 4;
4781+ addr = (save & 0x003FFFFFU) << 10;
4782+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783+ regs->pc = addr;
4784+ regs->npc = addr+4;
4785+ return 3;
4786+ }
4787+ }
4788+ } while (0);
4789+
4790+ do { /* PaX: unpatched PLT emulation step 2 */
4791+ unsigned int save, call, nop;
4792+
4793+ err = get_user(save, (unsigned int *)(regs->pc-4));
4794+ err |= get_user(call, (unsigned int *)regs->pc);
4795+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796+ if (err)
4797+ break;
4798+
4799+ if (save == 0x9DE3BFA8U &&
4800+ (call & 0xC0000000U) == 0x40000000U &&
4801+ nop == 0x01000000U)
4802+ {
4803+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804+
4805+ regs->u_regs[UREG_RETPC] = regs->pc;
4806+ regs->pc = dl_resolve;
4807+ regs->npc = dl_resolve+4;
4808+ return 3;
4809+ }
4810+ } while (0);
4811+#endif
4812+
4813+ return 1;
4814+}
4815+
4816+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817+{
4818+ unsigned long i;
4819+
4820+ printk(KERN_ERR "PAX: bytes at PC: ");
4821+ for (i = 0; i < 8; i++) {
4822+ unsigned int c;
4823+ if (get_user(c, (unsigned int *)pc+i))
4824+ printk(KERN_CONT "???????? ");
4825+ else
4826+ printk(KERN_CONT "%08x ", c);
4827+ }
4828+ printk("\n");
4829+}
4830+#endif
4831+
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835@@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839+
4840+#ifdef CONFIG_PAX_PAGEEXEC
4841+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842+ up_read(&mm->mmap_sem);
4843+ switch (pax_handle_fetch_fault(regs)) {
4844+
4845+#ifdef CONFIG_PAX_EMUPLT
4846+ case 2:
4847+ case 3:
4848+ return;
4849+#endif
4850+
4851+ }
4852+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853+ do_group_exit(SIGKILL);
4854+ }
4855+#endif
4856+
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861index 504c062..6fcb9c6 100644
4862--- a/arch/sparc/mm/fault_64.c
4863+++ b/arch/sparc/mm/fault_64.c
4864@@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868+#include <linux/slab.h>
4869+#include <linux/pagemap.h>
4870+#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887+#ifdef CONFIG_PAX_PAGEEXEC
4888+#ifdef CONFIG_PAX_DLRESOLVE
4889+static void pax_emuplt_close(struct vm_area_struct *vma)
4890+{
4891+ vma->vm_mm->call_dl_resolve = 0UL;
4892+}
4893+
4894+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895+{
4896+ unsigned int *kaddr;
4897+
4898+ vmf->page = alloc_page(GFP_HIGHUSER);
4899+ if (!vmf->page)
4900+ return VM_FAULT_OOM;
4901+
4902+ kaddr = kmap(vmf->page);
4903+ memset(kaddr, 0, PAGE_SIZE);
4904+ kaddr[0] = 0x9DE3BFA8U; /* save */
4905+ flush_dcache_page(vmf->page);
4906+ kunmap(vmf->page);
4907+ return VM_FAULT_MAJOR;
4908+}
4909+
4910+static const struct vm_operations_struct pax_vm_ops = {
4911+ .close = pax_emuplt_close,
4912+ .fault = pax_emuplt_fault
4913+};
4914+
4915+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916+{
4917+ int ret;
4918+
4919+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4920+ vma->vm_mm = current->mm;
4921+ vma->vm_start = addr;
4922+ vma->vm_end = addr + PAGE_SIZE;
4923+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925+ vma->vm_ops = &pax_vm_ops;
4926+
4927+ ret = insert_vm_struct(current->mm, vma);
4928+ if (ret)
4929+ return ret;
4930+
4931+ ++current->mm->total_vm;
4932+ return 0;
4933+}
4934+#endif
4935+
4936+/*
4937+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4938+ *
4939+ * returns 1 when task should be killed
4940+ * 2 when patched PLT trampoline was detected
4941+ * 3 when unpatched PLT trampoline was detected
4942+ */
4943+static int pax_handle_fetch_fault(struct pt_regs *regs)
4944+{
4945+
4946+#ifdef CONFIG_PAX_EMUPLT
4947+ int err;
4948+
4949+ do { /* PaX: patched PLT emulation #1 */
4950+ unsigned int sethi1, sethi2, jmpl;
4951+
4952+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4953+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955+
4956+ if (err)
4957+ break;
4958+
4959+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962+ {
4963+ unsigned long addr;
4964+
4965+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966+ addr = regs->u_regs[UREG_G1];
4967+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968+
4969+ if (test_thread_flag(TIF_32BIT))
4970+ addr &= 0xFFFFFFFFUL;
4971+
4972+ regs->tpc = addr;
4973+ regs->tnpc = addr+4;
4974+ return 2;
4975+ }
4976+ } while (0);
4977+
4978+ { /* PaX: patched PLT emulation #2 */
4979+ unsigned int ba;
4980+
4981+ err = get_user(ba, (unsigned int *)regs->tpc);
4982+
4983+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984+ unsigned long addr;
4985+
4986+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987+
4988+ if (test_thread_flag(TIF_32BIT))
4989+ addr &= 0xFFFFFFFFUL;
4990+
4991+ regs->tpc = addr;
4992+ regs->tnpc = addr+4;
4993+ return 2;
4994+ }
4995+ }
4996+
4997+ do { /* PaX: patched PLT emulation #3 */
4998+ unsigned int sethi, jmpl, nop;
4999+
5000+ err = get_user(sethi, (unsigned int *)regs->tpc);
5001+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003+
5004+ if (err)
5005+ break;
5006+
5007+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009+ nop == 0x01000000U)
5010+ {
5011+ unsigned long addr;
5012+
5013+ addr = (sethi & 0x003FFFFFU) << 10;
5014+ regs->u_regs[UREG_G1] = addr;
5015+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016+
5017+ if (test_thread_flag(TIF_32BIT))
5018+ addr &= 0xFFFFFFFFUL;
5019+
5020+ regs->tpc = addr;
5021+ regs->tnpc = addr+4;
5022+ return 2;
5023+ }
5024+ } while (0);
5025+
5026+ do { /* PaX: patched PLT emulation #4 */
5027+ unsigned int sethi, mov1, call, mov2;
5028+
5029+ err = get_user(sethi, (unsigned int *)regs->tpc);
5030+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033+
5034+ if (err)
5035+ break;
5036+
5037+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038+ mov1 == 0x8210000FU &&
5039+ (call & 0xC0000000U) == 0x40000000U &&
5040+ mov2 == 0x9E100001U)
5041+ {
5042+ unsigned long addr;
5043+
5044+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046+
5047+ if (test_thread_flag(TIF_32BIT))
5048+ addr &= 0xFFFFFFFFUL;
5049+
5050+ regs->tpc = addr;
5051+ regs->tnpc = addr+4;
5052+ return 2;
5053+ }
5054+ } while (0);
5055+
5056+ do { /* PaX: patched PLT emulation #5 */
5057+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058+
5059+ err = get_user(sethi, (unsigned int *)regs->tpc);
5060+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067+
5068+ if (err)
5069+ break;
5070+
5071+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5075+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076+ sllx == 0x83287020U &&
5077+ jmpl == 0x81C04005U &&
5078+ nop == 0x01000000U)
5079+ {
5080+ unsigned long addr;
5081+
5082+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083+ regs->u_regs[UREG_G1] <<= 32;
5084+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086+ regs->tpc = addr;
5087+ regs->tnpc = addr+4;
5088+ return 2;
5089+ }
5090+ } while (0);
5091+
5092+ do { /* PaX: patched PLT emulation #6 */
5093+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094+
5095+ err = get_user(sethi, (unsigned int *)regs->tpc);
5096+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102+
5103+ if (err)
5104+ break;
5105+
5106+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109+ sllx == 0x83287020U &&
5110+ (or & 0xFFFFE000U) == 0x8A116000U &&
5111+ jmpl == 0x81C04005U &&
5112+ nop == 0x01000000U)
5113+ {
5114+ unsigned long addr;
5115+
5116+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117+ regs->u_regs[UREG_G1] <<= 32;
5118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120+ regs->tpc = addr;
5121+ regs->tnpc = addr+4;
5122+ return 2;
5123+ }
5124+ } while (0);
5125+
5126+ do { /* PaX: unpatched PLT emulation step 1 */
5127+ unsigned int sethi, ba, nop;
5128+
5129+ err = get_user(sethi, (unsigned int *)regs->tpc);
5130+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132+
5133+ if (err)
5134+ break;
5135+
5136+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138+ nop == 0x01000000U)
5139+ {
5140+ unsigned long addr;
5141+ unsigned int save, call;
5142+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143+
5144+ if ((ba & 0xFFC00000U) == 0x30800000U)
5145+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146+ else
5147+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148+
5149+ if (test_thread_flag(TIF_32BIT))
5150+ addr &= 0xFFFFFFFFUL;
5151+
5152+ err = get_user(save, (unsigned int *)addr);
5153+ err |= get_user(call, (unsigned int *)(addr+4));
5154+ err |= get_user(nop, (unsigned int *)(addr+8));
5155+ if (err)
5156+ break;
5157+
5158+#ifdef CONFIG_PAX_DLRESOLVE
5159+ if (save == 0x9DE3BFA8U &&
5160+ (call & 0xC0000000U) == 0x40000000U &&
5161+ nop == 0x01000000U)
5162+ {
5163+ struct vm_area_struct *vma;
5164+ unsigned long call_dl_resolve;
5165+
5166+ down_read(&current->mm->mmap_sem);
5167+ call_dl_resolve = current->mm->call_dl_resolve;
5168+ up_read(&current->mm->mmap_sem);
5169+ if (likely(call_dl_resolve))
5170+ goto emulate;
5171+
5172+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173+
5174+ down_write(&current->mm->mmap_sem);
5175+ if (current->mm->call_dl_resolve) {
5176+ call_dl_resolve = current->mm->call_dl_resolve;
5177+ up_write(&current->mm->mmap_sem);
5178+ if (vma)
5179+ kmem_cache_free(vm_area_cachep, vma);
5180+ goto emulate;
5181+ }
5182+
5183+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185+ up_write(&current->mm->mmap_sem);
5186+ if (vma)
5187+ kmem_cache_free(vm_area_cachep, vma);
5188+ return 1;
5189+ }
5190+
5191+ if (pax_insert_vma(vma, call_dl_resolve)) {
5192+ up_write(&current->mm->mmap_sem);
5193+ kmem_cache_free(vm_area_cachep, vma);
5194+ return 1;
5195+ }
5196+
5197+ current->mm->call_dl_resolve = call_dl_resolve;
5198+ up_write(&current->mm->mmap_sem);
5199+
5200+emulate:
5201+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202+ regs->tpc = call_dl_resolve;
5203+ regs->tnpc = addr+4;
5204+ return 3;
5205+ }
5206+#endif
5207+
5208+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209+ if ((save & 0xFFC00000U) == 0x05000000U &&
5210+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5211+ nop == 0x01000000U)
5212+ {
5213+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214+ regs->u_regs[UREG_G2] = addr + 4;
5215+ addr = (save & 0x003FFFFFU) << 10;
5216+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217+
5218+ if (test_thread_flag(TIF_32BIT))
5219+ addr &= 0xFFFFFFFFUL;
5220+
5221+ regs->tpc = addr;
5222+ regs->tnpc = addr+4;
5223+ return 3;
5224+ }
5225+
5226+ /* PaX: 64-bit PLT stub */
5227+ err = get_user(sethi1, (unsigned int *)addr);
5228+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5229+ err |= get_user(or1, (unsigned int *)(addr+8));
5230+ err |= get_user(or2, (unsigned int *)(addr+12));
5231+ err |= get_user(sllx, (unsigned int *)(addr+16));
5232+ err |= get_user(add, (unsigned int *)(addr+20));
5233+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5234+ err |= get_user(nop, (unsigned int *)(addr+28));
5235+ if (err)
5236+ break;
5237+
5238+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5241+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242+ sllx == 0x89293020U &&
5243+ add == 0x8A010005U &&
5244+ jmpl == 0x89C14000U &&
5245+ nop == 0x01000000U)
5246+ {
5247+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249+ regs->u_regs[UREG_G4] <<= 32;
5250+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252+ regs->u_regs[UREG_G4] = addr + 24;
5253+ addr = regs->u_regs[UREG_G5];
5254+ regs->tpc = addr;
5255+ regs->tnpc = addr+4;
5256+ return 3;
5257+ }
5258+ }
5259+ } while (0);
5260+
5261+#ifdef CONFIG_PAX_DLRESOLVE
5262+ do { /* PaX: unpatched PLT emulation step 2 */
5263+ unsigned int save, call, nop;
5264+
5265+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5266+ err |= get_user(call, (unsigned int *)regs->tpc);
5267+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268+ if (err)
5269+ break;
5270+
5271+ if (save == 0x9DE3BFA8U &&
5272+ (call & 0xC0000000U) == 0x40000000U &&
5273+ nop == 0x01000000U)
5274+ {
5275+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276+
5277+ if (test_thread_flag(TIF_32BIT))
5278+ dl_resolve &= 0xFFFFFFFFUL;
5279+
5280+ regs->u_regs[UREG_RETPC] = regs->tpc;
5281+ regs->tpc = dl_resolve;
5282+ regs->tnpc = dl_resolve+4;
5283+ return 3;
5284+ }
5285+ } while (0);
5286+#endif
5287+
5288+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289+ unsigned int sethi, ba, nop;
5290+
5291+ err = get_user(sethi, (unsigned int *)regs->tpc);
5292+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294+
5295+ if (err)
5296+ break;
5297+
5298+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299+ (ba & 0xFFF00000U) == 0x30600000U &&
5300+ nop == 0x01000000U)
5301+ {
5302+ unsigned long addr;
5303+
5304+ addr = (sethi & 0x003FFFFFU) << 10;
5305+ regs->u_regs[UREG_G1] = addr;
5306+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307+
5308+ if (test_thread_flag(TIF_32BIT))
5309+ addr &= 0xFFFFFFFFUL;
5310+
5311+ regs->tpc = addr;
5312+ regs->tnpc = addr+4;
5313+ return 2;
5314+ }
5315+ } while (0);
5316+
5317+#endif
5318+
5319+ return 1;
5320+}
5321+
5322+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323+{
5324+ unsigned long i;
5325+
5326+ printk(KERN_ERR "PAX: bytes at PC: ");
5327+ for (i = 0; i < 8; i++) {
5328+ unsigned int c;
5329+ if (get_user(c, (unsigned int *)pc+i))
5330+ printk(KERN_CONT "???????? ");
5331+ else
5332+ printk(KERN_CONT "%08x ", c);
5333+ }
5334+ printk("\n");
5335+}
5336+#endif
5337+
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345+#ifdef CONFIG_PAX_PAGEEXEC
5346+ /* PaX: detect ITLB misses on non-exec pages */
5347+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349+ {
5350+ if (address != regs->tpc)
5351+ goto good_area;
5352+
5353+ up_read(&mm->mmap_sem);
5354+ switch (pax_handle_fetch_fault(regs)) {
5355+
5356+#ifdef CONFIG_PAX_EMUPLT
5357+ case 2:
5358+ case 3:
5359+ return;
5360+#endif
5361+
5362+ }
5363+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364+ do_group_exit(SIGKILL);
5365+ }
5366+#endif
5367+
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372index 07e1453..0a7d9e9 100644
5373--- a/arch/sparc/mm/hugetlbpage.c
5374+++ b/arch/sparc/mm/hugetlbpage.c
5375@@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379- if (likely(!vma || addr + len <= vma->vm_start)) {
5380+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388- if (!vma || addr <= vma->vm_start) {
5389+ if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397- addr = (mm->mmap_base-len) & HPAGE_MASK;
5398+ addr = mm->mmap_base - len;
5399
5400 do {
5401+ addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408- if (likely(!vma || addr+len <= vma->vm_start)) {
5409+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417- addr = (vma->vm_start-len) & HPAGE_MASK;
5418- } while (likely(len < vma->vm_start));
5419+ addr = skip_heap_stack_gap(vma, len);
5420+ } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428- if (task_size - len >= addr &&
5429- (!vma || addr + len <= vma->vm_start))
5430+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435index 7b00de6..78239f4 100644
5436--- a/arch/sparc/mm/init_32.c
5437+++ b/arch/sparc/mm/init_32.c
5438@@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444+
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448@@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452- protection_map[1] = PAGE_READONLY;
5453- protection_map[2] = PAGE_COPY;
5454- protection_map[3] = PAGE_COPY;
5455+ protection_map[1] = PAGE_READONLY_NOEXEC;
5456+ protection_map[2] = PAGE_COPY_NOEXEC;
5457+ protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463- protection_map[9] = PAGE_READONLY;
5464- protection_map[10] = PAGE_SHARED;
5465- protection_map[11] = PAGE_SHARED;
5466+ protection_map[9] = PAGE_READONLY_NOEXEC;
5467+ protection_map[10] = PAGE_SHARED_NOEXEC;
5468+ protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473index cbef74e..c38fead 100644
5474--- a/arch/sparc/mm/srmmu.c
5475+++ b/arch/sparc/mm/srmmu.c
5476@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480+
5481+#ifdef CONFIG_PAX_PAGEEXEC
5482+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485+#endif
5486+
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490diff --git a/arch/um/Makefile b/arch/um/Makefile
5491index 7730af6..cce5b19 100644
5492--- a/arch/um/Makefile
5493+++ b/arch/um/Makefile
5494@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498+ifdef CONSTIFY_PLUGIN
5499+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500+endif
5501+
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506index 6c03acd..a5e0215 100644
5507--- a/arch/um/include/asm/kmap_types.h
5508+++ b/arch/um/include/asm/kmap_types.h
5509@@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513+ KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518index 7cfc3ce..cbd1a58 100644
5519--- a/arch/um/include/asm/page.h
5520+++ b/arch/um/include/asm/page.h
5521@@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525+#define ktla_ktva(addr) (addr)
5526+#define ktva_ktla(addr) (addr)
5527+
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532index c533835..84db18e 100644
5533--- a/arch/um/kernel/process.c
5534+++ b/arch/um/kernel/process.c
5535@@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539-/*
5540- * Only x86 and x86_64 have an arch_align_stack().
5541- * All other arches have "#define arch_align_stack(x) (x)"
5542- * in their asm/system.h
5543- * As this is included in UML from asm-um/system-generic.h,
5544- * we can use it to behave as the subarch does.
5545- */
5546-#ifndef arch_align_stack
5547-unsigned long arch_align_stack(unsigned long sp)
5548-{
5549- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550- sp -= get_random_int() % 8192;
5551- return sp & ~0xf;
5552-}
5553-#endif
5554-
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559index efb4294..61bc18c 100644
5560--- a/arch/x86/Kconfig
5561+++ b/arch/x86/Kconfig
5562@@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566- depends on X86_32 && !CC_STACKPROTECTOR
5567+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571@@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575- depends on !X86_NUMAQ
5576+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580@@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584- depends on !X86_NUMAQ
5585+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593- default 0x78000000 if VMSPLIT_2G_OPT
5594+ default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598@@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602+ depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610+ range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618+ range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626- def_bool y
5627+ def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635index e3ca7e0..b30b28a 100644
5636--- a/arch/x86/Kconfig.cpu
5637+++ b/arch/x86/Kconfig.cpu
5638@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642- depends on M586MMX || M586TSC || M586 || M486 || M386
5643+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647@@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666index bf56e17..05f9891 100644
5667--- a/arch/x86/Kconfig.debug
5668+++ b/arch/x86/Kconfig.debug
5669@@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673- depends on DEBUG_KERNEL
5674+ depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682- depends on MODULES
5683+ depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688index b02e509..2631e48 100644
5689--- a/arch/x86/Makefile
5690+++ b/arch/x86/Makefile
5691@@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695+ biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699@@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703+
5704+define OLD_LD
5705+
5706+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707+*** Please upgrade your binutils to 2.18 or newer
5708+endef
5709+
5710+archprepare:
5711+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713index 95365a8..52f857b 100644
5714--- a/arch/x86/boot/Makefile
5715+++ b/arch/x86/boot/Makefile
5716@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720+ifdef CONSTIFY_PLUGIN
5721+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722+endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727index 878e4b9..20537ab 100644
5728--- a/arch/x86/boot/bitops.h
5729+++ b/arch/x86/boot/bitops.h
5730@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749index c7093bd..d4247ffe0 100644
5750--- a/arch/x86/boot/boot.h
5751+++ b/arch/x86/boot/boot.h
5752@@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756- asm("movw %%ds,%0" : "=rm" (seg));
5757+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765- asm("repe; cmpsb; setnz %0"
5766+ asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771index 09664ef..edc5d03 100644
5772--- a/arch/x86/boot/compressed/Makefile
5773+++ b/arch/x86/boot/compressed/Makefile
5774@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778+ifdef CONSTIFY_PLUGIN
5779+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780+endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785index 67a655a..b924059 100644
5786--- a/arch/x86/boot/compressed/head_32.S
5787+++ b/arch/x86/boot/compressed/head_32.S
5788@@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792- movl $LOAD_PHYSICAL_ADDR, %ebx
5793+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797@@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801- subl $LOAD_PHYSICAL_ADDR, %ebx
5802+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806@@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810- testl %ecx, %ecx
5811- jz 2f
5812+ jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817index 35af09d..99c9676 100644
5818--- a/arch/x86/boot/compressed/head_64.S
5819+++ b/arch/x86/boot/compressed/head_64.S
5820@@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824- movl $LOAD_PHYSICAL_ADDR, %ebx
5825+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829@@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833- movq $LOAD_PHYSICAL_ADDR, %rbp
5834+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839index 3a19d04..7c1d55a 100644
5840--- a/arch/x86/boot/compressed/misc.c
5841+++ b/arch/x86/boot/compressed/misc.c
5842@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861index 89bbf4e..869908e 100644
5862--- a/arch/x86/boot/compressed/relocs.c
5863+++ b/arch/x86/boot/compressed/relocs.c
5864@@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868+#include "../../../../include/generated/autoconf.h"
5869+
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872+static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880+static void read_phdrs(FILE *fp)
5881+{
5882+ unsigned int i;
5883+
5884+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885+ if (!phdr) {
5886+ die("Unable to allocate %d program headers\n",
5887+ ehdr.e_phnum);
5888+ }
5889+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890+ die("Seek to %d failed: %s\n",
5891+ ehdr.e_phoff, strerror(errno));
5892+ }
5893+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894+ die("Cannot read ELF program headers: %s\n",
5895+ strerror(errno));
5896+ }
5897+ for(i = 0; i < ehdr.e_phnum; i++) {
5898+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906+ }
5907+
5908+}
5909+
5910 static void read_shdrs(FILE *fp)
5911 {
5912- int i;
5913+ unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921- int i;
5922+ unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930- int i,j;
5931+ unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939- int i,j;
5940+ unsigned int i,j;
5941+ uint32_t base;
5942+
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950+ base = 0;
5951+ for (j = 0; j < ehdr.e_phnum; j++) {
5952+ if (phdr[j].p_type != PT_LOAD )
5953+ continue;
5954+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955+ continue;
5956+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957+ break;
5958+ }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961- rel->r_offset = elf32_to_cpu(rel->r_offset);
5962+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970- int i;
5971+ unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978- int j;
5979+ unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987- int i, printed = 0;
5988+ unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995- int j;
5996+ unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004- int i;
6005+ unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011- int j;
6012+ unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022+ continue;
6023+
6024+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027+ continue;
6028+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029+ continue;
6030+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031+ continue;
6032+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033+ continue;
6034+#endif
6035+
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043- int i;
6044+ unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052+ read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057index 4d3ff03..e4972ff 100644
6058--- a/arch/x86/boot/cpucheck.c
6059+++ b/arch/x86/boot/cpucheck.c
6060@@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064- asm("movl %%cr0,%0" : "=r" (cr0));
6065+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073- asm("pushfl ; "
6074+ asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078@@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082- asm("cpuid"
6083+ asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087@@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091- asm("cpuid"
6092+ asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096@@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100- asm("cpuid"
6101+ asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105@@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109- asm("cpuid"
6110+ asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144- asm("cpuid"
6145+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147+ asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156index bdb4d45..0476680 100644
6157--- a/arch/x86/boot/header.S
6158+++ b/arch/x86/boot/header.S
6159@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169index db75d07..8e6d0af 100644
6170--- a/arch/x86/boot/memory.c
6171+++ b/arch/x86/boot/memory.c
6172@@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176- int count = 0;
6177+ unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182index 11e8c6e..fdbb1ed 100644
6183--- a/arch/x86/boot/video-vesa.c
6184+++ b/arch/x86/boot/video-vesa.c
6185@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189+ boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194index 43eda28..5ab5fdb 100644
6195--- a/arch/x86/boot/video.c
6196+++ b/arch/x86/boot/video.c
6197@@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201- int i, len = 0;
6202+ unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207index 5b577d5..3c1fed4 100644
6208--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210@@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214+#include <asm/alternative-asm.h>
6215+
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223+#define ret pax_force_retaddr 0, 1; ret
6224+
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229index be6d9e3..21fbbca 100644
6230--- a/arch/x86/crypto/aesni-intel_asm.S
6231+++ b/arch/x86/crypto/aesni-intel_asm.S
6232@@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236+#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244+ pax_force_retaddr 0, 1
6245 ret
6246+ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254+ pax_force_retaddr 0, 1
6255 ret
6256+ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264+ pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272+ pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280+ pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288+ pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296+ pax_force_retaddr 0, 1
6297 ret
6298+ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306+ pax_force_retaddr 0, 1
6307 ret
6308+ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312@@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316+ pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320@@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324+ pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332+ pax_force_retaddr 0, 1
6333 ret
6334+ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338@@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342+ pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346@@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350+ pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358+ pax_force_retaddr 0, 1
6359 ret
6360+ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368+ pax_force_retaddr 0, 1
6369 ret
6370+ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378+ pax_force_retaddr 0, 1
6379 ret
6380+ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388+ pax_force_retaddr 0, 1
6389 ret
6390+ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398+ pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402@@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406+ pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414+ pax_force_retaddr 0, 1
6415 ret
6416+ENDPROC(aesni_ctr_enc)
6417 #endif
6418diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419index 391d245..67f35c2 100644
6420--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422@@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426+#include <asm/alternative-asm.h>
6427+
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431@@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435+ pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439+ pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443@@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447+ pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455+ pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459@@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463+ pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471+ pax_force_retaddr 0, 1
6472 ret;
6473
6474diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475index 6214a9b..1f4fc9a 100644
6476--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478@@ -1,3 +1,5 @@
6479+#include <asm/alternative-asm.h>
6480+
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488+ pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496+ pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504+ pax_force_retaddr
6505 ret
6506diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507index b2c2f57..8470cab 100644
6508--- a/arch/x86/crypto/sha1_ssse3_asm.S
6509+++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510@@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514+#include <asm/alternative-asm.h>
6515+
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519@@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523+ pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528index 5b012a2..36d5364 100644
6529--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531@@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535+#include <asm/alternative-asm.h>
6536+
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544+ pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548@@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552+ pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560+ pax_force_retaddr 0, 1
6561 ret;
6562
6563diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564index 7bcf3fc..f53832f 100644
6565--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567@@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571+#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575@@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579+ pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583@@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587+ pax_force_retaddr 0, 1
6588 ret
6589diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590index fd84387..0b4af7d 100644
6591--- a/arch/x86/ia32/ia32_aout.c
6592+++ b/arch/x86/ia32/ia32_aout.c
6593@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597+ memset(&dump, 0, sizeof(dump));
6598+
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603index 6557769..ef6ae89 100644
6604--- a/arch/x86/ia32/ia32_signal.c
6605+++ b/arch/x86/ia32/ia32_signal.c
6606@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619- void **fpstate)
6620+ void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628- *fpstate = (struct _fpstate_ia32 *) sp;
6629+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637- sp = ((sp + 4) & -16ul) - 4;
6638+ sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655- 0,
6656+ 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664+ else if (current->mm->context.vdso)
6665+ /* Return stub is in 32bit vsyscall page */
6666+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669- rt_sigreturn);
6670+ restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683index a6253ec..4ad2120 100644
6684--- a/arch/x86/ia32/ia32entry.S
6685+++ b/arch/x86/ia32/ia32entry.S
6686@@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690+#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692+#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700+ .macro pax_enter_kernel_user
6701+ pax_set_fptr_mask
6702+#ifdef CONFIG_PAX_MEMORY_UDEREF
6703+ call pax_enter_kernel_user
6704+#endif
6705+ .endm
6706+
6707+ .macro pax_exit_kernel_user
6708+#ifdef CONFIG_PAX_MEMORY_UDEREF
6709+ call pax_exit_kernel_user
6710+#endif
6711+#ifdef CONFIG_PAX_RANDKSTACK
6712+ pushq %rax
6713+ pushq %r11
6714+ call pax_randomize_kstack
6715+ popq %r11
6716+ popq %rax
6717+#endif
6718+ .endm
6719+
6720+.macro pax_erase_kstack
6721+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722+ call pax_erase_kstack
6723+#endif
6724+.endm
6725+
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733- addq $(KERNEL_STACK_OFFSET),%rsp
6734- /*
6735- * No need to follow this irqs on/off section: the syscall
6736- * disabled irqs, here we enable it straight after entry:
6737- */
6738- ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747- CFI_REGISTER rip,r10
6748+ orl $X86_EFLAGS_IF,(%rsp)
6749+ GET_THREAD_INFO(%r11)
6750+ movl TI_sysenter_return(%r11), %r11d
6751+ CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755- pushq_cfi %r10
6756+ pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761+ pax_enter_kernel_user
6762+ /*
6763+ * No need to follow this irqs on/off section: the syscall
6764+ * disabled irqs, here we enable it straight after entry:
6765+ */
6766+ ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769+
6770+#ifdef CONFIG_PAX_MEMORY_UDEREF
6771+ mov $PAX_USER_SHADOW_BASE,%r11
6772+ add %r11,%rbp
6773+#endif
6774+
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779- GET_THREAD_INFO(%r10)
6780- orl $TS_COMPAT,TI_status(%r10)
6781- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782+ GET_THREAD_INFO(%r11)
6783+ orl $TS_COMPAT,TI_status(%r11)
6784+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788@@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792- GET_THREAD_INFO(%r10)
6793+ GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800- andl $~TS_COMPAT,TI_status(%r10)
6801+ pax_exit_kernel_user
6802+ pax_erase_kstack
6803+ andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807@@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811+
6812+ pax_erase_kstack
6813+
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817@@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826@@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830- GET_THREAD_INFO(%r10)
6831+ GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836- testl %edi,TI_flags(%r10)
6837+ testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841@@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850@@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854+
6855+ pax_erase_kstack
6856+
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865+ CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872+ SAVE_ARGS 8*6,0,0
6873+ pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887+
6888+#ifdef CONFIG_PAX_MEMORY_UDEREF
6889+ mov $PAX_USER_SHADOW_BASE,%r11
6890+ add %r11,%r8
6891+#endif
6892+
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897- GET_THREAD_INFO(%r10)
6898- orl $TS_COMPAT,TI_status(%r10)
6899- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900+ GET_THREAD_INFO(%r11)
6901+ orl $TS_COMPAT,TI_status(%r11)
6902+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906@@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910- GET_THREAD_INFO(%r10)
6911+ GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918- andl $~TS_COMPAT,TI_status(%r10)
6919+ pax_exit_kernel_user
6920+ pax_erase_kstack
6921+ andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925@@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934@@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938+
6939+ pax_erase_kstack
6940+
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948- /*
6949- * No need to follow this irqs on/off section: the syscall
6950- * disabled irqs and here we enable it straight after entry:
6951- */
6952- ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959- GET_THREAD_INFO(%r10)
6960- orl $TS_COMPAT,TI_status(%r10)
6961- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962+ pax_enter_kernel_user
6963+ /*
6964+ * No need to follow this irqs on/off section: the syscall
6965+ * disabled irqs and here we enable it straight after entry:
6966+ */
6967+ ENABLE_INTERRUPTS(CLBR_NONE)
6968+ GET_THREAD_INFO(%r11)
6969+ orl $TS_COMPAT,TI_status(%r11)
6970+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974@@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978+
6979+ pax_erase_kstack
6980+
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984@@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988+ pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993index f6f5c53..b358b28 100644
6994--- a/arch/x86/ia32/sys_ia32.c
6995+++ b/arch/x86/ia32/sys_ia32.c
6996@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000- typeof(ubuf->st_uid) uid = 0;
7001- typeof(ubuf->st_gid) gid = 0;
7002+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011- set ? (sigset_t __user *)&s : NULL,
7012- oset ? (sigset_t __user *)&s : NULL,
7013+ set ? (sigset_t __force_user *)&s : NULL,
7014+ oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064index 091508b..e245ff2 100644
7065--- a/arch/x86/include/asm/alternative-asm.h
7066+++ b/arch/x86/include/asm/alternative-asm.h
7067@@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071-1: lock
7072+672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075- .long 1b - .
7076+ .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080@@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085+ .macro pax_force_retaddr_bts rip=0
7086+ btsq $63,\rip(%rsp)
7087+ .endm
7088+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089+ .macro pax_force_retaddr rip=0, reload=0
7090+ btsq $63,\rip(%rsp)
7091+ .endm
7092+ .macro pax_force_fptr ptr
7093+ btsq $63,\ptr
7094+ .endm
7095+ .macro pax_set_fptr_mask
7096+ .endm
7097+#endif
7098+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099+ .macro pax_force_retaddr rip=0, reload=0
7100+ .if \reload
7101+ pax_set_fptr_mask
7102+ .endif
7103+ orq %r10,\rip(%rsp)
7104+ .endm
7105+ .macro pax_force_fptr ptr
7106+ orq %r10,\ptr
7107+ .endm
7108+ .macro pax_set_fptr_mask
7109+ movabs $0x8000000000000000,%r10
7110+ .endm
7111+#endif
7112+#else
7113+ .macro pax_force_retaddr rip=0, reload=0
7114+ .endm
7115+ .macro pax_force_fptr ptr
7116+ .endm
7117+ .macro pax_force_retaddr_bts rip=0
7118+ .endm
7119+ .macro pax_set_fptr_mask
7120+ .endm
7121+#endif
7122+
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127index 37ad100..7d47faa 100644
7128--- a/arch/x86/include/asm/alternative.h
7129+++ b/arch/x86/include/asm/alternative.h
7130@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134- ".section .altinstr_replacement, \"ax\"\n" \
7135+ ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140index 1a6c09a..fec2432 100644
7141--- a/arch/x86/include/asm/apic.h
7142+++ b/arch/x86/include/asm/apic.h
7143@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147-extern unsigned int apic_verbosity;
7148+extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153index 20370c6..a2eb9b0 100644
7154--- a/arch/x86/include/asm/apm.h
7155+++ b/arch/x86/include/asm/apm.h
7156@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160- "lcall *%%cs:apm_bios_entry\n\t"
7161+ "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169- "lcall *%%cs:apm_bios_entry\n\t"
7170+ "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175index 58cb6d4..ca9010d 100644
7176--- a/arch/x86/include/asm/atomic.h
7177+++ b/arch/x86/include/asm/atomic.h
7178@@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182- return (*(volatile int *)&(v)->counter);
7183+ return (*(volatile const int *)&(v)->counter);
7184+}
7185+
7186+/**
7187+ * atomic_read_unchecked - read atomic variable
7188+ * @v: pointer of type atomic_unchecked_t
7189+ *
7190+ * Atomically reads the value of @v.
7191+ */
7192+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193+{
7194+ return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202+ * atomic_set_unchecked - set atomic variable
7203+ * @v: pointer of type atomic_unchecked_t
7204+ * @i: required value
7205+ *
7206+ * Atomically sets the value of @v to @i.
7207+ */
7208+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209+{
7210+ v->counter = i;
7211+}
7212+
7213+/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221- asm volatile(LOCK_PREFIX "addl %1,%0"
7222+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223+
7224+#ifdef CONFIG_PAX_REFCOUNT
7225+ "jno 0f\n"
7226+ LOCK_PREFIX "subl %1,%0\n"
7227+ "int $4\n0:\n"
7228+ _ASM_EXTABLE(0b, 0b)
7229+#endif
7230+
7231+ : "+m" (v->counter)
7232+ : "ir" (i));
7233+}
7234+
7235+/**
7236+ * atomic_add_unchecked - add integer to atomic variable
7237+ * @i: integer value to add
7238+ * @v: pointer of type atomic_unchecked_t
7239+ *
7240+ * Atomically adds @i to @v.
7241+ */
7242+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243+{
7244+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252- asm volatile(LOCK_PREFIX "subl %1,%0"
7253+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254+
7255+#ifdef CONFIG_PAX_REFCOUNT
7256+ "jno 0f\n"
7257+ LOCK_PREFIX "addl %1,%0\n"
7258+ "int $4\n0:\n"
7259+ _ASM_EXTABLE(0b, 0b)
7260+#endif
7261+
7262+ : "+m" (v->counter)
7263+ : "ir" (i));
7264+}
7265+
7266+/**
7267+ * atomic_sub_unchecked - subtract integer from atomic variable
7268+ * @i: integer value to subtract
7269+ * @v: pointer of type atomic_unchecked_t
7270+ *
7271+ * Atomically subtracts @i from @v.
7272+ */
7273+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274+{
7275+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285+
7286+#ifdef CONFIG_PAX_REFCOUNT
7287+ "jno 0f\n"
7288+ LOCK_PREFIX "addl %2,%0\n"
7289+ "int $4\n0:\n"
7290+ _ASM_EXTABLE(0b, 0b)
7291+#endif
7292+
7293+ "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301- asm volatile(LOCK_PREFIX "incl %0"
7302+ asm volatile(LOCK_PREFIX "incl %0\n"
7303+
7304+#ifdef CONFIG_PAX_REFCOUNT
7305+ "jno 0f\n"
7306+ LOCK_PREFIX "decl %0\n"
7307+ "int $4\n0:\n"
7308+ _ASM_EXTABLE(0b, 0b)
7309+#endif
7310+
7311+ : "+m" (v->counter));
7312+}
7313+
7314+/**
7315+ * atomic_inc_unchecked - increment atomic variable
7316+ * @v: pointer of type atomic_unchecked_t
7317+ *
7318+ * Atomically increments @v by 1.
7319+ */
7320+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321+{
7322+ asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330- asm volatile(LOCK_PREFIX "decl %0"
7331+ asm volatile(LOCK_PREFIX "decl %0\n"
7332+
7333+#ifdef CONFIG_PAX_REFCOUNT
7334+ "jno 0f\n"
7335+ LOCK_PREFIX "incl %0\n"
7336+ "int $4\n0:\n"
7337+ _ASM_EXTABLE(0b, 0b)
7338+#endif
7339+
7340+ : "+m" (v->counter));
7341+}
7342+
7343+/**
7344+ * atomic_dec_unchecked - decrement atomic variable
7345+ * @v: pointer of type atomic_unchecked_t
7346+ *
7347+ * Atomically decrements @v by 1.
7348+ */
7349+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350+{
7351+ asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360+ asm volatile(LOCK_PREFIX "decl %0\n"
7361+
7362+#ifdef CONFIG_PAX_REFCOUNT
7363+ "jno 0f\n"
7364+ LOCK_PREFIX "incl %0\n"
7365+ "int $4\n0:\n"
7366+ _ASM_EXTABLE(0b, 0b)
7367+#endif
7368+
7369+ "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378+ asm volatile(LOCK_PREFIX "incl %0\n"
7379+
7380+#ifdef CONFIG_PAX_REFCOUNT
7381+ "jno 0f\n"
7382+ LOCK_PREFIX "decl %0\n"
7383+ "int $4\n0:\n"
7384+ _ASM_EXTABLE(0b, 0b)
7385+#endif
7386+
7387+ "sete %1\n"
7388+ : "+m" (v->counter), "=qm" (c)
7389+ : : "memory");
7390+ return c != 0;
7391+}
7392+
7393+/**
7394+ * atomic_inc_and_test_unchecked - increment and test
7395+ * @v: pointer of type atomic_unchecked_t
7396+ *
7397+ * Atomically increments @v by 1
7398+ * and returns true if the result is zero, or false for all
7399+ * other cases.
7400+ */
7401+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402+{
7403+ unsigned char c;
7404+
7405+ asm volatile(LOCK_PREFIX "incl %0\n"
7406+ "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416+
7417+#ifdef CONFIG_PAX_REFCOUNT
7418+ "jno 0f\n"
7419+ LOCK_PREFIX "subl %2,%0\n"
7420+ "int $4\n0:\n"
7421+ _ASM_EXTABLE(0b, 0b)
7422+#endif
7423+
7424+ "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432- return i + xadd(&v->counter, i);
7433+ return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441+ * atomic_add_return_unchecked - add integer and return
7442+ * @i: integer value to add
7443+ * @v: pointer of type atomic_unchecked_t
7444+ *
7445+ * Atomically adds @i to @v and returns @i + @v
7446+ */
7447+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448+{
7449+#ifdef CONFIG_M386
7450+ int __i;
7451+ unsigned long flags;
7452+ if (unlikely(boot_cpu_data.x86 <= 3))
7453+ goto no_xadd;
7454+#endif
7455+ /* Modern 486+ processor */
7456+ return i + xadd(&v->counter, i);
7457+
7458+#ifdef CONFIG_M386
7459+no_xadd: /* Legacy 386 processor */
7460+ raw_local_irq_save(flags);
7461+ __i = atomic_read_unchecked(v);
7462+ atomic_set_unchecked(v, i + __i);
7463+ raw_local_irq_restore(flags);
7464+ return i + __i;
7465+#endif
7466+}
7467+
7468+/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477+{
7478+ return atomic_add_return_unchecked(1, v);
7479+}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488+{
7489+ return cmpxchg(&v->counter, old, new);
7490+}
7491+
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498+{
7499+ return xchg(&v->counter, new);
7500+}
7501+
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509- int c, old;
7510+ int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513- if (unlikely(c == (u)))
7514+ if (unlikely(c == u))
7515 break;
7516- old = atomic_cmpxchg((v), c, c + (a));
7517+
7518+ asm volatile("addl %2,%0\n"
7519+
7520+#ifdef CONFIG_PAX_REFCOUNT
7521+ "jno 0f\n"
7522+ "subl %2,%0\n"
7523+ "int $4\n0:\n"
7524+ _ASM_EXTABLE(0b, 0b)
7525+#endif
7526+
7527+ : "=r" (new)
7528+ : "0" (c), "ir" (a));
7529+
7530+ old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538+/**
7539+ * atomic_inc_not_zero_hint - increment if not null
7540+ * @v: pointer of type atomic_t
7541+ * @hint: probable value of the atomic before the increment
7542+ *
7543+ * This version of atomic_inc_not_zero() gives a hint of probable
7544+ * value of the atomic. This helps processor to not read the memory
7545+ * before doing the atomic read/modify/write cycle, lowering
7546+ * number of bus transactions on some arches.
7547+ *
7548+ * Returns: 0 if increment was not done, 1 otherwise.
7549+ */
7550+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552+{
7553+ int val, c = hint, new;
7554+
7555+ /* sanity test, should be removed by compiler if hint is a constant */
7556+ if (!hint)
7557+ return __atomic_add_unless(v, 1, 0);
7558+
7559+ do {
7560+ asm volatile("incl %0\n"
7561+
7562+#ifdef CONFIG_PAX_REFCOUNT
7563+ "jno 0f\n"
7564+ "decl %0\n"
7565+ "int $4\n0:\n"
7566+ _ASM_EXTABLE(0b, 0b)
7567+#endif
7568+
7569+ : "=r" (new)
7570+ : "0" (c));
7571+
7572+ val = atomic_cmpxchg(v, c, new);
7573+ if (val == c)
7574+ return 1;
7575+ c = val;
7576+ } while (c);
7577+
7578+ return 0;
7579+}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584index 24098aa..1e37723 100644
7585--- a/arch/x86/include/asm/atomic64_32.h
7586+++ b/arch/x86/include/asm/atomic64_32.h
7587@@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591+#ifdef CONFIG_PAX_REFCOUNT
7592+typedef struct {
7593+ u64 __aligned(8) counter;
7594+} atomic64_unchecked_t;
7595+#else
7596+typedef atomic64_t atomic64_unchecked_t;
7597+#endif
7598+
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607+ * @p: pointer to type atomic64_unchecked_t
7608+ * @o: expected value
7609+ * @n: new value
7610+ *
7611+ * Atomically sets @v to @n if it was equal to @o and returns
7612+ * the old value.
7613+ */
7614+
7615+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616+{
7617+ return cmpxchg64(&v->counter, o, n);
7618+}
7619+
7620+/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628+ * atomic64_set_unchecked - set atomic64 variable
7629+ * @v: pointer to type atomic64_unchecked_t
7630+ * @n: value to assign
7631+ *
7632+ * Atomically sets the value of @v to @n.
7633+ */
7634+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635+{
7636+ unsigned high = (unsigned)(i >> 32);
7637+ unsigned low = (unsigned)i;
7638+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7639+ : "+b" (low), "+c" (high)
7640+ : "S" (v)
7641+ : "eax", "edx", "memory"
7642+ );
7643+}
7644+
7645+/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653+ * atomic64_read_unchecked - read atomic64 variable
7654+ * @v: pointer to type atomic64_unchecked_t
7655+ *
7656+ * Atomically reads the value of @v and returns it.
7657+ */
7658+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659+{
7660+ long long r;
7661+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662+ : "=A" (r), "+c" (v)
7663+ : : "memory"
7664+ );
7665+ return r;
7666+ }
7667+
7668+/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676+/**
7677+ * atomic64_add_return_unchecked - add and return
7678+ * @i: integer value to add
7679+ * @v: pointer to type atomic64_unchecked_t
7680+ *
7681+ * Atomically adds @i to @v and returns @i + *@v
7682+ */
7683+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684+{
7685+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686+ : "+A" (i), "+c" (v)
7687+ : : "memory"
7688+ );
7689+ return i;
7690+}
7691+
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700+{
7701+ long long a;
7702+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703+ : "=A" (a)
7704+ : "S" (v)
7705+ : "memory", "ecx"
7706+ );
7707+ return a;
7708+}
7709+
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717+ * atomic64_add_unchecked - add integer to atomic64 variable
7718+ * @i: integer value to add
7719+ * @v: pointer to type atomic64_unchecked_t
7720+ *
7721+ * Atomically adds @i to @v.
7722+ */
7723+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724+{
7725+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726+ : "+A" (i), "+c" (v)
7727+ : : "memory"
7728+ );
7729+ return i;
7730+}
7731+
7732+/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737index 0e1cbfc..5623683 100644
7738--- a/arch/x86/include/asm/atomic64_64.h
7739+++ b/arch/x86/include/asm/atomic64_64.h
7740@@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744- return (*(volatile long *)&(v)->counter);
7745+ return (*(volatile const long *)&(v)->counter);
7746+}
7747+
7748+/**
7749+ * atomic64_read_unchecked - read atomic64 variable
7750+ * @v: pointer of type atomic64_unchecked_t
7751+ *
7752+ * Atomically reads the value of @v.
7753+ * Doesn't imply a read memory barrier.
7754+ */
7755+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756+{
7757+ return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765+ * atomic64_set_unchecked - set atomic64 variable
7766+ * @v: pointer to type atomic64_unchecked_t
7767+ * @i: required value
7768+ *
7769+ * Atomically sets the value of @v to @i.
7770+ */
7771+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772+{
7773+ v->counter = i;
7774+}
7775+
7776+/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785+
7786+#ifdef CONFIG_PAX_REFCOUNT
7787+ "jno 0f\n"
7788+ LOCK_PREFIX "subq %1,%0\n"
7789+ "int $4\n0:\n"
7790+ _ASM_EXTABLE(0b, 0b)
7791+#endif
7792+
7793+ : "=m" (v->counter)
7794+ : "er" (i), "m" (v->counter));
7795+}
7796+
7797+/**
7798+ * atomic64_add_unchecked - add integer to atomic64 variable
7799+ * @i: integer value to add
7800+ * @v: pointer to type atomic64_unchecked_t
7801+ *
7802+ * Atomically adds @i to @v.
7803+ */
7804+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805+{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813- asm volatile(LOCK_PREFIX "subq %1,%0"
7814+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815+
7816+#ifdef CONFIG_PAX_REFCOUNT
7817+ "jno 0f\n"
7818+ LOCK_PREFIX "addq %1,%0\n"
7819+ "int $4\n0:\n"
7820+ _ASM_EXTABLE(0b, 0b)
7821+#endif
7822+
7823+ : "=m" (v->counter)
7824+ : "er" (i), "m" (v->counter));
7825+}
7826+
7827+/**
7828+ * atomic64_sub_unchecked - subtract the atomic64 variable
7829+ * @i: integer value to subtract
7830+ * @v: pointer to type atomic64_unchecked_t
7831+ *
7832+ * Atomically subtracts @i from @v.
7833+ */
7834+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835+{
7836+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846+
7847+#ifdef CONFIG_PAX_REFCOUNT
7848+ "jno 0f\n"
7849+ LOCK_PREFIX "addq %2,%0\n"
7850+ "int $4\n0:\n"
7851+ _ASM_EXTABLE(0b, 0b)
7852+#endif
7853+
7854+ "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862+ asm volatile(LOCK_PREFIX "incq %0\n"
7863+
7864+#ifdef CONFIG_PAX_REFCOUNT
7865+ "jno 0f\n"
7866+ LOCK_PREFIX "decq %0\n"
7867+ "int $4\n0:\n"
7868+ _ASM_EXTABLE(0b, 0b)
7869+#endif
7870+
7871+ : "=m" (v->counter)
7872+ : "m" (v->counter));
7873+}
7874+
7875+/**
7876+ * atomic64_inc_unchecked - increment atomic64 variable
7877+ * @v: pointer to type atomic64_unchecked_t
7878+ *
7879+ * Atomically increments @v by 1.
7880+ */
7881+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882+{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890- asm volatile(LOCK_PREFIX "decq %0"
7891+ asm volatile(LOCK_PREFIX "decq %0\n"
7892+
7893+#ifdef CONFIG_PAX_REFCOUNT
7894+ "jno 0f\n"
7895+ LOCK_PREFIX "incq %0\n"
7896+ "int $4\n0:\n"
7897+ _ASM_EXTABLE(0b, 0b)
7898+#endif
7899+
7900+ : "=m" (v->counter)
7901+ : "m" (v->counter));
7902+}
7903+
7904+/**
7905+ * atomic64_dec_unchecked - decrement atomic64 variable
7906+ * @v: pointer to type atomic64_t
7907+ *
7908+ * Atomically decrements @v by 1.
7909+ */
7910+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911+{
7912+ asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921+ asm volatile(LOCK_PREFIX "decq %0\n"
7922+
7923+#ifdef CONFIG_PAX_REFCOUNT
7924+ "jno 0f\n"
7925+ LOCK_PREFIX "incq %0\n"
7926+ "int $4\n0:\n"
7927+ _ASM_EXTABLE(0b, 0b)
7928+#endif
7929+
7930+ "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939+ asm volatile(LOCK_PREFIX "incq %0\n"
7940+
7941+#ifdef CONFIG_PAX_REFCOUNT
7942+ "jno 0f\n"
7943+ LOCK_PREFIX "decq %0\n"
7944+ "int $4\n0:\n"
7945+ _ASM_EXTABLE(0b, 0b)
7946+#endif
7947+
7948+ "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958+
7959+#ifdef CONFIG_PAX_REFCOUNT
7960+ "jno 0f\n"
7961+ LOCK_PREFIX "subq %2,%0\n"
7962+ "int $4\n0:\n"
7963+ _ASM_EXTABLE(0b, 0b)
7964+#endif
7965+
7966+ "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974+ return i + xadd_check_overflow(&v->counter, i);
7975+}
7976+
7977+/**
7978+ * atomic64_add_return_unchecked - add and return
7979+ * @i: integer value to add
7980+ * @v: pointer to type atomic64_unchecked_t
7981+ *
7982+ * Atomically adds @i to @v and returns @i + @v
7983+ */
7984+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985+{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994+{
7995+ return atomic64_add_return_unchecked(1, v);
7996+}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005+{
8006+ return cmpxchg(&v->counter, old, new);
8007+}
8008+
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016- long c, old;
8017+ long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020- if (unlikely(c == (u)))
8021+ if (unlikely(c == u))
8022 break;
8023- old = atomic64_cmpxchg((v), c, c + (a));
8024+
8025+ asm volatile("add %2,%0\n"
8026+
8027+#ifdef CONFIG_PAX_REFCOUNT
8028+ "jno 0f\n"
8029+ "sub %2,%0\n"
8030+ "int $4\n0:\n"
8031+ _ASM_EXTABLE(0b, 0b)
8032+#endif
8033+
8034+ : "=r" (new)
8035+ : "0" (c), "ir" (a));
8036+
8037+ old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042- return c != (u);
8043+ return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048index 1775d6e..b65017f 100644
8049--- a/arch/x86/include/asm/bitops.h
8050+++ b/arch/x86/include/asm/bitops.h
8051@@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061index 5e1a2ee..c9f9533 100644
8062--- a/arch/x86/include/asm/boot.h
8063+++ b/arch/x86/include/asm/boot.h
8064@@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073+#ifndef __ASSEMBLY__
8074+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076+#endif
8077+
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082index 48f99f1..d78ebf9 100644
8083--- a/arch/x86/include/asm/cache.h
8084+++ b/arch/x86/include/asm/cache.h
8085@@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093+#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102index 4e12668..501d239 100644
8103--- a/arch/x86/include/asm/cacheflush.h
8104+++ b/arch/x86/include/asm/cacheflush.h
8105@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109- return -1;
8110+ return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115index 46fc474..b02b0f9 100644
8116--- a/arch/x86/include/asm/checksum_32.h
8117+++ b/arch/x86/include/asm/checksum_32.h
8118@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123+ int len, __wsum sum,
8124+ int *src_err_ptr, int *dst_err_ptr);
8125+
8126+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127+ int len, __wsum sum,
8128+ int *src_err_ptr, int *dst_err_ptr);
8129+
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137- return csum_partial_copy_generic((__force void *)src, dst,
8138+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146- return csum_partial_copy_generic(src, (__force void *)dst,
8147+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152index 5d3acdf..6447a02 100644
8153--- a/arch/x86/include/asm/cmpxchg.h
8154+++ b/arch/x86/include/asm/cmpxchg.h
8155@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159+extern void __xadd_check_overflow_wrong_size(void)
8160+ __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168+#define __xadd_check_overflow(ptr, inc, lock) \
8169+ ({ \
8170+ __typeof__ (*(ptr)) __ret = (inc); \
8171+ switch (sizeof(*(ptr))) { \
8172+ case __X86_CASE_L: \
8173+ asm volatile (lock "xaddl %0, %1\n" \
8174+ "jno 0f\n" \
8175+ "mov %0,%1\n" \
8176+ "int $4\n0:\n" \
8177+ _ASM_EXTABLE(0b, 0b) \
8178+ : "+r" (__ret), "+m" (*(ptr)) \
8179+ : : "memory", "cc"); \
8180+ break; \
8181+ case __X86_CASE_Q: \
8182+ asm volatile (lock "xaddq %q0, %1\n" \
8183+ "jno 0f\n" \
8184+ "mov %0,%1\n" \
8185+ "int $4\n0:\n" \
8186+ _ASM_EXTABLE(0b, 0b) \
8187+ : "+r" (__ret), "+m" (*(ptr)) \
8188+ : : "memory", "cc"); \
8189+ break; \
8190+ default: \
8191+ __xadd_check_overflow_wrong_size(); \
8192+ } \
8193+ __ret; \
8194+ })
8195+
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204+
8205 #endif /* ASM_X86_CMPXCHG_H */
8206diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207index f3444f7..051a196 100644
8208--- a/arch/x86/include/asm/cpufeature.h
8209+++ b/arch/x86/include/asm/cpufeature.h
8210@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214- ".section .altinstr_replacement,\"ax\"\n"
8215+ ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220index 41935fa..3b40db8 100644
8221--- a/arch/x86/include/asm/desc.h
8222+++ b/arch/x86/include/asm/desc.h
8223@@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227+#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235+ desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243-extern gate_desc idt_table[];
8244-
8245-struct gdt_page {
8246- struct desc_struct gdt[GDT_ENTRIES];
8247-} __attribute__((aligned(PAGE_SIZE)));
8248-
8249-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250+extern gate_desc idt_table[256];
8251
8252+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255- return per_cpu(gdt_page, cpu).gdt;
8256+ return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264- gate->a = (seg << 16) | (base & 0xffff);
8265- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266+ gate->gate.offset_low = base;
8267+ gate->gate.seg = seg;
8268+ gate->gate.reserved = 0;
8269+ gate->gate.type = type;
8270+ gate->gate.s = 0;
8271+ gate->gate.dpl = dpl;
8272+ gate->gate.p = 1;
8273+ gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281+ pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283+ pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288+ pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290+ pax_close_kernel();
8291 }
8292
8293 static inline void
8294@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298+ pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300+ pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308+ pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310+ pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318+ pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321+ pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329-static inline void _set_gate(int gate, unsigned type, void *addr,
8330+static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338-static inline void set_intr_gate(unsigned int n, void *addr)
8339+static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347-static inline void set_system_intr_gate(unsigned int n, void *addr)
8348+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354-static inline void set_system_trap_gate(unsigned int n, void *addr)
8355+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361-static inline void set_trap_gate(unsigned int n, void *addr)
8362+static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388+#ifdef CONFIG_X86_32
8389+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390+{
8391+ struct desc_struct d;
8392+
8393+ if (likely(limit))
8394+ limit = (limit - 1UL) >> PAGE_SHIFT;
8395+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397+}
8398+#endif
8399+
8400 #endif /* _ASM_X86_DESC_H */
8401diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402index 278441f..b95a174 100644
8403--- a/arch/x86/include/asm/desc_defs.h
8404+++ b/arch/x86/include/asm/desc_defs.h
8405@@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409+ struct {
8410+ u16 offset_low;
8411+ u16 seg;
8412+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413+ unsigned offset_high: 16;
8414+ } gate;
8415 };
8416 } __attribute__((packed));
8417
8418diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419index 908b969..a1f4eb4 100644
8420--- a/arch/x86/include/asm/e820.h
8421+++ b/arch/x86/include/asm/e820.h
8422@@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426-#define BIOS_BEGIN 0x000a0000
8427+#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432index 5f962df..7289f09 100644
8433--- a/arch/x86/include/asm/elf.h
8434+++ b/arch/x86/include/asm/elf.h
8435@@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439+#ifdef CONFIG_PAX_SEGMEXEC
8440+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441+#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443+#endif
8444+
8445+#ifdef CONFIG_PAX_ASLR
8446+#ifdef CONFIG_X86_32
8447+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448+
8449+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451+#else
8452+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453+
8454+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456+#endif
8457+#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461@@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465- if (vdso_enabled) \
8466- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467- (unsigned long)current->mm->context.vdso); \
8468+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472@@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486-#define arch_randomize_brk arch_randomize_brk
8487-
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492index cc70c1c..d96d011 100644
8493--- a/arch/x86/include/asm/emergency-restart.h
8494+++ b/arch/x86/include/asm/emergency-restart.h
8495@@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499-extern void machine_emergency_restart(void);
8500+extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504index d09bb03..4ea4194 100644
8505--- a/arch/x86/include/asm/futex.h
8506+++ b/arch/x86/include/asm/futex.h
8507@@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511+ typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523+ typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527@@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531- "+m" (*uaddr), "=&r" (tem) \
8532+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566index eb92a6e..b98b2f4 100644
8567--- a/arch/x86/include/asm/hw_irq.h
8568+++ b/arch/x86/include/asm/hw_irq.h
8569@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573-extern atomic_t irq_err_count;
8574-extern atomic_t irq_mis_count;
8575+extern atomic_unchecked_t irq_err_count;
8576+extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581index c9e09ea..73888df 100644
8582--- a/arch/x86/include/asm/i387.h
8583+++ b/arch/x86/include/asm/i387.h
8584@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591+#endif
8592+
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603+#endif
8604+
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612- in L1 during context switch. The best choices are unfortunately
8613- different for UP and SMP */
8614-#ifdef CONFIG_SMP
8615-#define safe_address (__per_cpu_offset[0])
8616-#else
8617-#define safe_address (kstat_cpu(0).cpustat.user)
8618-#endif
8619+ in L1 during context switch. */
8620+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628- __save_init_fpu(me->task);
8629+ __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634index d8e8eef..99f81ae 100644
8635--- a/arch/x86/include/asm/io.h
8636+++ b/arch/x86/include/asm/io.h
8637@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643+{
8644+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645+}
8646+
8647+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648+{
8649+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650+}
8651+
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656index bba3cf8..06bc8da 100644
8657--- a/arch/x86/include/asm/irqflags.h
8658+++ b/arch/x86/include/asm/irqflags.h
8659@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667+
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672index 5478825..839e88c 100644
8673--- a/arch/x86/include/asm/kprobes.h
8674+++ b/arch/x86/include/asm/kprobes.h
8675@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679-#define MAX_STACK_SIZE 64
8680-#define MIN_STACK_SIZE(ADDR) \
8681- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682- THREAD_SIZE - (unsigned long)(ADDR))) \
8683- ? (MAX_STACK_SIZE) \
8684- : (((unsigned long)current_thread_info()) + \
8685- THREAD_SIZE - (unsigned long)(ADDR)))
8686+#define MAX_STACK_SIZE 64UL
8687+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692index b4973f4..7c4d3fc 100644
8693--- a/arch/x86/include/asm/kvm_host.h
8694+++ b/arch/x86/include/asm/kvm_host.h
8695@@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699- atomic_t invlpg_counter;
8700+ atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704@@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708-};
8709+} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714index 9cdae5d..300d20f 100644
8715--- a/arch/x86/include/asm/local.h
8716+++ b/arch/x86/include/asm/local.h
8717@@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721- asm volatile(_ASM_INC "%0"
8722+ asm volatile(_ASM_INC "%0\n"
8723+
8724+#ifdef CONFIG_PAX_REFCOUNT
8725+ "jno 0f\n"
8726+ _ASM_DEC "%0\n"
8727+ "int $4\n0:\n"
8728+ _ASM_EXTABLE(0b, 0b)
8729+#endif
8730+
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736- asm volatile(_ASM_DEC "%0"
8737+ asm volatile(_ASM_DEC "%0\n"
8738+
8739+#ifdef CONFIG_PAX_REFCOUNT
8740+ "jno 0f\n"
8741+ _ASM_INC "%0\n"
8742+ "int $4\n0:\n"
8743+ _ASM_EXTABLE(0b, 0b)
8744+#endif
8745+
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751- asm volatile(_ASM_ADD "%1,%0"
8752+ asm volatile(_ASM_ADD "%1,%0\n"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ _ASM_SUB "%1,%0\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767- asm volatile(_ASM_SUB "%1,%0"
8768+ asm volatile(_ASM_SUB "%1,%0\n"
8769+
8770+#ifdef CONFIG_PAX_REFCOUNT
8771+ "jno 0f\n"
8772+ _ASM_ADD "%1,%0\n"
8773+ "int $4\n0:\n"
8774+ _ASM_EXTABLE(0b, 0b)
8775+#endif
8776+
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784- asm volatile(_ASM_SUB "%2,%0; sete %1"
8785+ asm volatile(_ASM_SUB "%2,%0\n"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ _ASM_ADD "%2,%0\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794+ "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802- asm volatile(_ASM_DEC "%0; sete %1"
8803+ asm volatile(_ASM_DEC "%0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ _ASM_INC "%0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812+ "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820- asm volatile(_ASM_INC "%0; sete %1"
8821+ asm volatile(_ASM_INC "%0\n"
8822+
8823+#ifdef CONFIG_PAX_REFCOUNT
8824+ "jno 0f\n"
8825+ _ASM_DEC "%0\n"
8826+ "int $4\n0:\n"
8827+ _ASM_EXTABLE(0b, 0b)
8828+#endif
8829+
8830+ "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838- asm volatile(_ASM_ADD "%2,%0; sets %1"
8839+ asm volatile(_ASM_ADD "%2,%0\n"
8840+
8841+#ifdef CONFIG_PAX_REFCOUNT
8842+ "jno 0f\n"
8843+ _ASM_SUB "%2,%0\n"
8844+ "int $4\n0:\n"
8845+ _ASM_EXTABLE(0b, 0b)
8846+#endif
8847+
8848+ "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856- asm volatile(_ASM_XADD "%0, %1;"
8857+ asm volatile(_ASM_XADD "%0, %1\n"
8858+
8859+#ifdef CONFIG_PAX_REFCOUNT
8860+ "jno 0f\n"
8861+ _ASM_MOV "%0,%1\n"
8862+ "int $4\n0:\n"
8863+ _ASM_EXTABLE(0b, 0b)
8864+#endif
8865+
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870index 593e51d..fa69c9a 100644
8871--- a/arch/x86/include/asm/mman.h
8872+++ b/arch/x86/include/asm/mman.h
8873@@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877+#ifdef __KERNEL__
8878+#ifndef __ASSEMBLY__
8879+#ifdef CONFIG_X86_32
8880+#define arch_mmap_check i386_mmap_check
8881+int i386_mmap_check(unsigned long addr, unsigned long len,
8882+ unsigned long flags);
8883+#endif
8884+#endif
8885+#endif
8886+
8887 #endif /* _ASM_X86_MMAN_H */
8888diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889index 5f55e69..e20bfb1 100644
8890--- a/arch/x86/include/asm/mmu.h
8891+++ b/arch/x86/include/asm/mmu.h
8892@@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896- void *ldt;
8897+ struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901@@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905- void *vdso;
8906+ unsigned long vdso;
8907+
8908+#ifdef CONFIG_X86_32
8909+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910+ unsigned long user_cs_base;
8911+ unsigned long user_cs_limit;
8912+
8913+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914+ cpumask_t cpu_user_cs_mask;
8915+#endif
8916+
8917+#endif
8918+#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923index 6902152..399f3a2 100644
8924--- a/arch/x86/include/asm/mmu_context.h
8925+++ b/arch/x86/include/asm/mmu_context.h
8926@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930+
8931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932+ unsigned int i;
8933+ pgd_t *pgd;
8934+
8935+ pax_open_kernel();
8936+ pgd = get_cpu_pgd(smp_processor_id());
8937+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938+ set_pgd_batched(pgd+i, native_make_pgd(0));
8939+ pax_close_kernel();
8940+#endif
8941+
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950+ int tlbstate = TLBSTATE_OK;
8951+#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956+ tlbstate = percpu_read(cpu_tlbstate.state);
8957+#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964+#ifdef CONFIG_PAX_PER_CPU_PGD
8965+ pax_open_kernel();
8966+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968+ pax_close_kernel();
8969+ load_cr3(get_cpu_pgd(cpu));
8970+#else
8971 load_cr3(next->pgd);
8972+#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980- }
8981+
8982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983+ if (!(__supported_pte_mask & _PAGE_NX)) {
8984+ smp_mb__before_clear_bit();
8985+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986+ smp_mb__after_clear_bit();
8987+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8988+ }
8989+#endif
8990+
8991+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993+ prev->context.user_cs_limit != next->context.user_cs_limit))
8994+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996+ else if (unlikely(tlbstate != TLBSTATE_OK))
8997+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998+#endif
8999+#endif
9000+
9001+ }
9002 else {
9003+
9004+#ifdef CONFIG_PAX_PER_CPU_PGD
9005+ pax_open_kernel();
9006+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008+ pax_close_kernel();
9009+ load_cr3(get_cpu_pgd(cpu));
9010+#endif
9011+
9012+#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020+
9021+#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023+#endif
9024+
9025 load_LDT_nolock(&next->context);
9026+
9027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028+ if (!(__supported_pte_mask & _PAGE_NX))
9029+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9030+#endif
9031+
9032+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033+#ifdef CONFIG_PAX_PAGEEXEC
9034+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035+#endif
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+
9039 }
9040+#endif
9041 }
9042-#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047index 9eae775..c914fea 100644
9048--- a/arch/x86/include/asm/module.h
9049+++ b/arch/x86/include/asm/module.h
9050@@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054+#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058@@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062-#ifdef CONFIG_X86_32
9063-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068+#else
9069+#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072+#ifdef CONFIG_PAX_MEMORY_UDEREF
9073+#define MODULE_PAX_UDEREF "UDEREF "
9074+#else
9075+#define MODULE_PAX_UDEREF ""
9076+#endif
9077+
9078+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079+
9080 #endif /* _ASM_X86_MODULE_H */
9081diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082index 7639dbf..e08a58c 100644
9083--- a/arch/x86/include/asm/page_64_types.h
9084+++ b/arch/x86/include/asm/page_64_types.h
9085@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089-extern unsigned long phys_base;
9090+extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095index a7d2db9..edb023e 100644
9096--- a/arch/x86/include/asm/paravirt.h
9097+++ b/arch/x86/include/asm/paravirt.h
9098@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103+{
9104+ pgdval_t val = native_pgd_val(pgd);
9105+
9106+ if (sizeof(pgdval_t) > sizeof(long))
9107+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108+ val, (u64)val >> 32);
9109+ else
9110+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111+ val);
9112+}
9113+
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121+#ifdef CONFIG_PAX_KERNEXEC
9122+static inline unsigned long pax_open_kernel(void)
9123+{
9124+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125+}
9126+
9127+static inline unsigned long pax_close_kernel(void)
9128+{
9129+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130+}
9131+#else
9132+static inline unsigned long pax_open_kernel(void) { return 0; }
9133+static inline unsigned long pax_close_kernel(void) { return 0; }
9134+#endif
9135+
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139@@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143-#define PARA_INDIRECT(addr) *%cs:addr
9144+#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152+
9153+#define GET_CR0_INTO_RDI \
9154+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155+ mov %rax,%rdi
9156+
9157+#define SET_RDI_INTO_CR0 \
9158+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159+
9160+#define GET_CR3_INTO_RDI \
9161+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162+ mov %rax,%rdi
9163+
9164+#define SET_RDI_INTO_CR3 \
9165+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166+
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171index 8e8b9a4..f07d725 100644
9172--- a/arch/x86/include/asm/paravirt_types.h
9173+++ b/arch/x86/include/asm/paravirt_types.h
9174@@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178-};
9179+} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186-};
9187+} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193-};
9194+} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202-};
9203+} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207@@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211-};
9212+} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228+
9229+#ifdef CONFIG_PAX_KERNEXEC
9230+ unsigned long (*pax_open_kernel)(void);
9231+ unsigned long (*pax_close_kernel)(void);
9232+#endif
9233+
9234 };
9235
9236 struct arch_spinlock;
9237@@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241-};
9242+} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247index b4389a4..b7ff22c 100644
9248--- a/arch/x86/include/asm/pgalloc.h
9249+++ b/arch/x86/include/asm/pgalloc.h
9250@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255+}
9256+
9257+static inline void pmd_populate_user(struct mm_struct *mm,
9258+ pmd_t *pmd, pte_t *pte)
9259+{
9260+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265index 98391db..8f6984e 100644
9266--- a/arch/x86/include/asm/pgtable-2level.h
9267+++ b/arch/x86/include/asm/pgtable-2level.h
9268@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272+ pax_open_kernel();
9273 *pmdp = pmd;
9274+ pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279index effff47..f9e4035 100644
9280--- a/arch/x86/include/asm/pgtable-3level.h
9281+++ b/arch/x86/include/asm/pgtable-3level.h
9282@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286+ pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288+ pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293+ pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295+ pax_close_kernel();
9296 }
9297
9298 /*
9299diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300index 18601c8..3d716d1 100644
9301--- a/arch/x86/include/asm/pgtable.h
9302+++ b/arch/x86/include/asm/pgtable.h
9303@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315+#define pax_open_kernel() native_pax_open_kernel()
9316+#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321+
9322+#ifdef CONFIG_PAX_KERNEXEC
9323+static inline unsigned long native_pax_open_kernel(void)
9324+{
9325+ unsigned long cr0;
9326+
9327+ preempt_disable();
9328+ barrier();
9329+ cr0 = read_cr0() ^ X86_CR0_WP;
9330+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331+ write_cr0(cr0);
9332+ return cr0 ^ X86_CR0_WP;
9333+}
9334+
9335+static inline unsigned long native_pax_close_kernel(void)
9336+{
9337+ unsigned long cr0;
9338+
9339+ cr0 = read_cr0() ^ X86_CR0_WP;
9340+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341+ write_cr0(cr0);
9342+ barrier();
9343+ preempt_enable_no_resched();
9344+ return cr0 ^ X86_CR0_WP;
9345+}
9346+#else
9347+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349+#endif
9350+
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355+static inline int pte_user(pte_t pte)
9356+{
9357+ return pte_val(pte) & _PAGE_USER;
9358+}
9359+
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367+static inline pte_t pte_mkread(pte_t pte)
9368+{
9369+ return __pte(pte_val(pte) | _PAGE_USER);
9370+}
9371+
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374- return pte_clear_flags(pte, _PAGE_NX);
9375+#ifdef CONFIG_X86_PAE
9376+ if (__supported_pte_mask & _PAGE_NX)
9377+ return pte_clear_flags(pte, _PAGE_NX);
9378+ else
9379+#endif
9380+ return pte_set_flags(pte, _PAGE_USER);
9381+}
9382+
9383+static inline pte_t pte_exprotect(pte_t pte)
9384+{
9385+#ifdef CONFIG_X86_PAE
9386+ if (__supported_pte_mask & _PAGE_NX)
9387+ return pte_set_flags(pte, _PAGE_NX);
9388+ else
9389+#endif
9390+ return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398+
9399+#ifdef CONFIG_PAX_PER_CPU_PGD
9400+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402+{
9403+ return cpu_pgd[cpu];
9404+}
9405+#endif
9406+
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425+
9426+#ifdef CONFIG_PAX_PER_CPU_PGD
9427+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428+#endif
9429+
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437+#ifdef CONFIG_X86_32
9438+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439+#else
9440+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442+
9443+#ifdef CONFIG_PAX_MEMORY_UDEREF
9444+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445+#else
9446+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447+#endif
9448+
9449+#endif
9450+
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461- memcpy(dst, src, count * sizeof(pgd_t));
9462+ pax_open_kernel();
9463+ while (count--)
9464+ *dst++ = *src++;
9465+ pax_close_kernel();
9466 }
9467
9468+#ifdef CONFIG_PAX_PER_CPU_PGD
9469+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470+#endif
9471+
9472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474+#else
9475+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476+#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481index 0c92113..34a77c6 100644
9482--- a/arch/x86/include/asm/pgtable_32.h
9483+++ b/arch/x86/include/asm/pgtable_32.h
9484@@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488-extern pgd_t swapper_pg_dir[1024];
9489-extern pgd_t initial_page_table[1024];
9490-
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499+extern pgd_t initial_page_table[PTRS_PER_PGD];
9500+#ifdef CONFIG_X86_PAE
9501+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502+#endif
9503+
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511+ pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513+ pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517@@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521+#define HAVE_ARCH_UNMAPPED_AREA
9522+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523+
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528index ed5903b..c7fe163 100644
9529--- a/arch/x86/include/asm/pgtable_32_types.h
9530+++ b/arch/x86/include/asm/pgtable_32_types.h
9531@@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535-# define PMD_SIZE (1UL << PMD_SHIFT)
9536+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544+#ifdef CONFIG_PAX_KERNEXEC
9545+#ifndef __ASSEMBLY__
9546+extern unsigned char MODULES_EXEC_VADDR[];
9547+extern unsigned char MODULES_EXEC_END[];
9548+#endif
9549+#include <asm/boot.h>
9550+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552+#else
9553+#define ktla_ktva(addr) (addr)
9554+#define ktva_ktla(addr) (addr)
9555+#endif
9556+
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561index 975f709..107976d 100644
9562--- a/arch/x86/include/asm/pgtable_64.h
9563+++ b/arch/x86/include/asm/pgtable_64.h
9564@@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568+extern pud_t level3_vmalloc_start_pgt[512];
9569+extern pud_t level3_vmalloc_end_pgt[512];
9570+extern pud_t level3_vmemmap_pgt[512];
9571+extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574-extern pmd_t level2_ident_pgt[512];
9575-extern pgd_t init_level4_pgt[];
9576+extern pmd_t level2_ident_pgt[512*2];
9577+extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585+ pax_open_kernel();
9586 *pmdp = pmd;
9587+ pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595+ pax_open_kernel();
9596+ *pgdp = pgd;
9597+ pax_close_kernel();
9598+}
9599+
9600+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601+{
9602 *pgdp = pgd;
9603 }
9604
9605diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606index 766ea16..5b96cb3 100644
9607--- a/arch/x86/include/asm/pgtable_64_types.h
9608+++ b/arch/x86/include/asm/pgtable_64_types.h
9609@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613+#define MODULES_EXEC_VADDR MODULES_VADDR
9614+#define MODULES_EXEC_END MODULES_END
9615+
9616+#define ktla_ktva(addr) (addr)
9617+#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621index 013286a..8b42f4f 100644
9622--- a/arch/x86/include/asm/pgtable_types.h
9623+++ b/arch/x86/include/asm/pgtable_types.h
9624@@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641@@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649@@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653-#else
9654+#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656+#else
9657+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661@@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667+
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671@@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680@@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695+#endif
9696
9697+#if PAGETABLE_LEVELS == 3
9698+#include <asm-generic/pgtable-nopud.h>
9699+#endif
9700+
9701+#if PAGETABLE_LEVELS == 2
9702+#include <asm-generic/pgtable-nopmd.h>
9703+#endif
9704+
9705+#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713-#include <asm-generic/pgtable-nopud.h>
9714-
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722-#include <asm-generic/pgtable-nopmd.h>
9723-
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731-extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736index b650435..eefa566 100644
9737--- a/arch/x86/include/asm/processor.h
9738+++ b/arch/x86/include/asm/processor.h
9739@@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744+extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752+
9753+#ifdef CONFIG_PAX_SEGMEXEC
9754+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756+#else
9757 #define STACK_TOP TASK_SIZE
9758-#define STACK_TOP_MAX STACK_TOP
9759+#endif
9760+
9761+#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782-#define KSTK_TOP(info) \
9783-({ \
9784- unsigned long *__ptr = (unsigned long *)(info); \
9785- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786-})
9787+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811- 0xc0000000 : 0xFFFFe000)
9812+ 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834+#ifdef CONFIG_PAX_SEGMEXEC
9835+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836+#endif
9837+
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842index 3566454..4bdfb8c 100644
9843--- a/arch/x86/include/asm/ptrace.h
9844+++ b/arch/x86/include/asm/ptrace.h
9845@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849- * user_mode_vm(regs) determines whether a register set came from user mode.
9850+ * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856+ * be used.
9857 */
9858-static inline int user_mode(struct pt_regs *regs)
9859+static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864- return !!(regs->cs & 3);
9865+ return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869-static inline int user_mode_vm(struct pt_regs *regs)
9870+static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876- return user_mode(regs);
9877+ return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885+ unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891- return regs->cs == __USER_CS;
9892+ return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901index 92f29706..a79cbbb 100644
9902--- a/arch/x86/include/asm/reboot.h
9903+++ b/arch/x86/include/asm/reboot.h
9904@@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908- void (*restart)(char *cmd);
9909- void (*halt)(void);
9910- void (*power_off)(void);
9911+ void (* __noreturn restart)(char *cmd);
9912+ void (* __noreturn halt)(void);
9913+ void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916- void (*emergency_restart)(void);
9917-};
9918+ void (* __noreturn emergency_restart)(void);
9919+} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925-void machine_real_restart(unsigned int type);
9926+void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931index 2dbe4a7..ce1db00 100644
9932--- a/arch/x86/include/asm/rwsem.h
9933+++ b/arch/x86/include/asm/rwsem.h
9934@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938+
9939+#ifdef CONFIG_PAX_REFCOUNT
9940+ "jno 0f\n"
9941+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9942+ "int $4\n0:\n"
9943+ _ASM_EXTABLE(0b, 0b)
9944+#endif
9945+
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953+
9954+#ifdef CONFIG_PAX_REFCOUNT
9955+ "jno 0f\n"
9956+ "sub %3,%2\n"
9957+ "int $4\n0:\n"
9958+ _ASM_EXTABLE(0b, 0b)
9959+#endif
9960+
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968+
9969+#ifdef CONFIG_PAX_REFCOUNT
9970+ "jno 0f\n"
9971+ "mov %1,(%2)\n"
9972+ "int $4\n0:\n"
9973+ _ASM_EXTABLE(0b, 0b)
9974+#endif
9975+
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983+
9984+#ifdef CONFIG_PAX_REFCOUNT
9985+ "jno 0f\n"
9986+ "mov %1,(%2)\n"
9987+ "int $4\n0:\n"
9988+ _ASM_EXTABLE(0b, 0b)
9989+#endif
9990+
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998+
9999+#ifdef CONFIG_PAX_REFCOUNT
10000+ "jno 0f\n"
10001+ "mov %1,(%2)\n"
10002+ "int $4\n0:\n"
10003+ _ASM_EXTABLE(0b, 0b)
10004+#endif
10005+
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013+
10014+#ifdef CONFIG_PAX_REFCOUNT
10015+ "jno 0f\n"
10016+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017+ "int $4\n0:\n"
10018+ _ASM_EXTABLE(0b, 0b)
10019+#endif
10020+
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030+
10031+#ifdef CONFIG_PAX_REFCOUNT
10032+ "jno 0f\n"
10033+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034+ "int $4\n0:\n"
10035+ _ASM_EXTABLE(0b, 0b)
10036+#endif
10037+
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045- return delta + xadd(&sem->count, delta);
10046+ return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051index 5e64171..f58957e 100644
10052--- a/arch/x86/include/asm/segment.h
10053+++ b/arch/x86/include/asm/segment.h
10054@@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058- * 29 - unused
10059- * 30 - unused
10060+ * 29 - PCI BIOS CS
10061+ * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068+
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072@@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077+
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081@@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087+
10088+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090+
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094@@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103@@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108+
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112@@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121index 73b11bc..d4a3b63 100644
10122--- a/arch/x86/include/asm/smp.h
10123+++ b/arch/x86/include/asm/smp.h
10124@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128-DECLARE_PER_CPU(int, cpu_number);
10129+DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133@@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137-};
10138+} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146-#define raw_smp_processor_id() (percpu_read(cpu_number))
10147-
10148-#define stack_smp_processor_id() \
10149-({ \
10150- struct thread_info *ti; \
10151- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152- ti->cpu; \
10153-})
10154+#define raw_smp_processor_id() (percpu_read(cpu_number))
10155+#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160index 972c260..43ab1fd 100644
10161--- a/arch/x86/include/asm/spinlock.h
10162+++ b/arch/x86/include/asm/spinlock.h
10163@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167+
10168+#ifdef CONFIG_PAX_REFCOUNT
10169+ "jno 0f\n"
10170+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171+ "int $4\n0:\n"
10172+ _ASM_EXTABLE(0b, 0b)
10173+#endif
10174+
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182+
10183+#ifdef CONFIG_PAX_REFCOUNT
10184+ "jno 0f\n"
10185+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186+ "int $4\n0:\n"
10187+ _ASM_EXTABLE(0b, 0b)
10188+#endif
10189+
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199+
10200+#ifdef CONFIG_PAX_REFCOUNT
10201+ "jno 0f\n"
10202+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203+ "int $4\n0:\n"
10204+ _ASM_EXTABLE(0b, 0b)
10205+#endif
10206+
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214+
10215+#ifdef CONFIG_PAX_REFCOUNT
10216+ "jno 0f\n"
10217+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218+ "int $4\n0:\n"
10219+ _ASM_EXTABLE(0b, 0b)
10220+#endif
10221+
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226index 1575177..cb23f52 100644
10227--- a/arch/x86/include/asm/stackprotector.h
10228+++ b/arch/x86/include/asm/stackprotector.h
10229@@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242-#ifdef CONFIG_X86_32
10243+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248index 70bbe39..4ae2bd4 100644
10249--- a/arch/x86/include/asm/stacktrace.h
10250+++ b/arch/x86/include/asm/stacktrace.h
10251@@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255-struct thread_info;
10256+struct task_struct;
10257 struct stacktrace_ops;
10258
10259-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260- unsigned long *stack,
10261- unsigned long bp,
10262- const struct stacktrace_ops *ops,
10263- void *data,
10264- unsigned long *end,
10265- int *graph);
10266+typedef unsigned long walk_stack_t(struct task_struct *task,
10267+ void *stack_start,
10268+ unsigned long *stack,
10269+ unsigned long bp,
10270+ const struct stacktrace_ops *ops,
10271+ void *data,
10272+ unsigned long *end,
10273+ int *graph);
10274
10275-extern unsigned long
10276-print_context_stack(struct thread_info *tinfo,
10277- unsigned long *stack, unsigned long bp,
10278- const struct stacktrace_ops *ops, void *data,
10279- unsigned long *end, int *graph);
10280-
10281-extern unsigned long
10282-print_context_stack_bp(struct thread_info *tinfo,
10283- unsigned long *stack, unsigned long bp,
10284- const struct stacktrace_ops *ops, void *data,
10285- unsigned long *end, int *graph);
10286+extern walk_stack_t print_context_stack;
10287+extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291@@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295- walk_stack_t walk_stack;
10296+ walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301index cb23852..2dde194 100644
10302--- a/arch/x86/include/asm/sys_ia32.h
10303+++ b/arch/x86/include/asm/sys_ia32.h
10304@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314index 2d2f01c..f985723 100644
10315--- a/arch/x86/include/asm/system.h
10316+++ b/arch/x86/include/asm/system.h
10317@@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326@@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331+ [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339- return __limit + 1;
10340+ return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344@@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348-extern unsigned long arch_align_stack(unsigned long sp);
10349+#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355-void stop_this_cpu(void *dummy);
10356+void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361index a1fe5c1..ee326d8 100644
10362--- a/arch/x86/include/asm/thread_info.h
10363+++ b/arch/x86/include/asm/thread_info.h
10364@@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368+#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372@@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376- struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380@@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384-#ifdef CONFIG_X86_32
10385- unsigned long previous_esp; /* ESP of the previous stack in
10386- case of nested (IRQ) stacks
10387- */
10388- __u8 supervisor_stack[0];
10389-#endif
10390+ unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394-#define INIT_THREAD_INFO(tsk) \
10395+#define INIT_THREAD_INFO \
10396 { \
10397- .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401@@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405-#define init_thread_info (init_thread_union.thread_info)
10406+#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410@@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414-#ifdef CONFIG_X86_32
10415-
10416-#define STACK_WARN (THREAD_SIZE/8)
10417-/*
10418- * macros/functions for gaining access to the thread information structure
10419- *
10420- * preempt_count needs to be 1 initially, until the scheduler is functional.
10421- */
10422-#ifndef __ASSEMBLY__
10423-
10424-
10425-/* how to get the current stack pointer from C */
10426-register unsigned long current_stack_pointer asm("esp") __used;
10427-
10428-/* how to get the thread information struct from C */
10429-static inline struct thread_info *current_thread_info(void)
10430-{
10431- return (struct thread_info *)
10432- (current_stack_pointer & ~(THREAD_SIZE - 1));
10433-}
10434-
10435-#else /* !__ASSEMBLY__ */
10436-
10437+#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440- movl $-THREAD_SIZE, reg; \
10441- andl %esp, reg
10442+ mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445-#define GET_THREAD_INFO_WITH_ESP(reg) \
10446- andl $-THREAD_SIZE, reg
10447+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448+#else
10449+/* how to get the thread information struct from C */
10450+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451+
10452+static __always_inline struct thread_info *current_thread_info(void)
10453+{
10454+ return percpu_read_stable(current_tinfo);
10455+}
10456+#endif
10457+
10458+#ifdef CONFIG_X86_32
10459+
10460+#define STACK_WARN (THREAD_SIZE/8)
10461+/*
10462+ * macros/functions for gaining access to the thread information structure
10463+ *
10464+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10465+ */
10466+#ifndef __ASSEMBLY__
10467+
10468+/* how to get the current stack pointer from C */
10469+register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475-#include <asm/percpu.h>
10476-#define KERNEL_STACK_OFFSET (5*8)
10477-
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485-static inline struct thread_info *current_thread_info(void)
10486-{
10487- struct thread_info *ti;
10488- ti = (void *)(percpu_read_stable(kernel_stack) +
10489- KERNEL_STACK_OFFSET - THREAD_SIZE);
10490- return ti;
10491-}
10492-
10493-#else /* !__ASSEMBLY__ */
10494-
10495-/* how to get the thread information struct from ASM */
10496-#define GET_THREAD_INFO(reg) \
10497- movq PER_CPU_VAR(kernel_stack),reg ; \
10498- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499-
10500+/* how to get the current stack pointer from C */
10501+register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509+
10510+#define __HAVE_THREAD_FUNCTIONS
10511+#define task_thread_info(task) (&(task)->tinfo)
10512+#define task_stack_page(task) ((task)->stack)
10513+#define setup_thread_stack(p, org) do {} while (0)
10514+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515+
10516+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517+extern struct task_struct *alloc_task_struct_node(int node);
10518+extern void free_task_struct(struct task_struct *);
10519+
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523index 36361bf..324f262 100644
10524--- a/arch/x86/include/asm/uaccess.h
10525+++ b/arch/x86/include/asm/uaccess.h
10526@@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530+#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538+
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542@@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547+void __set_fs(mm_segment_t x);
10548+void set_fs(mm_segment_t x);
10549+#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551+#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555@@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561+#define access_ok(type, addr, size) \
10562+({ \
10563+ long __size = size; \
10564+ unsigned long __addr = (unsigned long)addr; \
10565+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10566+ unsigned long __end_ao = __addr + __size - 1; \
10567+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569+ while(__addr_ao <= __end_ao) { \
10570+ char __c_ao; \
10571+ __addr_ao += PAGE_SIZE; \
10572+ if (__size > PAGE_SIZE) \
10573+ cond_resched(); \
10574+ if (__get_user(__c_ao, (char __user *)__addr)) \
10575+ break; \
10576+ if (type != VERIFY_WRITE) { \
10577+ __addr = __addr_ao; \
10578+ continue; \
10579+ } \
10580+ if (__put_user(__c_ao, (char __user *)__addr)) \
10581+ break; \
10582+ __addr = __addr_ao; \
10583+ } \
10584+ } \
10585+ __ret_ao; \
10586+})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594-
10595+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596+#define __copyuser_seg "gs;"
10597+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599+#else
10600+#define __copyuser_seg
10601+#define __COPYUSER_SET_ES
10602+#define __COPYUSER_RESTORE_ES
10603+#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607- asm volatile("1: movl %%eax,0(%2)\n" \
10608- "2: movl %%edx,4(%2)\n" \
10609+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618- asm volatile("1: movl %%eax,0(%1)\n" \
10619- "2: movl %%edx,4(%1)\n" \
10620+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629- __pu_val = x; \
10630+ __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634@@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643@@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647- : "=r" (err), ltype(x) \
10648+ : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652@@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661@@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666+ (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672-#define __m(x) (*(struct __large_struct __user *)(x))
10673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674+#define ____m(x) \
10675+({ \
10676+ unsigned long ____x = (unsigned long)(x); \
10677+ if (____x < PAX_USER_SHADOW_BASE) \
10678+ ____x += PAX_USER_SHADOW_BASE; \
10679+ (void __user *)____x; \
10680+})
10681+#else
10682+#define ____m(x) (x)
10683+#endif
10684+#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715+#define __get_user(x, ptr) get_user((x), (ptr))
10716+#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719+#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728+#define __put_user(x, ptr) put_user((x), (ptr))
10729+#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732+#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741+ (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746index 566e803..b9521e9 100644
10747--- a/arch/x86/include/asm/uaccess_32.h
10748+++ b/arch/x86/include/asm/uaccess_32.h
10749@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753+ if ((long)n < 0)
10754+ return n;
10755+
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763+ if (!__builtin_constant_p(n))
10764+ check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772+
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779+ if ((long)n < 0)
10780+ return n;
10781+
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785@@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789+
10790+ if ((long)n < 0)
10791+ return n;
10792+
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800+ if (!__builtin_constant_p(n))
10801+ check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809+
10810+ if ((long)n < 0)
10811+ return n;
10812+
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816@@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820- return __copy_from_user_ll_nocache_nozero(to, from, n);
10821+ if ((long)n < 0)
10822+ return n;
10823+
10824+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827-unsigned long __must_check copy_to_user(void __user *to,
10828- const void *from, unsigned long n);
10829-unsigned long __must_check _copy_from_user(void *to,
10830- const void __user *from,
10831- unsigned long n);
10832-
10833+extern void copy_to_user_overflow(void)
10834+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10836+#else
10837+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838+#endif
10839+;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847-static inline unsigned long __must_check copy_from_user(void *to,
10848- const void __user *from,
10849- unsigned long n)
10850+/**
10851+ * copy_to_user: - Copy a block of data into user space.
10852+ * @to: Destination address, in user space.
10853+ * @from: Source address, in kernel space.
10854+ * @n: Number of bytes to copy.
10855+ *
10856+ * Context: User context only. This function may sleep.
10857+ *
10858+ * Copy data from kernel space to user space.
10859+ *
10860+ * Returns number of bytes that could not be copied.
10861+ * On success, this will be zero.
10862+ */
10863+static inline unsigned long __must_check
10864+copy_to_user(void __user *to, const void *from, unsigned long n)
10865+{
10866+ int sz = __compiletime_object_size(from);
10867+
10868+ if (unlikely(sz != -1 && sz < n))
10869+ copy_to_user_overflow();
10870+ else if (access_ok(VERIFY_WRITE, to, n))
10871+ n = __copy_to_user(to, from, n);
10872+ return n;
10873+}
10874+
10875+/**
10876+ * copy_from_user: - Copy a block of data from user space.
10877+ * @to: Destination address, in kernel space.
10878+ * @from: Source address, in user space.
10879+ * @n: Number of bytes to copy.
10880+ *
10881+ * Context: User context only. This function may sleep.
10882+ *
10883+ * Copy data from user space to kernel space.
10884+ *
10885+ * Returns number of bytes that could not be copied.
10886+ * On success, this will be zero.
10887+ *
10888+ * If some data could not be copied, this function will pad the copied
10889+ * data to the requested size using zero bytes.
10890+ */
10891+static inline unsigned long __must_check
10892+copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896- if (likely(sz == -1 || sz >= n))
10897- n = _copy_from_user(to, from, n);
10898- else
10899+ if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901-
10902+ else if (access_ok(VERIFY_READ, from, n))
10903+ n = __copy_from_user(to, from, n);
10904+ else if ((long)n > 0) {
10905+ if (!__builtin_constant_p(n))
10906+ check_object_size(to, n, false);
10907+ memset(to, 0, n);
10908+ }
10909 return n;
10910 }
10911
10912diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913index 1c66d30..e66922c 100644
10914--- a/arch/x86/include/asm/uaccess_64.h
10915+++ b/arch/x86/include/asm/uaccess_64.h
10916@@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920+#include <asm/pgtable.h>
10921+
10922+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926@@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930-copy_user_generic_string(void *to, const void *from, unsigned len);
10931+copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937-copy_user_generic(void *to, const void *from, unsigned len)
10938+copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942@@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
10944 "=d" (len)),
10945 "1" (to), "2" (from), "3" (len)
10946- : "memory", "rcx", "r8", "r9", "r10", "r11");
10947+ : "memory", "rcx", "r8", "r9", "r11");
10948 return ret;
10949 }
10950
10951+static __always_inline __must_check unsigned long
10952+__copy_to_user(void __user *to, const void *from, unsigned long len);
10953+static __always_inline __must_check unsigned long
10954+__copy_from_user(void *to, const void __user *from, unsigned long len);
10955 __must_check unsigned long
10956-_copy_to_user(void __user *to, const void *from, unsigned len);
10957-__must_check unsigned long
10958-_copy_from_user(void *to, const void __user *from, unsigned len);
10959-__must_check unsigned long
10960-copy_in_user(void __user *to, const void __user *from, unsigned len);
10961+copy_in_user(void __user *to, const void __user *from, unsigned long len);
10962
10963 static inline unsigned long __must_check copy_from_user(void *to,
10964 const void __user *from,
10965 unsigned long n)
10966 {
10967- int sz = __compiletime_object_size(to);
10968-
10969 might_fault();
10970- if (likely(sz == -1 || sz >= n))
10971- n = _copy_from_user(to, from, n);
10972-#ifdef CONFIG_DEBUG_VM
10973- else
10974- WARN(1, "Buffer overflow detected!\n");
10975-#endif
10976+
10977+ if (access_ok(VERIFY_READ, from, n))
10978+ n = __copy_from_user(to, from, n);
10979+ else if (n < INT_MAX) {
10980+ if (!__builtin_constant_p(n))
10981+ check_object_size(to, n, false);
10982+ memset(to, 0, n);
10983+ }
10984 return n;
10985 }
10986
10987 static __always_inline __must_check
10988-int copy_to_user(void __user *dst, const void *src, unsigned size)
10989+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10990 {
10991 might_fault();
10992
10993- return _copy_to_user(dst, src, size);
10994+ if (access_ok(VERIFY_WRITE, dst, size))
10995+ size = __copy_to_user(dst, src, size);
10996+ return size;
10997 }
10998
10999 static __always_inline __must_check
11000-int __copy_from_user(void *dst, const void __user *src, unsigned size)
11001+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11002 {
11003- int ret = 0;
11004+ int sz = __compiletime_object_size(dst);
11005+ unsigned ret = 0;
11006
11007 might_fault();
11008- if (!__builtin_constant_p(size))
11009- return copy_user_generic(dst, (__force void *)src, size);
11010+
11011+ if (size > INT_MAX)
11012+ return size;
11013+
11014+#ifdef CONFIG_PAX_MEMORY_UDEREF
11015+ if (!__access_ok(VERIFY_READ, src, size))
11016+ return size;
11017+#endif
11018+
11019+ if (unlikely(sz != -1 && sz < size)) {
11020+#ifdef CONFIG_DEBUG_VM
11021+ WARN(1, "Buffer overflow detected!\n");
11022+#endif
11023+ return size;
11024+ }
11025+
11026+ if (!__builtin_constant_p(size)) {
11027+ check_object_size(dst, size, false);
11028+
11029+#ifdef CONFIG_PAX_MEMORY_UDEREF
11030+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11031+ src += PAX_USER_SHADOW_BASE;
11032+#endif
11033+
11034+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11035+ }
11036 switch (size) {
11037- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11038+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11039 ret, "b", "b", "=q", 1);
11040 return ret;
11041- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11042+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11043 ret, "w", "w", "=r", 2);
11044 return ret;
11045- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11046+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11047 ret, "l", "k", "=r", 4);
11048 return ret;
11049- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 8);
11052 return ret;
11053 case 10:
11054- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11055+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11056 ret, "q", "", "=r", 10);
11057 if (unlikely(ret))
11058 return ret;
11059 __get_user_asm(*(u16 *)(8 + (char *)dst),
11060- (u16 __user *)(8 + (char __user *)src),
11061+ (const u16 __user *)(8 + (const char __user *)src),
11062 ret, "w", "w", "=r", 2);
11063 return ret;
11064 case 16:
11065- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11066+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11067 ret, "q", "", "=r", 16);
11068 if (unlikely(ret))
11069 return ret;
11070 __get_user_asm(*(u64 *)(8 + (char *)dst),
11071- (u64 __user *)(8 + (char __user *)src),
11072+ (const u64 __user *)(8 + (const char __user *)src),
11073 ret, "q", "", "=r", 8);
11074 return ret;
11075 default:
11076- return copy_user_generic(dst, (__force void *)src, size);
11077+
11078+#ifdef CONFIG_PAX_MEMORY_UDEREF
11079+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11080+ src += PAX_USER_SHADOW_BASE;
11081+#endif
11082+
11083+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11084 }
11085 }
11086
11087 static __always_inline __must_check
11088-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11089+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11090 {
11091- int ret = 0;
11092+ int sz = __compiletime_object_size(src);
11093+ unsigned ret = 0;
11094
11095 might_fault();
11096- if (!__builtin_constant_p(size))
11097- return copy_user_generic((__force void *)dst, src, size);
11098+
11099+ if (size > INT_MAX)
11100+ return size;
11101+
11102+#ifdef CONFIG_PAX_MEMORY_UDEREF
11103+ if (!__access_ok(VERIFY_WRITE, dst, size))
11104+ return size;
11105+#endif
11106+
11107+ if (unlikely(sz != -1 && sz < size)) {
11108+#ifdef CONFIG_DEBUG_VM
11109+ WARN(1, "Buffer overflow detected!\n");
11110+#endif
11111+ return size;
11112+ }
11113+
11114+ if (!__builtin_constant_p(size)) {
11115+ check_object_size(src, size, true);
11116+
11117+#ifdef CONFIG_PAX_MEMORY_UDEREF
11118+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11119+ dst += PAX_USER_SHADOW_BASE;
11120+#endif
11121+
11122+ return copy_user_generic((__force_kernel void *)dst, src, size);
11123+ }
11124 switch (size) {
11125- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11126+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11127 ret, "b", "b", "iq", 1);
11128 return ret;
11129- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11130+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11131 ret, "w", "w", "ir", 2);
11132 return ret;
11133- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11134+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11135 ret, "l", "k", "ir", 4);
11136 return ret;
11137- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 8);
11140 return ret;
11141 case 10:
11142- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11143+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11144 ret, "q", "", "er", 10);
11145 if (unlikely(ret))
11146 return ret;
11147 asm("":::"memory");
11148- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11149+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11150 ret, "w", "w", "ir", 2);
11151 return ret;
11152 case 16:
11153- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11154+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11155 ret, "q", "", "er", 16);
11156 if (unlikely(ret))
11157 return ret;
11158 asm("":::"memory");
11159- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11160+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11161 ret, "q", "", "er", 8);
11162 return ret;
11163 default:
11164- return copy_user_generic((__force void *)dst, src, size);
11165+
11166+#ifdef CONFIG_PAX_MEMORY_UDEREF
11167+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11168+ dst += PAX_USER_SHADOW_BASE;
11169+#endif
11170+
11171+ return copy_user_generic((__force_kernel void *)dst, src, size);
11172 }
11173 }
11174
11175 static __always_inline __must_check
11176-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11177+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11178 {
11179- int ret = 0;
11180+ unsigned ret = 0;
11181
11182 might_fault();
11183- if (!__builtin_constant_p(size))
11184- return copy_user_generic((__force void *)dst,
11185- (__force void *)src, size);
11186+
11187+ if (size > INT_MAX)
11188+ return size;
11189+
11190+#ifdef CONFIG_PAX_MEMORY_UDEREF
11191+ if (!__access_ok(VERIFY_READ, src, size))
11192+ return size;
11193+ if (!__access_ok(VERIFY_WRITE, dst, size))
11194+ return size;
11195+#endif
11196+
11197+ if (!__builtin_constant_p(size)) {
11198+
11199+#ifdef CONFIG_PAX_MEMORY_UDEREF
11200+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11201+ src += PAX_USER_SHADOW_BASE;
11202+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11203+ dst += PAX_USER_SHADOW_BASE;
11204+#endif
11205+
11206+ return copy_user_generic((__force_kernel void *)dst,
11207+ (__force_kernel const void *)src, size);
11208+ }
11209 switch (size) {
11210 case 1: {
11211 u8 tmp;
11212- __get_user_asm(tmp, (u8 __user *)src,
11213+ __get_user_asm(tmp, (const u8 __user *)src,
11214 ret, "b", "b", "=q", 1);
11215 if (likely(!ret))
11216 __put_user_asm(tmp, (u8 __user *)dst,
11217@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11218 }
11219 case 2: {
11220 u16 tmp;
11221- __get_user_asm(tmp, (u16 __user *)src,
11222+ __get_user_asm(tmp, (const u16 __user *)src,
11223 ret, "w", "w", "=r", 2);
11224 if (likely(!ret))
11225 __put_user_asm(tmp, (u16 __user *)dst,
11226@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11227
11228 case 4: {
11229 u32 tmp;
11230- __get_user_asm(tmp, (u32 __user *)src,
11231+ __get_user_asm(tmp, (const u32 __user *)src,
11232 ret, "l", "k", "=r", 4);
11233 if (likely(!ret))
11234 __put_user_asm(tmp, (u32 __user *)dst,
11235@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11236 }
11237 case 8: {
11238 u64 tmp;
11239- __get_user_asm(tmp, (u64 __user *)src,
11240+ __get_user_asm(tmp, (const u64 __user *)src,
11241 ret, "q", "", "=r", 8);
11242 if (likely(!ret))
11243 __put_user_asm(tmp, (u64 __user *)dst,
11244@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11245 return ret;
11246 }
11247 default:
11248- return copy_user_generic((__force void *)dst,
11249- (__force void *)src, size);
11250+
11251+#ifdef CONFIG_PAX_MEMORY_UDEREF
11252+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11253+ src += PAX_USER_SHADOW_BASE;
11254+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11255+ dst += PAX_USER_SHADOW_BASE;
11256+#endif
11257+
11258+ return copy_user_generic((__force_kernel void *)dst,
11259+ (__force_kernel const void *)src, size);
11260 }
11261 }
11262
11263@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11264 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11265
11266 static __must_check __always_inline int
11267-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11268+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11269 {
11270- return copy_user_generic(dst, (__force const void *)src, size);
11271+ if (size > INT_MAX)
11272+ return size;
11273+
11274+#ifdef CONFIG_PAX_MEMORY_UDEREF
11275+ if (!__access_ok(VERIFY_READ, src, size))
11276+ return size;
11277+
11278+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11279+ src += PAX_USER_SHADOW_BASE;
11280+#endif
11281+
11282+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11283 }
11284
11285-static __must_check __always_inline int
11286-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11287+static __must_check __always_inline unsigned long
11288+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11289 {
11290- return copy_user_generic((__force void *)dst, src, size);
11291+ if (size > INT_MAX)
11292+ return size;
11293+
11294+#ifdef CONFIG_PAX_MEMORY_UDEREF
11295+ if (!__access_ok(VERIFY_WRITE, dst, size))
11296+ return size;
11297+
11298+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11299+ dst += PAX_USER_SHADOW_BASE;
11300+#endif
11301+
11302+ return copy_user_generic((__force_kernel void *)dst, src, size);
11303 }
11304
11305-extern long __copy_user_nocache(void *dst, const void __user *src,
11306- unsigned size, int zerorest);
11307+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11308+ unsigned long size, int zerorest);
11309
11310-static inline int
11311-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11312+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11313 {
11314 might_sleep();
11315+
11316+ if (size > INT_MAX)
11317+ return size;
11318+
11319+#ifdef CONFIG_PAX_MEMORY_UDEREF
11320+ if (!__access_ok(VERIFY_READ, src, size))
11321+ return size;
11322+#endif
11323+
11324 return __copy_user_nocache(dst, src, size, 1);
11325 }
11326
11327-static inline int
11328-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11329- unsigned size)
11330+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11331+ unsigned long size)
11332 {
11333+ if (size > INT_MAX)
11334+ return size;
11335+
11336+#ifdef CONFIG_PAX_MEMORY_UDEREF
11337+ if (!__access_ok(VERIFY_READ, src, size))
11338+ return size;
11339+#endif
11340+
11341 return __copy_user_nocache(dst, src, size, 0);
11342 }
11343
11344-unsigned long
11345-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11346+extern unsigned long
11347+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11348
11349 #endif /* _ASM_X86_UACCESS_64_H */
11350diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11351index bb05228..d763d5b 100644
11352--- a/arch/x86/include/asm/vdso.h
11353+++ b/arch/x86/include/asm/vdso.h
11354@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11355 #define VDSO32_SYMBOL(base, name) \
11356 ({ \
11357 extern const char VDSO32_##name[]; \
11358- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11359+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11360 })
11361 #endif
11362
11363diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11364index 1971e65..1e3559b 100644
11365--- a/arch/x86/include/asm/x86_init.h
11366+++ b/arch/x86/include/asm/x86_init.h
11367@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11368 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11369 void (*find_smp_config)(void);
11370 void (*get_smp_config)(unsigned int early);
11371-};
11372+} __no_const;
11373
11374 /**
11375 * struct x86_init_resources - platform specific resource related ops
11376@@ -42,7 +42,7 @@ struct x86_init_resources {
11377 void (*probe_roms)(void);
11378 void (*reserve_resources)(void);
11379 char *(*memory_setup)(void);
11380-};
11381+} __no_const;
11382
11383 /**
11384 * struct x86_init_irqs - platform specific interrupt setup
11385@@ -55,7 +55,7 @@ struct x86_init_irqs {
11386 void (*pre_vector_init)(void);
11387 void (*intr_init)(void);
11388 void (*trap_init)(void);
11389-};
11390+} __no_const;
11391
11392 /**
11393 * struct x86_init_oem - oem platform specific customizing functions
11394@@ -65,7 +65,7 @@ struct x86_init_irqs {
11395 struct x86_init_oem {
11396 void (*arch_setup)(void);
11397 void (*banner)(void);
11398-};
11399+} __no_const;
11400
11401 /**
11402 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11403@@ -76,7 +76,7 @@ struct x86_init_oem {
11404 */
11405 struct x86_init_mapping {
11406 void (*pagetable_reserve)(u64 start, u64 end);
11407-};
11408+} __no_const;
11409
11410 /**
11411 * struct x86_init_paging - platform specific paging functions
11412@@ -86,7 +86,7 @@ struct x86_init_mapping {
11413 struct x86_init_paging {
11414 void (*pagetable_setup_start)(pgd_t *base);
11415 void (*pagetable_setup_done)(pgd_t *base);
11416-};
11417+} __no_const;
11418
11419 /**
11420 * struct x86_init_timers - platform specific timer setup
11421@@ -101,7 +101,7 @@ struct x86_init_timers {
11422 void (*tsc_pre_init)(void);
11423 void (*timer_init)(void);
11424 void (*wallclock_init)(void);
11425-};
11426+} __no_const;
11427
11428 /**
11429 * struct x86_init_iommu - platform specific iommu setup
11430@@ -109,7 +109,7 @@ struct x86_init_timers {
11431 */
11432 struct x86_init_iommu {
11433 int (*iommu_init)(void);
11434-};
11435+} __no_const;
11436
11437 /**
11438 * struct x86_init_pci - platform specific pci init functions
11439@@ -123,7 +123,7 @@ struct x86_init_pci {
11440 int (*init)(void);
11441 void (*init_irq)(void);
11442 void (*fixup_irqs)(void);
11443-};
11444+} __no_const;
11445
11446 /**
11447 * struct x86_init_ops - functions for platform specific setup
11448@@ -139,7 +139,7 @@ struct x86_init_ops {
11449 struct x86_init_timers timers;
11450 struct x86_init_iommu iommu;
11451 struct x86_init_pci pci;
11452-};
11453+} __no_const;
11454
11455 /**
11456 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11457@@ -147,7 +147,7 @@ struct x86_init_ops {
11458 */
11459 struct x86_cpuinit_ops {
11460 void (*setup_percpu_clockev)(void);
11461-};
11462+} __no_const;
11463
11464 /**
11465 * struct x86_platform_ops - platform specific runtime functions
11466@@ -169,7 +169,7 @@ struct x86_platform_ops {
11467 void (*nmi_init)(void);
11468 unsigned char (*get_nmi_reason)(void);
11469 int (*i8042_detect)(void);
11470-};
11471+} __no_const;
11472
11473 struct pci_dev;
11474
11475@@ -177,7 +177,7 @@ struct x86_msi_ops {
11476 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11477 void (*teardown_msi_irq)(unsigned int irq);
11478 void (*teardown_msi_irqs)(struct pci_dev *dev);
11479-};
11480+} __no_const;
11481
11482 extern struct x86_init_ops x86_init;
11483 extern struct x86_cpuinit_ops x86_cpuinit;
11484diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11485index c6ce245..ffbdab7 100644
11486--- a/arch/x86/include/asm/xsave.h
11487+++ b/arch/x86/include/asm/xsave.h
11488@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11489 {
11490 int err;
11491
11492+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11493+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11494+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11495+#endif
11496+
11497 /*
11498 * Clear the xsave header first, so that reserved fields are
11499 * initialized to zero.
11500@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11501 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11502 {
11503 int err;
11504- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11505+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11506 u32 lmask = mask;
11507 u32 hmask = mask >> 32;
11508
11509+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11510+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11511+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11512+#endif
11513+
11514 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11515 "2:\n"
11516 ".section .fixup,\"ax\"\n"
11517diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11518index 6a564ac..9b1340c 100644
11519--- a/arch/x86/kernel/acpi/realmode/Makefile
11520+++ b/arch/x86/kernel/acpi/realmode/Makefile
11521@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11522 $(call cc-option, -fno-stack-protector) \
11523 $(call cc-option, -mpreferred-stack-boundary=2)
11524 KBUILD_CFLAGS += $(call cc-option, -m32)
11525+ifdef CONSTIFY_PLUGIN
11526+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11527+endif
11528 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11529 GCOV_PROFILE := n
11530
11531diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11532index b4fd836..4358fe3 100644
11533--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11534+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11535@@ -108,6 +108,9 @@ wakeup_code:
11536 /* Do any other stuff... */
11537
11538 #ifndef CONFIG_64BIT
11539+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11540+ call verify_cpu
11541+
11542 /* This could also be done in C code... */
11543 movl pmode_cr3, %eax
11544 movl %eax, %cr3
11545@@ -131,6 +134,7 @@ wakeup_code:
11546 movl pmode_cr0, %eax
11547 movl %eax, %cr0
11548 jmp pmode_return
11549+# include "../../verify_cpu.S"
11550 #else
11551 pushw $0
11552 pushw trampoline_segment
11553diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11554index 103b6ab..2004d0a 100644
11555--- a/arch/x86/kernel/acpi/sleep.c
11556+++ b/arch/x86/kernel/acpi/sleep.c
11557@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11558 header->trampoline_segment = trampoline_address() >> 4;
11559 #ifdef CONFIG_SMP
11560 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11561+
11562+ pax_open_kernel();
11563 early_gdt_descr.address =
11564 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11565+ pax_close_kernel();
11566+
11567 initial_gs = per_cpu_offset(smp_processor_id());
11568 #endif
11569 initial_code = (unsigned long)wakeup_long64;
11570diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11571index 13ab720..95d5442 100644
11572--- a/arch/x86/kernel/acpi/wakeup_32.S
11573+++ b/arch/x86/kernel/acpi/wakeup_32.S
11574@@ -30,13 +30,11 @@ wakeup_pmode_return:
11575 # and restore the stack ... but you need gdt for this to work
11576 movl saved_context_esp, %esp
11577
11578- movl %cs:saved_magic, %eax
11579- cmpl $0x12345678, %eax
11580+ cmpl $0x12345678, saved_magic
11581 jne bogus_magic
11582
11583 # jump to place where we left off
11584- movl saved_eip, %eax
11585- jmp *%eax
11586+ jmp *(saved_eip)
11587
11588 bogus_magic:
11589 jmp bogus_magic
11590diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11591index 1f84794..e23f862 100644
11592--- a/arch/x86/kernel/alternative.c
11593+++ b/arch/x86/kernel/alternative.c
11594@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11595 */
11596 for (a = start; a < end; a++) {
11597 instr = (u8 *)&a->instr_offset + a->instr_offset;
11598+
11599+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11600+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11601+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11602+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11603+#endif
11604+
11605 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11606 BUG_ON(a->replacementlen > a->instrlen);
11607 BUG_ON(a->instrlen > sizeof(insnbuf));
11608@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11609 for (poff = start; poff < end; poff++) {
11610 u8 *ptr = (u8 *)poff + *poff;
11611
11612+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11613+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11614+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11615+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11616+#endif
11617+
11618 if (!*poff || ptr < text || ptr >= text_end)
11619 continue;
11620 /* turn DS segment override prefix into lock prefix */
11621- if (*ptr == 0x3e)
11622+ if (*ktla_ktva(ptr) == 0x3e)
11623 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11624 };
11625 mutex_unlock(&text_mutex);
11626@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11627 for (poff = start; poff < end; poff++) {
11628 u8 *ptr = (u8 *)poff + *poff;
11629
11630+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11631+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11632+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11633+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11634+#endif
11635+
11636 if (!*poff || ptr < text || ptr >= text_end)
11637 continue;
11638 /* turn lock prefix into DS segment override prefix */
11639- if (*ptr == 0xf0)
11640+ if (*ktla_ktva(ptr) == 0xf0)
11641 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11642 };
11643 mutex_unlock(&text_mutex);
11644@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11645
11646 BUG_ON(p->len > MAX_PATCH_LEN);
11647 /* prep the buffer with the original instructions */
11648- memcpy(insnbuf, p->instr, p->len);
11649+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11650 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11651 (unsigned long)p->instr, p->len);
11652
11653@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11654 if (smp_alt_once)
11655 free_init_pages("SMP alternatives",
11656 (unsigned long)__smp_locks,
11657- (unsigned long)__smp_locks_end);
11658+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11659
11660 restart_nmi();
11661 }
11662@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11663 * instructions. And on the local CPU you need to be protected again NMI or MCE
11664 * handlers seeing an inconsistent instruction while you patch.
11665 */
11666-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11667+void *__kprobes text_poke_early(void *addr, const void *opcode,
11668 size_t len)
11669 {
11670 unsigned long flags;
11671 local_irq_save(flags);
11672- memcpy(addr, opcode, len);
11673+
11674+ pax_open_kernel();
11675+ memcpy(ktla_ktva(addr), opcode, len);
11676 sync_core();
11677+ pax_close_kernel();
11678+
11679 local_irq_restore(flags);
11680 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11681 that causes hangs on some VIA CPUs. */
11682@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11683 */
11684 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11685 {
11686- unsigned long flags;
11687- char *vaddr;
11688+ unsigned char *vaddr = ktla_ktva(addr);
11689 struct page *pages[2];
11690- int i;
11691+ size_t i;
11692
11693 if (!core_kernel_text((unsigned long)addr)) {
11694- pages[0] = vmalloc_to_page(addr);
11695- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11696+ pages[0] = vmalloc_to_page(vaddr);
11697+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11698 } else {
11699- pages[0] = virt_to_page(addr);
11700+ pages[0] = virt_to_page(vaddr);
11701 WARN_ON(!PageReserved(pages[0]));
11702- pages[1] = virt_to_page(addr + PAGE_SIZE);
11703+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11704 }
11705 BUG_ON(!pages[0]);
11706- local_irq_save(flags);
11707- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11708- if (pages[1])
11709- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11710- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11711- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11712- clear_fixmap(FIX_TEXT_POKE0);
11713- if (pages[1])
11714- clear_fixmap(FIX_TEXT_POKE1);
11715- local_flush_tlb();
11716- sync_core();
11717- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11718- that causes hangs on some VIA CPUs. */
11719+ text_poke_early(addr, opcode, len);
11720 for (i = 0; i < len; i++)
11721- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11722- local_irq_restore(flags);
11723+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11724 return addr;
11725 }
11726
11727diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11728index f98d84c..e402a69 100644
11729--- a/arch/x86/kernel/apic/apic.c
11730+++ b/arch/x86/kernel/apic/apic.c
11731@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11732 /*
11733 * Debug level, exported for io_apic.c
11734 */
11735-unsigned int apic_verbosity;
11736+int apic_verbosity;
11737
11738 int pic_mode;
11739
11740@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11741 apic_write(APIC_ESR, 0);
11742 v1 = apic_read(APIC_ESR);
11743 ack_APIC_irq();
11744- atomic_inc(&irq_err_count);
11745+ atomic_inc_unchecked(&irq_err_count);
11746
11747 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11748 smp_processor_id(), v0 , v1);
11749diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11750index 6d939d7..0697fcc 100644
11751--- a/arch/x86/kernel/apic/io_apic.c
11752+++ b/arch/x86/kernel/apic/io_apic.c
11753@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11754 }
11755 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11756
11757-void lock_vector_lock(void)
11758+void lock_vector_lock(void) __acquires(vector_lock)
11759 {
11760 /* Used to the online set of cpus does not change
11761 * during assign_irq_vector.
11762@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11763 raw_spin_lock(&vector_lock);
11764 }
11765
11766-void unlock_vector_lock(void)
11767+void unlock_vector_lock(void) __releases(vector_lock)
11768 {
11769 raw_spin_unlock(&vector_lock);
11770 }
11771@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11772 ack_APIC_irq();
11773 }
11774
11775-atomic_t irq_mis_count;
11776+atomic_unchecked_t irq_mis_count;
11777
11778 static void ack_apic_level(struct irq_data *data)
11779 {
11780@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11781 * at the cpu.
11782 */
11783 if (!(v & (1 << (i & 0x1f)))) {
11784- atomic_inc(&irq_mis_count);
11785+ atomic_inc_unchecked(&irq_mis_count);
11786
11787 eoi_ioapic_irq(irq, cfg);
11788 }
11789diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11790index a46bd38..6b906d7 100644
11791--- a/arch/x86/kernel/apm_32.c
11792+++ b/arch/x86/kernel/apm_32.c
11793@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11794 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11795 * even though they are called in protected mode.
11796 */
11797-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11798+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11799 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11800
11801 static const char driver_version[] = "1.16ac"; /* no spaces */
11802@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11803 BUG_ON(cpu != 0);
11804 gdt = get_cpu_gdt_table(cpu);
11805 save_desc_40 = gdt[0x40 / 8];
11806+
11807+ pax_open_kernel();
11808 gdt[0x40 / 8] = bad_bios_desc;
11809+ pax_close_kernel();
11810
11811 apm_irq_save(flags);
11812 APM_DO_SAVE_SEGS;
11813@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11814 &call->esi);
11815 APM_DO_RESTORE_SEGS;
11816 apm_irq_restore(flags);
11817+
11818+ pax_open_kernel();
11819 gdt[0x40 / 8] = save_desc_40;
11820+ pax_close_kernel();
11821+
11822 put_cpu();
11823
11824 return call->eax & 0xff;
11825@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11826 BUG_ON(cpu != 0);
11827 gdt = get_cpu_gdt_table(cpu);
11828 save_desc_40 = gdt[0x40 / 8];
11829+
11830+ pax_open_kernel();
11831 gdt[0x40 / 8] = bad_bios_desc;
11832+ pax_close_kernel();
11833
11834 apm_irq_save(flags);
11835 APM_DO_SAVE_SEGS;
11836@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11837 &call->eax);
11838 APM_DO_RESTORE_SEGS;
11839 apm_irq_restore(flags);
11840+
11841+ pax_open_kernel();
11842 gdt[0x40 / 8] = save_desc_40;
11843+ pax_close_kernel();
11844+
11845 put_cpu();
11846 return error;
11847 }
11848@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11849 * code to that CPU.
11850 */
11851 gdt = get_cpu_gdt_table(0);
11852+
11853+ pax_open_kernel();
11854 set_desc_base(&gdt[APM_CS >> 3],
11855 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11856 set_desc_base(&gdt[APM_CS_16 >> 3],
11857 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11858 set_desc_base(&gdt[APM_DS >> 3],
11859 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11860+ pax_close_kernel();
11861
11862 proc_create("apm", 0, NULL, &apm_file_ops);
11863
11864diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11865index 4f13faf..87db5d2 100644
11866--- a/arch/x86/kernel/asm-offsets.c
11867+++ b/arch/x86/kernel/asm-offsets.c
11868@@ -33,6 +33,8 @@ void common(void) {
11869 OFFSET(TI_status, thread_info, status);
11870 OFFSET(TI_addr_limit, thread_info, addr_limit);
11871 OFFSET(TI_preempt_count, thread_info, preempt_count);
11872+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11873+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11874
11875 BLANK();
11876 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11877@@ -53,8 +55,26 @@ void common(void) {
11878 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11879 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11880 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11881+
11882+#ifdef CONFIG_PAX_KERNEXEC
11883+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11884 #endif
11885
11886+#ifdef CONFIG_PAX_MEMORY_UDEREF
11887+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11888+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11889+#ifdef CONFIG_X86_64
11890+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11891+#endif
11892+#endif
11893+
11894+#endif
11895+
11896+ BLANK();
11897+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11898+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11899+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11900+
11901 #ifdef CONFIG_XEN
11902 BLANK();
11903 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11904diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11905index e72a119..6e2955d 100644
11906--- a/arch/x86/kernel/asm-offsets_64.c
11907+++ b/arch/x86/kernel/asm-offsets_64.c
11908@@ -69,6 +69,7 @@ int main(void)
11909 BLANK();
11910 #undef ENTRY
11911
11912+ DEFINE(TSS_size, sizeof(struct tss_struct));
11913 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11914 BLANK();
11915
11916diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11917index 25f24dc..4094a7f 100644
11918--- a/arch/x86/kernel/cpu/Makefile
11919+++ b/arch/x86/kernel/cpu/Makefile
11920@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11921 CFLAGS_REMOVE_perf_event.o = -pg
11922 endif
11923
11924-# Make sure load_percpu_segment has no stackprotector
11925-nostackp := $(call cc-option, -fno-stack-protector)
11926-CFLAGS_common.o := $(nostackp)
11927-
11928 obj-y := intel_cacheinfo.o scattered.o topology.o
11929 obj-y += proc.o capflags.o powerflags.o common.o
11930 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11931diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11932index 0bab2b1..d0a1bf8 100644
11933--- a/arch/x86/kernel/cpu/amd.c
11934+++ b/arch/x86/kernel/cpu/amd.c
11935@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11936 unsigned int size)
11937 {
11938 /* AMD errata T13 (order #21922) */
11939- if ((c->x86 == 6)) {
11940+ if (c->x86 == 6) {
11941 /* Duron Rev A0 */
11942 if (c->x86_model == 3 && c->x86_mask == 0)
11943 size = 64;
11944diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11945index aa003b1..47ea638 100644
11946--- a/arch/x86/kernel/cpu/common.c
11947+++ b/arch/x86/kernel/cpu/common.c
11948@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11949
11950 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11951
11952-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11953-#ifdef CONFIG_X86_64
11954- /*
11955- * We need valid kernel segments for data and code in long mode too
11956- * IRET will check the segment types kkeil 2000/10/28
11957- * Also sysret mandates a special GDT layout
11958- *
11959- * TLS descriptors are currently at a different place compared to i386.
11960- * Hopefully nobody expects them at a fixed place (Wine?)
11961- */
11962- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11963- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11964- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11965- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11966- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11967- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11968-#else
11969- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11970- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11971- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11972- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11973- /*
11974- * Segments used for calling PnP BIOS have byte granularity.
11975- * They code segments and data segments have fixed 64k limits,
11976- * the transfer segment sizes are set at run time.
11977- */
11978- /* 32-bit code */
11979- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11980- /* 16-bit code */
11981- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11982- /* 16-bit data */
11983- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11984- /* 16-bit data */
11985- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11986- /* 16-bit data */
11987- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11988- /*
11989- * The APM segments have byte granularity and their bases
11990- * are set at run time. All have 64k limits.
11991- */
11992- /* 32-bit code */
11993- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11994- /* 16-bit code */
11995- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11996- /* data */
11997- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11998-
11999- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12000- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12001- GDT_STACK_CANARY_INIT
12002-#endif
12003-} };
12004-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12005-
12006 static int __init x86_xsave_setup(char *s)
12007 {
12008 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12009@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12010 {
12011 struct desc_ptr gdt_descr;
12012
12013- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12014+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12015 gdt_descr.size = GDT_SIZE - 1;
12016 load_gdt(&gdt_descr);
12017 /* Reload the per-cpu base */
12018@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12019 /* Filter out anything that depends on CPUID levels we don't have */
12020 filter_cpuid_features(c, true);
12021
12022+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12023+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12024+#endif
12025+
12026 /* If the model name is still unset, do table lookup. */
12027 if (!c->x86_model_id[0]) {
12028 const char *p;
12029@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12030 }
12031 __setup("clearcpuid=", setup_disablecpuid);
12032
12033+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12034+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12035+
12036 #ifdef CONFIG_X86_64
12037 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12038
12039@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12040 EXPORT_PER_CPU_SYMBOL(current_task);
12041
12042 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12043- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12044+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12045 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12046
12047 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12048@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12049 {
12050 memset(regs, 0, sizeof(struct pt_regs));
12051 regs->fs = __KERNEL_PERCPU;
12052- regs->gs = __KERNEL_STACK_CANARY;
12053+ savesegment(gs, regs->gs);
12054
12055 return regs;
12056 }
12057@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12058 int i;
12059
12060 cpu = stack_smp_processor_id();
12061- t = &per_cpu(init_tss, cpu);
12062+ t = init_tss + cpu;
12063 oist = &per_cpu(orig_ist, cpu);
12064
12065 #ifdef CONFIG_NUMA
12066@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12067 switch_to_new_gdt(cpu);
12068 loadsegment(fs, 0);
12069
12070- load_idt((const struct desc_ptr *)&idt_descr);
12071+ load_idt(&idt_descr);
12072
12073 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12074 syscall_init();
12075@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12076 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12077 barrier();
12078
12079- x86_configure_nx();
12080 if (cpu != 0)
12081 enable_x2apic();
12082
12083@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12084 {
12085 int cpu = smp_processor_id();
12086 struct task_struct *curr = current;
12087- struct tss_struct *t = &per_cpu(init_tss, cpu);
12088+ struct tss_struct *t = init_tss + cpu;
12089 struct thread_struct *thread = &curr->thread;
12090
12091 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12092diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12093index 5231312..a78a987 100644
12094--- a/arch/x86/kernel/cpu/intel.c
12095+++ b/arch/x86/kernel/cpu/intel.c
12096@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12097 * Update the IDT descriptor and reload the IDT so that
12098 * it uses the read-only mapped virtual address.
12099 */
12100- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12101+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12102 load_idt(&idt_descr);
12103 }
12104 #endif
12105diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12106index 2af127d..8ff7ac0 100644
12107--- a/arch/x86/kernel/cpu/mcheck/mce.c
12108+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12109@@ -42,6 +42,7 @@
12110 #include <asm/processor.h>
12111 #include <asm/mce.h>
12112 #include <asm/msr.h>
12113+#include <asm/local.h>
12114
12115 #include "mce-internal.h"
12116
12117@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12118 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12119 m->cs, m->ip);
12120
12121- if (m->cs == __KERNEL_CS)
12122+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12123 print_symbol("{%s}", m->ip);
12124 pr_cont("\n");
12125 }
12126@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12127
12128 #define PANIC_TIMEOUT 5 /* 5 seconds */
12129
12130-static atomic_t mce_paniced;
12131+static atomic_unchecked_t mce_paniced;
12132
12133 static int fake_panic;
12134-static atomic_t mce_fake_paniced;
12135+static atomic_unchecked_t mce_fake_paniced;
12136
12137 /* Panic in progress. Enable interrupts and wait for final IPI */
12138 static void wait_for_panic(void)
12139@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12140 /*
12141 * Make sure only one CPU runs in machine check panic
12142 */
12143- if (atomic_inc_return(&mce_paniced) > 1)
12144+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12145 wait_for_panic();
12146 barrier();
12147
12148@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12149 console_verbose();
12150 } else {
12151 /* Don't log too much for fake panic */
12152- if (atomic_inc_return(&mce_fake_paniced) > 1)
12153+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12154 return;
12155 }
12156 /* First print corrected ones that are still unlogged */
12157@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12158 * might have been modified by someone else.
12159 */
12160 rmb();
12161- if (atomic_read(&mce_paniced))
12162+ if (atomic_read_unchecked(&mce_paniced))
12163 wait_for_panic();
12164 if (!monarch_timeout)
12165 goto out;
12166@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12167 }
12168
12169 /* Call the installed machine check handler for this CPU setup. */
12170-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12171+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12172 unexpected_machine_check;
12173
12174 /*
12175@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12176 return;
12177 }
12178
12179+ pax_open_kernel();
12180 machine_check_vector = do_machine_check;
12181+ pax_close_kernel();
12182
12183 __mcheck_cpu_init_generic();
12184 __mcheck_cpu_init_vendor(c);
12185@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12186 */
12187
12188 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12189-static int mce_chrdev_open_count; /* #times opened */
12190+static local_t mce_chrdev_open_count; /* #times opened */
12191 static int mce_chrdev_open_exclu; /* already open exclusive? */
12192
12193 static int mce_chrdev_open(struct inode *inode, struct file *file)
12194@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12195 spin_lock(&mce_chrdev_state_lock);
12196
12197 if (mce_chrdev_open_exclu ||
12198- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12199+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12200 spin_unlock(&mce_chrdev_state_lock);
12201
12202 return -EBUSY;
12203@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12204
12205 if (file->f_flags & O_EXCL)
12206 mce_chrdev_open_exclu = 1;
12207- mce_chrdev_open_count++;
12208+ local_inc(&mce_chrdev_open_count);
12209
12210 spin_unlock(&mce_chrdev_state_lock);
12211
12212@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12213 {
12214 spin_lock(&mce_chrdev_state_lock);
12215
12216- mce_chrdev_open_count--;
12217+ local_dec(&mce_chrdev_open_count);
12218 mce_chrdev_open_exclu = 0;
12219
12220 spin_unlock(&mce_chrdev_state_lock);
12221@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12222 static void mce_reset(void)
12223 {
12224 cpu_missing = 0;
12225- atomic_set(&mce_fake_paniced, 0);
12226+ atomic_set_unchecked(&mce_fake_paniced, 0);
12227 atomic_set(&mce_executing, 0);
12228 atomic_set(&mce_callin, 0);
12229 atomic_set(&global_nwo, 0);
12230diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12231index 5c0e653..0882b0a 100644
12232--- a/arch/x86/kernel/cpu/mcheck/p5.c
12233+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12234@@ -12,6 +12,7 @@
12235 #include <asm/system.h>
12236 #include <asm/mce.h>
12237 #include <asm/msr.h>
12238+#include <asm/pgtable.h>
12239
12240 /* By default disabled */
12241 int mce_p5_enabled __read_mostly;
12242@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12243 if (!cpu_has(c, X86_FEATURE_MCE))
12244 return;
12245
12246+ pax_open_kernel();
12247 machine_check_vector = pentium_machine_check;
12248+ pax_close_kernel();
12249 /* Make sure the vector pointer is visible before we enable MCEs: */
12250 wmb();
12251
12252diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12253index 54060f5..c1a7577 100644
12254--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12255+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12256@@ -11,6 +11,7 @@
12257 #include <asm/system.h>
12258 #include <asm/mce.h>
12259 #include <asm/msr.h>
12260+#include <asm/pgtable.h>
12261
12262 /* Machine check handler for WinChip C6: */
12263 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12264@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12265 {
12266 u32 lo, hi;
12267
12268+ pax_open_kernel();
12269 machine_check_vector = winchip_machine_check;
12270+ pax_close_kernel();
12271 /* Make sure the vector pointer is visible before we enable MCEs: */
12272 wmb();
12273
12274diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12275index 6b96110..0da73eb 100644
12276--- a/arch/x86/kernel/cpu/mtrr/main.c
12277+++ b/arch/x86/kernel/cpu/mtrr/main.c
12278@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12279 u64 size_or_mask, size_and_mask;
12280 static bool mtrr_aps_delayed_init;
12281
12282-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12283+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12284
12285 const struct mtrr_ops *mtrr_if;
12286
12287diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12288index df5e41f..816c719 100644
12289--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12290+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12291@@ -25,7 +25,7 @@ struct mtrr_ops {
12292 int (*validate_add_page)(unsigned long base, unsigned long size,
12293 unsigned int type);
12294 int (*have_wrcomb)(void);
12295-};
12296+} __do_const;
12297
12298 extern int generic_get_free_region(unsigned long base, unsigned long size,
12299 int replace_reg);
12300diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12301index 2bda212..78cc605 100644
12302--- a/arch/x86/kernel/cpu/perf_event.c
12303+++ b/arch/x86/kernel/cpu/perf_event.c
12304@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12305 break;
12306
12307 perf_callchain_store(entry, frame.return_address);
12308- fp = frame.next_frame;
12309+ fp = (const void __force_user *)frame.next_frame;
12310 }
12311 }
12312
12313diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12314index 13ad899..f642b9a 100644
12315--- a/arch/x86/kernel/crash.c
12316+++ b/arch/x86/kernel/crash.c
12317@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12318 {
12319 #ifdef CONFIG_X86_32
12320 struct pt_regs fixed_regs;
12321-#endif
12322
12323-#ifdef CONFIG_X86_32
12324- if (!user_mode_vm(regs)) {
12325+ if (!user_mode(regs)) {
12326 crash_fixup_ss_esp(&fixed_regs, regs);
12327 regs = &fixed_regs;
12328 }
12329diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12330index 37250fe..bf2ec74 100644
12331--- a/arch/x86/kernel/doublefault_32.c
12332+++ b/arch/x86/kernel/doublefault_32.c
12333@@ -11,7 +11,7 @@
12334
12335 #define DOUBLEFAULT_STACKSIZE (1024)
12336 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12337-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12338+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12339
12340 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12341
12342@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12343 unsigned long gdt, tss;
12344
12345 store_gdt(&gdt_desc);
12346- gdt = gdt_desc.address;
12347+ gdt = (unsigned long)gdt_desc.address;
12348
12349 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12350
12351@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12352 /* 0x2 bit is always set */
12353 .flags = X86_EFLAGS_SF | 0x2,
12354 .sp = STACK_START,
12355- .es = __USER_DS,
12356+ .es = __KERNEL_DS,
12357 .cs = __KERNEL_CS,
12358 .ss = __KERNEL_DS,
12359- .ds = __USER_DS,
12360+ .ds = __KERNEL_DS,
12361 .fs = __KERNEL_PERCPU,
12362
12363 .__cr3 = __pa_nodebug(swapper_pg_dir),
12364diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12365index 1aae78f..aab3a3d 100644
12366--- a/arch/x86/kernel/dumpstack.c
12367+++ b/arch/x86/kernel/dumpstack.c
12368@@ -2,6 +2,9 @@
12369 * Copyright (C) 1991, 1992 Linus Torvalds
12370 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12371 */
12372+#ifdef CONFIG_GRKERNSEC_HIDESYM
12373+#define __INCLUDED_BY_HIDESYM 1
12374+#endif
12375 #include <linux/kallsyms.h>
12376 #include <linux/kprobes.h>
12377 #include <linux/uaccess.h>
12378@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12379 static void
12380 print_ftrace_graph_addr(unsigned long addr, void *data,
12381 const struct stacktrace_ops *ops,
12382- struct thread_info *tinfo, int *graph)
12383+ struct task_struct *task, int *graph)
12384 {
12385- struct task_struct *task = tinfo->task;
12386 unsigned long ret_addr;
12387 int index = task->curr_ret_stack;
12388
12389@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12390 static inline void
12391 print_ftrace_graph_addr(unsigned long addr, void *data,
12392 const struct stacktrace_ops *ops,
12393- struct thread_info *tinfo, int *graph)
12394+ struct task_struct *task, int *graph)
12395 { }
12396 #endif
12397
12398@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12399 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12400 */
12401
12402-static inline int valid_stack_ptr(struct thread_info *tinfo,
12403- void *p, unsigned int size, void *end)
12404+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12405 {
12406- void *t = tinfo;
12407 if (end) {
12408 if (p < end && p >= (end-THREAD_SIZE))
12409 return 1;
12410@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12411 }
12412
12413 unsigned long
12414-print_context_stack(struct thread_info *tinfo,
12415+print_context_stack(struct task_struct *task, void *stack_start,
12416 unsigned long *stack, unsigned long bp,
12417 const struct stacktrace_ops *ops, void *data,
12418 unsigned long *end, int *graph)
12419 {
12420 struct stack_frame *frame = (struct stack_frame *)bp;
12421
12422- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12423+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12424 unsigned long addr;
12425
12426 addr = *stack;
12427@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12428 } else {
12429 ops->address(data, addr, 0);
12430 }
12431- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12432+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12433 }
12434 stack++;
12435 }
12436@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12437 EXPORT_SYMBOL_GPL(print_context_stack);
12438
12439 unsigned long
12440-print_context_stack_bp(struct thread_info *tinfo,
12441+print_context_stack_bp(struct task_struct *task, void *stack_start,
12442 unsigned long *stack, unsigned long bp,
12443 const struct stacktrace_ops *ops, void *data,
12444 unsigned long *end, int *graph)
12445@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12446 struct stack_frame *frame = (struct stack_frame *)bp;
12447 unsigned long *ret_addr = &frame->return_address;
12448
12449- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12450+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12451 unsigned long addr = *ret_addr;
12452
12453 if (!__kernel_text_address(addr))
12454@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12455 ops->address(data, addr, 1);
12456 frame = frame->next_frame;
12457 ret_addr = &frame->return_address;
12458- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12459+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12460 }
12461
12462 return (unsigned long)frame;
12463@@ -186,7 +186,7 @@ void dump_stack(void)
12464
12465 bp = stack_frame(current, NULL);
12466 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12467- current->pid, current->comm, print_tainted(),
12468+ task_pid_nr(current), current->comm, print_tainted(),
12469 init_utsname()->release,
12470 (int)strcspn(init_utsname()->version, " "),
12471 init_utsname()->version);
12472@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12473 }
12474 EXPORT_SYMBOL_GPL(oops_begin);
12475
12476+extern void gr_handle_kernel_exploit(void);
12477+
12478 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12479 {
12480 if (regs && kexec_should_crash(current))
12481@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12482 panic("Fatal exception in interrupt");
12483 if (panic_on_oops)
12484 panic("Fatal exception");
12485- do_exit(signr);
12486+
12487+ gr_handle_kernel_exploit();
12488+
12489+ do_group_exit(signr);
12490 }
12491
12492 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12493@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12494
12495 show_registers(regs);
12496 #ifdef CONFIG_X86_32
12497- if (user_mode_vm(regs)) {
12498+ if (user_mode(regs)) {
12499 sp = regs->sp;
12500 ss = regs->ss & 0xffff;
12501 } else {
12502@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12503 unsigned long flags = oops_begin();
12504 int sig = SIGSEGV;
12505
12506- if (!user_mode_vm(regs))
12507+ if (!user_mode(regs))
12508 report_bug(regs->ip, regs);
12509
12510 if (__die(str, regs, err))
12511diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12512index c99f9ed..2a15d80 100644
12513--- a/arch/x86/kernel/dumpstack_32.c
12514+++ b/arch/x86/kernel/dumpstack_32.c
12515@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12516 bp = stack_frame(task, regs);
12517
12518 for (;;) {
12519- struct thread_info *context;
12520+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12521
12522- context = (struct thread_info *)
12523- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12524- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12525+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12526
12527- stack = (unsigned long *)context->previous_esp;
12528- if (!stack)
12529+ if (stack_start == task_stack_page(task))
12530 break;
12531+ stack = *(unsigned long **)stack_start;
12532 if (ops->stack(data, "IRQ") < 0)
12533 break;
12534 touch_nmi_watchdog();
12535@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12536 * When in-kernel, we also print out the stack and code at the
12537 * time of the fault..
12538 */
12539- if (!user_mode_vm(regs)) {
12540+ if (!user_mode(regs)) {
12541 unsigned int code_prologue = code_bytes * 43 / 64;
12542 unsigned int code_len = code_bytes;
12543 unsigned char c;
12544 u8 *ip;
12545+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12546
12547 printk(KERN_EMERG "Stack:\n");
12548 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12549
12550 printk(KERN_EMERG "Code: ");
12551
12552- ip = (u8 *)regs->ip - code_prologue;
12553+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12554 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12555 /* try starting at IP */
12556- ip = (u8 *)regs->ip;
12557+ ip = (u8 *)regs->ip + cs_base;
12558 code_len = code_len - code_prologue + 1;
12559 }
12560 for (i = 0; i < code_len; i++, ip++) {
12561@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12562 printk(KERN_CONT " Bad EIP value.");
12563 break;
12564 }
12565- if (ip == (u8 *)regs->ip)
12566+ if (ip == (u8 *)regs->ip + cs_base)
12567 printk(KERN_CONT "<%02x> ", c);
12568 else
12569 printk(KERN_CONT "%02x ", c);
12570@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12571 {
12572 unsigned short ud2;
12573
12574+ ip = ktla_ktva(ip);
12575 if (ip < PAGE_OFFSET)
12576 return 0;
12577 if (probe_kernel_address((unsigned short *)ip, ud2))
12578@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12579
12580 return ud2 == 0x0b0f;
12581 }
12582+
12583+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12584+void pax_check_alloca(unsigned long size)
12585+{
12586+ unsigned long sp = (unsigned long)&sp, stack_left;
12587+
12588+ /* all kernel stacks are of the same size */
12589+ stack_left = sp & (THREAD_SIZE - 1);
12590+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12591+}
12592+EXPORT_SYMBOL(pax_check_alloca);
12593+#endif
12594diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12595index 6d728d9..279514e 100644
12596--- a/arch/x86/kernel/dumpstack_64.c
12597+++ b/arch/x86/kernel/dumpstack_64.c
12598@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12599 unsigned long *irq_stack_end =
12600 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12601 unsigned used = 0;
12602- struct thread_info *tinfo;
12603 int graph = 0;
12604 unsigned long dummy;
12605+ void *stack_start;
12606
12607 if (!task)
12608 task = current;
12609@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12610 * current stack address. If the stacks consist of nested
12611 * exceptions
12612 */
12613- tinfo = task_thread_info(task);
12614 for (;;) {
12615 char *id;
12616 unsigned long *estack_end;
12617+
12618 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12619 &used, &id);
12620
12621@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12622 if (ops->stack(data, id) < 0)
12623 break;
12624
12625- bp = ops->walk_stack(tinfo, stack, bp, ops,
12626+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12627 data, estack_end, &graph);
12628 ops->stack(data, "<EOE>");
12629 /*
12630@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12631 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12632 if (ops->stack(data, "IRQ") < 0)
12633 break;
12634- bp = ops->walk_stack(tinfo, stack, bp,
12635+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12636 ops, data, irq_stack_end, &graph);
12637 /*
12638 * We link to the next stack (which would be
12639@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12640 /*
12641 * This handles the process stack:
12642 */
12643- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12644+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12645+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12646 put_cpu();
12647 }
12648 EXPORT_SYMBOL(dump_trace);
12649@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12650
12651 return ud2 == 0x0b0f;
12652 }
12653+
12654+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12655+void pax_check_alloca(unsigned long size)
12656+{
12657+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12658+ unsigned cpu, used;
12659+ char *id;
12660+
12661+ /* check the process stack first */
12662+ stack_start = (unsigned long)task_stack_page(current);
12663+ stack_end = stack_start + THREAD_SIZE;
12664+ if (likely(stack_start <= sp && sp < stack_end)) {
12665+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12666+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12667+ return;
12668+ }
12669+
12670+ cpu = get_cpu();
12671+
12672+ /* check the irq stacks */
12673+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12674+ stack_start = stack_end - IRQ_STACK_SIZE;
12675+ if (stack_start <= sp && sp < stack_end) {
12676+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12677+ put_cpu();
12678+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12679+ return;
12680+ }
12681+
12682+ /* check the exception stacks */
12683+ used = 0;
12684+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12685+ stack_start = stack_end - EXCEPTION_STKSZ;
12686+ if (stack_end && stack_start <= sp && sp < stack_end) {
12687+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12688+ put_cpu();
12689+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12690+ return;
12691+ }
12692+
12693+ put_cpu();
12694+
12695+ /* unknown stack */
12696+ BUG();
12697+}
12698+EXPORT_SYMBOL(pax_check_alloca);
12699+#endif
12700diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12701index cd28a35..c72ed9a 100644
12702--- a/arch/x86/kernel/early_printk.c
12703+++ b/arch/x86/kernel/early_printk.c
12704@@ -7,6 +7,7 @@
12705 #include <linux/pci_regs.h>
12706 #include <linux/pci_ids.h>
12707 #include <linux/errno.h>
12708+#include <linux/sched.h>
12709 #include <asm/io.h>
12710 #include <asm/processor.h>
12711 #include <asm/fcntl.h>
12712diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12713index f3f6f53..0841b66 100644
12714--- a/arch/x86/kernel/entry_32.S
12715+++ b/arch/x86/kernel/entry_32.S
12716@@ -186,13 +186,146 @@
12717 /*CFI_REL_OFFSET gs, PT_GS*/
12718 .endm
12719 .macro SET_KERNEL_GS reg
12720+
12721+#ifdef CONFIG_CC_STACKPROTECTOR
12722 movl $(__KERNEL_STACK_CANARY), \reg
12723+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12724+ movl $(__USER_DS), \reg
12725+#else
12726+ xorl \reg, \reg
12727+#endif
12728+
12729 movl \reg, %gs
12730 .endm
12731
12732 #endif /* CONFIG_X86_32_LAZY_GS */
12733
12734-.macro SAVE_ALL
12735+.macro pax_enter_kernel
12736+#ifdef CONFIG_PAX_KERNEXEC
12737+ call pax_enter_kernel
12738+#endif
12739+.endm
12740+
12741+.macro pax_exit_kernel
12742+#ifdef CONFIG_PAX_KERNEXEC
12743+ call pax_exit_kernel
12744+#endif
12745+.endm
12746+
12747+#ifdef CONFIG_PAX_KERNEXEC
12748+ENTRY(pax_enter_kernel)
12749+#ifdef CONFIG_PARAVIRT
12750+ pushl %eax
12751+ pushl %ecx
12752+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12753+ mov %eax, %esi
12754+#else
12755+ mov %cr0, %esi
12756+#endif
12757+ bts $16, %esi
12758+ jnc 1f
12759+ mov %cs, %esi
12760+ cmp $__KERNEL_CS, %esi
12761+ jz 3f
12762+ ljmp $__KERNEL_CS, $3f
12763+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12764+2:
12765+#ifdef CONFIG_PARAVIRT
12766+ mov %esi, %eax
12767+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12768+#else
12769+ mov %esi, %cr0
12770+#endif
12771+3:
12772+#ifdef CONFIG_PARAVIRT
12773+ popl %ecx
12774+ popl %eax
12775+#endif
12776+ ret
12777+ENDPROC(pax_enter_kernel)
12778+
12779+ENTRY(pax_exit_kernel)
12780+#ifdef CONFIG_PARAVIRT
12781+ pushl %eax
12782+ pushl %ecx
12783+#endif
12784+ mov %cs, %esi
12785+ cmp $__KERNEXEC_KERNEL_CS, %esi
12786+ jnz 2f
12787+#ifdef CONFIG_PARAVIRT
12788+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12789+ mov %eax, %esi
12790+#else
12791+ mov %cr0, %esi
12792+#endif
12793+ btr $16, %esi
12794+ ljmp $__KERNEL_CS, $1f
12795+1:
12796+#ifdef CONFIG_PARAVIRT
12797+ mov %esi, %eax
12798+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12799+#else
12800+ mov %esi, %cr0
12801+#endif
12802+2:
12803+#ifdef CONFIG_PARAVIRT
12804+ popl %ecx
12805+ popl %eax
12806+#endif
12807+ ret
12808+ENDPROC(pax_exit_kernel)
12809+#endif
12810+
12811+.macro pax_erase_kstack
12812+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12813+ call pax_erase_kstack
12814+#endif
12815+.endm
12816+
12817+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12818+/*
12819+ * ebp: thread_info
12820+ * ecx, edx: can be clobbered
12821+ */
12822+ENTRY(pax_erase_kstack)
12823+ pushl %edi
12824+ pushl %eax
12825+
12826+ mov TI_lowest_stack(%ebp), %edi
12827+ mov $-0xBEEF, %eax
12828+ std
12829+
12830+1: mov %edi, %ecx
12831+ and $THREAD_SIZE_asm - 1, %ecx
12832+ shr $2, %ecx
12833+ repne scasl
12834+ jecxz 2f
12835+
12836+ cmp $2*16, %ecx
12837+ jc 2f
12838+
12839+ mov $2*16, %ecx
12840+ repe scasl
12841+ jecxz 2f
12842+ jne 1b
12843+
12844+2: cld
12845+ mov %esp, %ecx
12846+ sub %edi, %ecx
12847+ shr $2, %ecx
12848+ rep stosl
12849+
12850+ mov TI_task_thread_sp0(%ebp), %edi
12851+ sub $128, %edi
12852+ mov %edi, TI_lowest_stack(%ebp)
12853+
12854+ popl %eax
12855+ popl %edi
12856+ ret
12857+ENDPROC(pax_erase_kstack)
12858+#endif
12859+
12860+.macro __SAVE_ALL _DS
12861 cld
12862 PUSH_GS
12863 pushl_cfi %fs
12864@@ -215,7 +348,7 @@
12865 CFI_REL_OFFSET ecx, 0
12866 pushl_cfi %ebx
12867 CFI_REL_OFFSET ebx, 0
12868- movl $(__USER_DS), %edx
12869+ movl $\_DS, %edx
12870 movl %edx, %ds
12871 movl %edx, %es
12872 movl $(__KERNEL_PERCPU), %edx
12873@@ -223,6 +356,15 @@
12874 SET_KERNEL_GS %edx
12875 .endm
12876
12877+.macro SAVE_ALL
12878+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12879+ __SAVE_ALL __KERNEL_DS
12880+ pax_enter_kernel
12881+#else
12882+ __SAVE_ALL __USER_DS
12883+#endif
12884+.endm
12885+
12886 .macro RESTORE_INT_REGS
12887 popl_cfi %ebx
12888 CFI_RESTORE ebx
12889@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12890 popfl_cfi
12891 jmp syscall_exit
12892 CFI_ENDPROC
12893-END(ret_from_fork)
12894+ENDPROC(ret_from_fork)
12895
12896 /*
12897 * Interrupt exit functions should be protected against kprobes
12898@@ -333,7 +475,15 @@ check_userspace:
12899 movb PT_CS(%esp), %al
12900 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12901 cmpl $USER_RPL, %eax
12902+
12903+#ifdef CONFIG_PAX_KERNEXEC
12904+ jae resume_userspace
12905+
12906+ PAX_EXIT_KERNEL
12907+ jmp resume_kernel
12908+#else
12909 jb resume_kernel # not returning to v8086 or userspace
12910+#endif
12911
12912 ENTRY(resume_userspace)
12913 LOCKDEP_SYS_EXIT
12914@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12915 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12916 # int/exception return?
12917 jne work_pending
12918- jmp restore_all
12919-END(ret_from_exception)
12920+ jmp restore_all_pax
12921+ENDPROC(ret_from_exception)
12922
12923 #ifdef CONFIG_PREEMPT
12924 ENTRY(resume_kernel)
12925@@ -361,7 +511,7 @@ need_resched:
12926 jz restore_all
12927 call preempt_schedule_irq
12928 jmp need_resched
12929-END(resume_kernel)
12930+ENDPROC(resume_kernel)
12931 #endif
12932 CFI_ENDPROC
12933 /*
12934@@ -395,23 +545,34 @@ sysenter_past_esp:
12935 /*CFI_REL_OFFSET cs, 0*/
12936 /*
12937 * Push current_thread_info()->sysenter_return to the stack.
12938- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12939- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12940 */
12941- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12942+ pushl_cfi $0
12943 CFI_REL_OFFSET eip, 0
12944
12945 pushl_cfi %eax
12946 SAVE_ALL
12947+ GET_THREAD_INFO(%ebp)
12948+ movl TI_sysenter_return(%ebp),%ebp
12949+ movl %ebp,PT_EIP(%esp)
12950 ENABLE_INTERRUPTS(CLBR_NONE)
12951
12952 /*
12953 * Load the potential sixth argument from user stack.
12954 * Careful about security.
12955 */
12956+ movl PT_OLDESP(%esp),%ebp
12957+
12958+#ifdef CONFIG_PAX_MEMORY_UDEREF
12959+ mov PT_OLDSS(%esp),%ds
12960+1: movl %ds:(%ebp),%ebp
12961+ push %ss
12962+ pop %ds
12963+#else
12964 cmpl $__PAGE_OFFSET-3,%ebp
12965 jae syscall_fault
12966 1: movl (%ebp),%ebp
12967+#endif
12968+
12969 movl %ebp,PT_EBP(%esp)
12970 .section __ex_table,"a"
12971 .align 4
12972@@ -434,12 +595,24 @@ sysenter_do_call:
12973 testl $_TIF_ALLWORK_MASK, %ecx
12974 jne sysexit_audit
12975 sysenter_exit:
12976+
12977+#ifdef CONFIG_PAX_RANDKSTACK
12978+ pushl_cfi %eax
12979+ movl %esp, %eax
12980+ call pax_randomize_kstack
12981+ popl_cfi %eax
12982+#endif
12983+
12984+ pax_erase_kstack
12985+
12986 /* if something modifies registers it must also disable sysexit */
12987 movl PT_EIP(%esp), %edx
12988 movl PT_OLDESP(%esp), %ecx
12989 xorl %ebp,%ebp
12990 TRACE_IRQS_ON
12991 1: mov PT_FS(%esp), %fs
12992+2: mov PT_DS(%esp), %ds
12993+3: mov PT_ES(%esp), %es
12994 PTGS_TO_GS
12995 ENABLE_INTERRUPTS_SYSEXIT
12996
12997@@ -456,6 +629,9 @@ sysenter_audit:
12998 movl %eax,%edx /* 2nd arg: syscall number */
12999 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13000 call audit_syscall_entry
13001+
13002+ pax_erase_kstack
13003+
13004 pushl_cfi %ebx
13005 movl PT_EAX(%esp),%eax /* reload syscall number */
13006 jmp sysenter_do_call
13007@@ -482,11 +658,17 @@ sysexit_audit:
13008
13009 CFI_ENDPROC
13010 .pushsection .fixup,"ax"
13011-2: movl $0,PT_FS(%esp)
13012+4: movl $0,PT_FS(%esp)
13013+ jmp 1b
13014+5: movl $0,PT_DS(%esp)
13015+ jmp 1b
13016+6: movl $0,PT_ES(%esp)
13017 jmp 1b
13018 .section __ex_table,"a"
13019 .align 4
13020- .long 1b,2b
13021+ .long 1b,4b
13022+ .long 2b,5b
13023+ .long 3b,6b
13024 .popsection
13025 PTGS_TO_GS_EX
13026 ENDPROC(ia32_sysenter_target)
13027@@ -519,6 +701,15 @@ syscall_exit:
13028 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13029 jne syscall_exit_work
13030
13031+restore_all_pax:
13032+
13033+#ifdef CONFIG_PAX_RANDKSTACK
13034+ movl %esp, %eax
13035+ call pax_randomize_kstack
13036+#endif
13037+
13038+ pax_erase_kstack
13039+
13040 restore_all:
13041 TRACE_IRQS_IRET
13042 restore_all_notrace:
13043@@ -578,14 +769,34 @@ ldt_ss:
13044 * compensating for the offset by changing to the ESPFIX segment with
13045 * a base address that matches for the difference.
13046 */
13047-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13048+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13049 mov %esp, %edx /* load kernel esp */
13050 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13051 mov %dx, %ax /* eax: new kernel esp */
13052 sub %eax, %edx /* offset (low word is 0) */
13053+#ifdef CONFIG_SMP
13054+ movl PER_CPU_VAR(cpu_number), %ebx
13055+ shll $PAGE_SHIFT_asm, %ebx
13056+ addl $cpu_gdt_table, %ebx
13057+#else
13058+ movl $cpu_gdt_table, %ebx
13059+#endif
13060 shr $16, %edx
13061- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13062- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13063+
13064+#ifdef CONFIG_PAX_KERNEXEC
13065+ mov %cr0, %esi
13066+ btr $16, %esi
13067+ mov %esi, %cr0
13068+#endif
13069+
13070+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13071+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13072+
13073+#ifdef CONFIG_PAX_KERNEXEC
13074+ bts $16, %esi
13075+ mov %esi, %cr0
13076+#endif
13077+
13078 pushl_cfi $__ESPFIX_SS
13079 pushl_cfi %eax /* new kernel esp */
13080 /* Disable interrupts, but do not irqtrace this section: we
13081@@ -614,34 +825,28 @@ work_resched:
13082 movl TI_flags(%ebp), %ecx
13083 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13084 # than syscall tracing?
13085- jz restore_all
13086+ jz restore_all_pax
13087 testb $_TIF_NEED_RESCHED, %cl
13088 jnz work_resched
13089
13090 work_notifysig: # deal with pending signals and
13091 # notify-resume requests
13092+ movl %esp, %eax
13093 #ifdef CONFIG_VM86
13094 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13095- movl %esp, %eax
13096- jne work_notifysig_v86 # returning to kernel-space or
13097+ jz 1f # returning to kernel-space or
13098 # vm86-space
13099- xorl %edx, %edx
13100- call do_notify_resume
13101- jmp resume_userspace_sig
13102
13103- ALIGN
13104-work_notifysig_v86:
13105 pushl_cfi %ecx # save ti_flags for do_notify_resume
13106 call save_v86_state # %eax contains pt_regs pointer
13107 popl_cfi %ecx
13108 movl %eax, %esp
13109-#else
13110- movl %esp, %eax
13111+1:
13112 #endif
13113 xorl %edx, %edx
13114 call do_notify_resume
13115 jmp resume_userspace_sig
13116-END(work_pending)
13117+ENDPROC(work_pending)
13118
13119 # perform syscall exit tracing
13120 ALIGN
13121@@ -649,11 +854,14 @@ syscall_trace_entry:
13122 movl $-ENOSYS,PT_EAX(%esp)
13123 movl %esp, %eax
13124 call syscall_trace_enter
13125+
13126+ pax_erase_kstack
13127+
13128 /* What it returned is what we'll actually use. */
13129 cmpl $(nr_syscalls), %eax
13130 jnae syscall_call
13131 jmp syscall_exit
13132-END(syscall_trace_entry)
13133+ENDPROC(syscall_trace_entry)
13134
13135 # perform syscall exit tracing
13136 ALIGN
13137@@ -666,20 +874,24 @@ syscall_exit_work:
13138 movl %esp, %eax
13139 call syscall_trace_leave
13140 jmp resume_userspace
13141-END(syscall_exit_work)
13142+ENDPROC(syscall_exit_work)
13143 CFI_ENDPROC
13144
13145 RING0_INT_FRAME # can't unwind into user space anyway
13146 syscall_fault:
13147+#ifdef CONFIG_PAX_MEMORY_UDEREF
13148+ push %ss
13149+ pop %ds
13150+#endif
13151 GET_THREAD_INFO(%ebp)
13152 movl $-EFAULT,PT_EAX(%esp)
13153 jmp resume_userspace
13154-END(syscall_fault)
13155+ENDPROC(syscall_fault)
13156
13157 syscall_badsys:
13158 movl $-ENOSYS,PT_EAX(%esp)
13159 jmp resume_userspace
13160-END(syscall_badsys)
13161+ENDPROC(syscall_badsys)
13162 CFI_ENDPROC
13163 /*
13164 * End of kprobes section
13165@@ -753,6 +965,36 @@ ptregs_clone:
13166 CFI_ENDPROC
13167 ENDPROC(ptregs_clone)
13168
13169+ ALIGN;
13170+ENTRY(kernel_execve)
13171+ CFI_STARTPROC
13172+ pushl_cfi %ebp
13173+ sub $PT_OLDSS+4,%esp
13174+ pushl_cfi %edi
13175+ pushl_cfi %ecx
13176+ pushl_cfi %eax
13177+ lea 3*4(%esp),%edi
13178+ mov $PT_OLDSS/4+1,%ecx
13179+ xorl %eax,%eax
13180+ rep stosl
13181+ popl_cfi %eax
13182+ popl_cfi %ecx
13183+ popl_cfi %edi
13184+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13185+ pushl_cfi %esp
13186+ call sys_execve
13187+ add $4,%esp
13188+ CFI_ADJUST_CFA_OFFSET -4
13189+ GET_THREAD_INFO(%ebp)
13190+ test %eax,%eax
13191+ jz syscall_exit
13192+ add $PT_OLDSS+4,%esp
13193+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13194+ popl_cfi %ebp
13195+ ret
13196+ CFI_ENDPROC
13197+ENDPROC(kernel_execve)
13198+
13199 .macro FIXUP_ESPFIX_STACK
13200 /*
13201 * Switch back for ESPFIX stack to the normal zerobased stack
13202@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13203 * normal stack and adjusts ESP with the matching offset.
13204 */
13205 /* fixup the stack */
13206- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13207- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13208+#ifdef CONFIG_SMP
13209+ movl PER_CPU_VAR(cpu_number), %ebx
13210+ shll $PAGE_SHIFT_asm, %ebx
13211+ addl $cpu_gdt_table, %ebx
13212+#else
13213+ movl $cpu_gdt_table, %ebx
13214+#endif
13215+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13216+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13217 shl $16, %eax
13218 addl %esp, %eax /* the adjusted stack pointer */
13219 pushl_cfi $__KERNEL_DS
13220@@ -816,7 +1065,7 @@ vector=vector+1
13221 .endr
13222 2: jmp common_interrupt
13223 .endr
13224-END(irq_entries_start)
13225+ENDPROC(irq_entries_start)
13226
13227 .previous
13228 END(interrupt)
13229@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13230 pushl_cfi $do_coprocessor_error
13231 jmp error_code
13232 CFI_ENDPROC
13233-END(coprocessor_error)
13234+ENDPROC(coprocessor_error)
13235
13236 ENTRY(simd_coprocessor_error)
13237 RING0_INT_FRAME
13238@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13239 #endif
13240 jmp error_code
13241 CFI_ENDPROC
13242-END(simd_coprocessor_error)
13243+ENDPROC(simd_coprocessor_error)
13244
13245 ENTRY(device_not_available)
13246 RING0_INT_FRAME
13247@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13248 pushl_cfi $do_device_not_available
13249 jmp error_code
13250 CFI_ENDPROC
13251-END(device_not_available)
13252+ENDPROC(device_not_available)
13253
13254 #ifdef CONFIG_PARAVIRT
13255 ENTRY(native_iret)
13256@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13257 .align 4
13258 .long native_iret, iret_exc
13259 .previous
13260-END(native_iret)
13261+ENDPROC(native_iret)
13262
13263 ENTRY(native_irq_enable_sysexit)
13264 sti
13265 sysexit
13266-END(native_irq_enable_sysexit)
13267+ENDPROC(native_irq_enable_sysexit)
13268 #endif
13269
13270 ENTRY(overflow)
13271@@ -916,7 +1165,7 @@ ENTRY(overflow)
13272 pushl_cfi $do_overflow
13273 jmp error_code
13274 CFI_ENDPROC
13275-END(overflow)
13276+ENDPROC(overflow)
13277
13278 ENTRY(bounds)
13279 RING0_INT_FRAME
13280@@ -924,7 +1173,7 @@ ENTRY(bounds)
13281 pushl_cfi $do_bounds
13282 jmp error_code
13283 CFI_ENDPROC
13284-END(bounds)
13285+ENDPROC(bounds)
13286
13287 ENTRY(invalid_op)
13288 RING0_INT_FRAME
13289@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13290 pushl_cfi $do_invalid_op
13291 jmp error_code
13292 CFI_ENDPROC
13293-END(invalid_op)
13294+ENDPROC(invalid_op)
13295
13296 ENTRY(coprocessor_segment_overrun)
13297 RING0_INT_FRAME
13298@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13299 pushl_cfi $do_coprocessor_segment_overrun
13300 jmp error_code
13301 CFI_ENDPROC
13302-END(coprocessor_segment_overrun)
13303+ENDPROC(coprocessor_segment_overrun)
13304
13305 ENTRY(invalid_TSS)
13306 RING0_EC_FRAME
13307 pushl_cfi $do_invalid_TSS
13308 jmp error_code
13309 CFI_ENDPROC
13310-END(invalid_TSS)
13311+ENDPROC(invalid_TSS)
13312
13313 ENTRY(segment_not_present)
13314 RING0_EC_FRAME
13315 pushl_cfi $do_segment_not_present
13316 jmp error_code
13317 CFI_ENDPROC
13318-END(segment_not_present)
13319+ENDPROC(segment_not_present)
13320
13321 ENTRY(stack_segment)
13322 RING0_EC_FRAME
13323 pushl_cfi $do_stack_segment
13324 jmp error_code
13325 CFI_ENDPROC
13326-END(stack_segment)
13327+ENDPROC(stack_segment)
13328
13329 ENTRY(alignment_check)
13330 RING0_EC_FRAME
13331 pushl_cfi $do_alignment_check
13332 jmp error_code
13333 CFI_ENDPROC
13334-END(alignment_check)
13335+ENDPROC(alignment_check)
13336
13337 ENTRY(divide_error)
13338 RING0_INT_FRAME
13339@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13340 pushl_cfi $do_divide_error
13341 jmp error_code
13342 CFI_ENDPROC
13343-END(divide_error)
13344+ENDPROC(divide_error)
13345
13346 #ifdef CONFIG_X86_MCE
13347 ENTRY(machine_check)
13348@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13349 pushl_cfi machine_check_vector
13350 jmp error_code
13351 CFI_ENDPROC
13352-END(machine_check)
13353+ENDPROC(machine_check)
13354 #endif
13355
13356 ENTRY(spurious_interrupt_bug)
13357@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13358 pushl_cfi $do_spurious_interrupt_bug
13359 jmp error_code
13360 CFI_ENDPROC
13361-END(spurious_interrupt_bug)
13362+ENDPROC(spurious_interrupt_bug)
13363 /*
13364 * End of kprobes section
13365 */
13366@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13367
13368 ENTRY(mcount)
13369 ret
13370-END(mcount)
13371+ENDPROC(mcount)
13372
13373 ENTRY(ftrace_caller)
13374 cmpl $0, function_trace_stop
13375@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13376 .globl ftrace_stub
13377 ftrace_stub:
13378 ret
13379-END(ftrace_caller)
13380+ENDPROC(ftrace_caller)
13381
13382 #else /* ! CONFIG_DYNAMIC_FTRACE */
13383
13384@@ -1174,7 +1423,7 @@ trace:
13385 popl %ecx
13386 popl %eax
13387 jmp ftrace_stub
13388-END(mcount)
13389+ENDPROC(mcount)
13390 #endif /* CONFIG_DYNAMIC_FTRACE */
13391 #endif /* CONFIG_FUNCTION_TRACER */
13392
13393@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13394 popl %ecx
13395 popl %eax
13396 ret
13397-END(ftrace_graph_caller)
13398+ENDPROC(ftrace_graph_caller)
13399
13400 .globl return_to_handler
13401 return_to_handler:
13402@@ -1209,7 +1458,6 @@ return_to_handler:
13403 jmp *%ecx
13404 #endif
13405
13406-.section .rodata,"a"
13407 #include "syscall_table_32.S"
13408
13409 syscall_table_size=(.-sys_call_table)
13410@@ -1255,15 +1503,18 @@ error_code:
13411 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13412 REG_TO_PTGS %ecx
13413 SET_KERNEL_GS %ecx
13414- movl $(__USER_DS), %ecx
13415+ movl $(__KERNEL_DS), %ecx
13416 movl %ecx, %ds
13417 movl %ecx, %es
13418+
13419+ pax_enter_kernel
13420+
13421 TRACE_IRQS_OFF
13422 movl %esp,%eax # pt_regs pointer
13423 call *%edi
13424 jmp ret_from_exception
13425 CFI_ENDPROC
13426-END(page_fault)
13427+ENDPROC(page_fault)
13428
13429 /*
13430 * Debug traps and NMI can happen at the one SYSENTER instruction
13431@@ -1305,7 +1556,7 @@ debug_stack_correct:
13432 call do_debug
13433 jmp ret_from_exception
13434 CFI_ENDPROC
13435-END(debug)
13436+ENDPROC(debug)
13437
13438 /*
13439 * NMI is doubly nasty. It can happen _while_ we're handling
13440@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13441 xorl %edx,%edx # zero error code
13442 movl %esp,%eax # pt_regs pointer
13443 call do_nmi
13444+
13445+ pax_exit_kernel
13446+
13447 jmp restore_all_notrace
13448 CFI_ENDPROC
13449
13450@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13451 FIXUP_ESPFIX_STACK # %eax == %esp
13452 xorl %edx,%edx # zero error code
13453 call do_nmi
13454+
13455+ pax_exit_kernel
13456+
13457 RESTORE_REGS
13458 lss 12+4(%esp), %esp # back to espfix stack
13459 CFI_ADJUST_CFA_OFFSET -24
13460 jmp irq_return
13461 CFI_ENDPROC
13462-END(nmi)
13463+ENDPROC(nmi)
13464
13465 ENTRY(int3)
13466 RING0_INT_FRAME
13467@@ -1395,14 +1652,14 @@ ENTRY(int3)
13468 call do_int3
13469 jmp ret_from_exception
13470 CFI_ENDPROC
13471-END(int3)
13472+ENDPROC(int3)
13473
13474 ENTRY(general_protection)
13475 RING0_EC_FRAME
13476 pushl_cfi $do_general_protection
13477 jmp error_code
13478 CFI_ENDPROC
13479-END(general_protection)
13480+ENDPROC(general_protection)
13481
13482 #ifdef CONFIG_KVM_GUEST
13483 ENTRY(async_page_fault)
13484@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13485 pushl_cfi $do_async_page_fault
13486 jmp error_code
13487 CFI_ENDPROC
13488-END(async_page_fault)
13489+ENDPROC(async_page_fault)
13490 #endif
13491
13492 /*
13493diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13494index faf8d5e..4f16a68 100644
13495--- a/arch/x86/kernel/entry_64.S
13496+++ b/arch/x86/kernel/entry_64.S
13497@@ -55,6 +55,8 @@
13498 #include <asm/paravirt.h>
13499 #include <asm/ftrace.h>
13500 #include <asm/percpu.h>
13501+#include <asm/pgtable.h>
13502+#include <asm/alternative-asm.h>
13503
13504 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13505 #include <linux/elf-em.h>
13506@@ -68,8 +70,9 @@
13507 #ifdef CONFIG_FUNCTION_TRACER
13508 #ifdef CONFIG_DYNAMIC_FTRACE
13509 ENTRY(mcount)
13510+ pax_force_retaddr
13511 retq
13512-END(mcount)
13513+ENDPROC(mcount)
13514
13515 ENTRY(ftrace_caller)
13516 cmpl $0, function_trace_stop
13517@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13518 #endif
13519
13520 GLOBAL(ftrace_stub)
13521+ pax_force_retaddr
13522 retq
13523-END(ftrace_caller)
13524+ENDPROC(ftrace_caller)
13525
13526 #else /* ! CONFIG_DYNAMIC_FTRACE */
13527 ENTRY(mcount)
13528@@ -112,6 +116,7 @@ ENTRY(mcount)
13529 #endif
13530
13531 GLOBAL(ftrace_stub)
13532+ pax_force_retaddr
13533 retq
13534
13535 trace:
13536@@ -121,12 +126,13 @@ trace:
13537 movq 8(%rbp), %rsi
13538 subq $MCOUNT_INSN_SIZE, %rdi
13539
13540+ pax_force_fptr ftrace_trace_function
13541 call *ftrace_trace_function
13542
13543 MCOUNT_RESTORE_FRAME
13544
13545 jmp ftrace_stub
13546-END(mcount)
13547+ENDPROC(mcount)
13548 #endif /* CONFIG_DYNAMIC_FTRACE */
13549 #endif /* CONFIG_FUNCTION_TRACER */
13550
13551@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13552
13553 MCOUNT_RESTORE_FRAME
13554
13555+ pax_force_retaddr
13556 retq
13557-END(ftrace_graph_caller)
13558+ENDPROC(ftrace_graph_caller)
13559
13560 GLOBAL(return_to_handler)
13561 subq $24, %rsp
13562@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13563 movq 8(%rsp), %rdx
13564 movq (%rsp), %rax
13565 addq $24, %rsp
13566+ pax_force_fptr %rdi
13567 jmp *%rdi
13568 #endif
13569
13570@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13571 ENDPROC(native_usergs_sysret64)
13572 #endif /* CONFIG_PARAVIRT */
13573
13574+ .macro ljmpq sel, off
13575+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13576+ .byte 0x48; ljmp *1234f(%rip)
13577+ .pushsection .rodata
13578+ .align 16
13579+ 1234: .quad \off; .word \sel
13580+ .popsection
13581+#else
13582+ pushq $\sel
13583+ pushq $\off
13584+ lretq
13585+#endif
13586+ .endm
13587+
13588+ .macro pax_enter_kernel
13589+ pax_set_fptr_mask
13590+#ifdef CONFIG_PAX_KERNEXEC
13591+ call pax_enter_kernel
13592+#endif
13593+ .endm
13594+
13595+ .macro pax_exit_kernel
13596+#ifdef CONFIG_PAX_KERNEXEC
13597+ call pax_exit_kernel
13598+#endif
13599+ .endm
13600+
13601+#ifdef CONFIG_PAX_KERNEXEC
13602+ENTRY(pax_enter_kernel)
13603+ pushq %rdi
13604+
13605+#ifdef CONFIG_PARAVIRT
13606+ PV_SAVE_REGS(CLBR_RDI)
13607+#endif
13608+
13609+ GET_CR0_INTO_RDI
13610+ bts $16,%rdi
13611+ jnc 3f
13612+ mov %cs,%edi
13613+ cmp $__KERNEL_CS,%edi
13614+ jnz 2f
13615+1:
13616+
13617+#ifdef CONFIG_PARAVIRT
13618+ PV_RESTORE_REGS(CLBR_RDI)
13619+#endif
13620+
13621+ popq %rdi
13622+ pax_force_retaddr
13623+ retq
13624+
13625+2: ljmpq __KERNEL_CS,1f
13626+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13627+4: SET_RDI_INTO_CR0
13628+ jmp 1b
13629+ENDPROC(pax_enter_kernel)
13630+
13631+ENTRY(pax_exit_kernel)
13632+ pushq %rdi
13633+
13634+#ifdef CONFIG_PARAVIRT
13635+ PV_SAVE_REGS(CLBR_RDI)
13636+#endif
13637+
13638+ mov %cs,%rdi
13639+ cmp $__KERNEXEC_KERNEL_CS,%edi
13640+ jz 2f
13641+1:
13642+
13643+#ifdef CONFIG_PARAVIRT
13644+ PV_RESTORE_REGS(CLBR_RDI);
13645+#endif
13646+
13647+ popq %rdi
13648+ pax_force_retaddr
13649+ retq
13650+
13651+2: GET_CR0_INTO_RDI
13652+ btr $16,%rdi
13653+ ljmpq __KERNEL_CS,3f
13654+3: SET_RDI_INTO_CR0
13655+ jmp 1b
13656+#ifdef CONFIG_PARAVIRT
13657+ PV_RESTORE_REGS(CLBR_RDI);
13658+#endif
13659+
13660+ popq %rdi
13661+ pax_force_retaddr
13662+ retq
13663+ENDPROC(pax_exit_kernel)
13664+#endif
13665+
13666+ .macro pax_enter_kernel_user
13667+ pax_set_fptr_mask
13668+#ifdef CONFIG_PAX_MEMORY_UDEREF
13669+ call pax_enter_kernel_user
13670+#endif
13671+ .endm
13672+
13673+ .macro pax_exit_kernel_user
13674+#ifdef CONFIG_PAX_MEMORY_UDEREF
13675+ call pax_exit_kernel_user
13676+#endif
13677+#ifdef CONFIG_PAX_RANDKSTACK
13678+ pushq %rax
13679+ call pax_randomize_kstack
13680+ popq %rax
13681+#endif
13682+ .endm
13683+
13684+#ifdef CONFIG_PAX_MEMORY_UDEREF
13685+ENTRY(pax_enter_kernel_user)
13686+ pushq %rdi
13687+ pushq %rbx
13688+
13689+#ifdef CONFIG_PARAVIRT
13690+ PV_SAVE_REGS(CLBR_RDI)
13691+#endif
13692+
13693+ GET_CR3_INTO_RDI
13694+ mov %rdi,%rbx
13695+ add $__START_KERNEL_map,%rbx
13696+ sub phys_base(%rip),%rbx
13697+
13698+#ifdef CONFIG_PARAVIRT
13699+ pushq %rdi
13700+ cmpl $0, pv_info+PARAVIRT_enabled
13701+ jz 1f
13702+ i = 0
13703+ .rept USER_PGD_PTRS
13704+ mov i*8(%rbx),%rsi
13705+ mov $0,%sil
13706+ lea i*8(%rbx),%rdi
13707+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13708+ i = i + 1
13709+ .endr
13710+ jmp 2f
13711+1:
13712+#endif
13713+
13714+ i = 0
13715+ .rept USER_PGD_PTRS
13716+ movb $0,i*8(%rbx)
13717+ i = i + 1
13718+ .endr
13719+
13720+#ifdef CONFIG_PARAVIRT
13721+2: popq %rdi
13722+#endif
13723+ SET_RDI_INTO_CR3
13724+
13725+#ifdef CONFIG_PAX_KERNEXEC
13726+ GET_CR0_INTO_RDI
13727+ bts $16,%rdi
13728+ SET_RDI_INTO_CR0
13729+#endif
13730+
13731+#ifdef CONFIG_PARAVIRT
13732+ PV_RESTORE_REGS(CLBR_RDI)
13733+#endif
13734+
13735+ popq %rbx
13736+ popq %rdi
13737+ pax_force_retaddr
13738+ retq
13739+ENDPROC(pax_enter_kernel_user)
13740+
13741+ENTRY(pax_exit_kernel_user)
13742+ push %rdi
13743+
13744+#ifdef CONFIG_PARAVIRT
13745+ pushq %rbx
13746+ PV_SAVE_REGS(CLBR_RDI)
13747+#endif
13748+
13749+#ifdef CONFIG_PAX_KERNEXEC
13750+ GET_CR0_INTO_RDI
13751+ btr $16,%rdi
13752+ SET_RDI_INTO_CR0
13753+#endif
13754+
13755+ GET_CR3_INTO_RDI
13756+ add $__START_KERNEL_map,%rdi
13757+ sub phys_base(%rip),%rdi
13758+
13759+#ifdef CONFIG_PARAVIRT
13760+ cmpl $0, pv_info+PARAVIRT_enabled
13761+ jz 1f
13762+ mov %rdi,%rbx
13763+ i = 0
13764+ .rept USER_PGD_PTRS
13765+ mov i*8(%rbx),%rsi
13766+ mov $0x67,%sil
13767+ lea i*8(%rbx),%rdi
13768+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13769+ i = i + 1
13770+ .endr
13771+ jmp 2f
13772+1:
13773+#endif
13774+
13775+ i = 0
13776+ .rept USER_PGD_PTRS
13777+ movb $0x67,i*8(%rdi)
13778+ i = i + 1
13779+ .endr
13780+
13781+#ifdef CONFIG_PARAVIRT
13782+2: PV_RESTORE_REGS(CLBR_RDI)
13783+ popq %rbx
13784+#endif
13785+
13786+ popq %rdi
13787+ pax_force_retaddr
13788+ retq
13789+ENDPROC(pax_exit_kernel_user)
13790+#endif
13791+
13792+.macro pax_erase_kstack
13793+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13794+ call pax_erase_kstack
13795+#endif
13796+.endm
13797+
13798+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13799+/*
13800+ * r11: thread_info
13801+ * rcx, rdx: can be clobbered
13802+ */
13803+ENTRY(pax_erase_kstack)
13804+ pushq %rdi
13805+ pushq %rax
13806+ pushq %r11
13807+
13808+ GET_THREAD_INFO(%r11)
13809+ mov TI_lowest_stack(%r11), %rdi
13810+ mov $-0xBEEF, %rax
13811+ std
13812+
13813+1: mov %edi, %ecx
13814+ and $THREAD_SIZE_asm - 1, %ecx
13815+ shr $3, %ecx
13816+ repne scasq
13817+ jecxz 2f
13818+
13819+ cmp $2*8, %ecx
13820+ jc 2f
13821+
13822+ mov $2*8, %ecx
13823+ repe scasq
13824+ jecxz 2f
13825+ jne 1b
13826+
13827+2: cld
13828+ mov %esp, %ecx
13829+ sub %edi, %ecx
13830+
13831+ cmp $THREAD_SIZE_asm, %rcx
13832+ jb 3f
13833+ ud2
13834+3:
13835+
13836+ shr $3, %ecx
13837+ rep stosq
13838+
13839+ mov TI_task_thread_sp0(%r11), %rdi
13840+ sub $256, %rdi
13841+ mov %rdi, TI_lowest_stack(%r11)
13842+
13843+ popq %r11
13844+ popq %rax
13845+ popq %rdi
13846+ pax_force_retaddr
13847+ ret
13848+ENDPROC(pax_erase_kstack)
13849+#endif
13850
13851 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13852 #ifdef CONFIG_TRACE_IRQFLAGS
13853@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13854 .endm
13855
13856 .macro UNFAKE_STACK_FRAME
13857- addq $8*6, %rsp
13858- CFI_ADJUST_CFA_OFFSET -(6*8)
13859+ addq $8*6 + ARG_SKIP, %rsp
13860+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13861 .endm
13862
13863 /*
13864@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13865 movq %rsp, %rsi
13866
13867 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13868- testl $3, CS(%rdi)
13869+ testb $3, CS(%rdi)
13870 je 1f
13871 SWAPGS
13872 /*
13873@@ -355,9 +639,10 @@ ENTRY(save_rest)
13874 movq_cfi r15, R15+16
13875 movq %r11, 8(%rsp) /* return address */
13876 FIXUP_TOP_OF_STACK %r11, 16
13877+ pax_force_retaddr
13878 ret
13879 CFI_ENDPROC
13880-END(save_rest)
13881+ENDPROC(save_rest)
13882
13883 /* save complete stack frame */
13884 .pushsection .kprobes.text, "ax"
13885@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13886 js 1f /* negative -> in kernel */
13887 SWAPGS
13888 xorl %ebx,%ebx
13889-1: ret
13890+1: pax_force_retaddr_bts
13891+ ret
13892 CFI_ENDPROC
13893-END(save_paranoid)
13894+ENDPROC(save_paranoid)
13895 .popsection
13896
13897 /*
13898@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13899
13900 RESTORE_REST
13901
13902- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13903+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13904 je int_ret_from_sys_call
13905
13906 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13907@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13908 jmp ret_from_sys_call # go to the SYSRET fastpath
13909
13910 CFI_ENDPROC
13911-END(ret_from_fork)
13912+ENDPROC(ret_from_fork)
13913
13914 /*
13915 * System call entry. Up to 6 arguments in registers are supported.
13916@@ -456,7 +742,7 @@ END(ret_from_fork)
13917 ENTRY(system_call)
13918 CFI_STARTPROC simple
13919 CFI_SIGNAL_FRAME
13920- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13921+ CFI_DEF_CFA rsp,0
13922 CFI_REGISTER rip,rcx
13923 /*CFI_REGISTER rflags,r11*/
13924 SWAPGS_UNSAFE_STACK
13925@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13926
13927 movq %rsp,PER_CPU_VAR(old_rsp)
13928 movq PER_CPU_VAR(kernel_stack),%rsp
13929+ SAVE_ARGS 8*6,0
13930+ pax_enter_kernel_user
13931 /*
13932 * No need to follow this irqs off/on section - it's straight
13933 * and short:
13934 */
13935 ENABLE_INTERRUPTS(CLBR_NONE)
13936- SAVE_ARGS 8,0
13937 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13938 movq %rcx,RIP-ARGOFFSET(%rsp)
13939 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13940@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13941 system_call_fastpath:
13942 cmpq $__NR_syscall_max,%rax
13943 ja badsys
13944- movq %r10,%rcx
13945+ movq R10-ARGOFFSET(%rsp),%rcx
13946 call *sys_call_table(,%rax,8) # XXX: rip relative
13947 movq %rax,RAX-ARGOFFSET(%rsp)
13948 /*
13949@@ -503,6 +790,8 @@ sysret_check:
13950 andl %edi,%edx
13951 jnz sysret_careful
13952 CFI_REMEMBER_STATE
13953+ pax_exit_kernel_user
13954+ pax_erase_kstack
13955 /*
13956 * sysretq will re-enable interrupts:
13957 */
13958@@ -554,14 +843,18 @@ badsys:
13959 * jump back to the normal fast path.
13960 */
13961 auditsys:
13962- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13963+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13964 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13965 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13966 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13967 movq %rax,%rsi /* 2nd arg: syscall number */
13968 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13969 call audit_syscall_entry
13970+
13971+ pax_erase_kstack
13972+
13973 LOAD_ARGS 0 /* reload call-clobbered registers */
13974+ pax_set_fptr_mask
13975 jmp system_call_fastpath
13976
13977 /*
13978@@ -591,16 +884,20 @@ tracesys:
13979 FIXUP_TOP_OF_STACK %rdi
13980 movq %rsp,%rdi
13981 call syscall_trace_enter
13982+
13983+ pax_erase_kstack
13984+
13985 /*
13986 * Reload arg registers from stack in case ptrace changed them.
13987 * We don't reload %rax because syscall_trace_enter() returned
13988 * the value it wants us to use in the table lookup.
13989 */
13990 LOAD_ARGS ARGOFFSET, 1
13991+ pax_set_fptr_mask
13992 RESTORE_REST
13993 cmpq $__NR_syscall_max,%rax
13994 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13995- movq %r10,%rcx /* fixup for C */
13996+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13997 call *sys_call_table(,%rax,8)
13998 movq %rax,RAX-ARGOFFSET(%rsp)
13999 /* Use IRET because user could have changed frame */
14000@@ -612,7 +909,7 @@ tracesys:
14001 GLOBAL(int_ret_from_sys_call)
14002 DISABLE_INTERRUPTS(CLBR_NONE)
14003 TRACE_IRQS_OFF
14004- testl $3,CS-ARGOFFSET(%rsp)
14005+ testb $3,CS-ARGOFFSET(%rsp)
14006 je retint_restore_args
14007 movl $_TIF_ALLWORK_MASK,%edi
14008 /* edi: mask to check */
14009@@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14010 andl %edi,%edx
14011 jnz int_careful
14012 andl $~TS_COMPAT,TI_status(%rcx)
14013+ pax_erase_kstack
14014 jmp retint_swapgs
14015
14016 /* Either reschedule or signal or syscall exit tracking needed. */
14017@@ -669,7 +967,7 @@ int_restore_rest:
14018 TRACE_IRQS_OFF
14019 jmp int_with_check
14020 CFI_ENDPROC
14021-END(system_call)
14022+ENDPROC(system_call)
14023
14024 /*
14025 * Certain special system calls that need to save a complete full stack frame.
14026@@ -685,7 +983,7 @@ ENTRY(\label)
14027 call \func
14028 jmp ptregscall_common
14029 CFI_ENDPROC
14030-END(\label)
14031+ENDPROC(\label)
14032 .endm
14033
14034 PTREGSCALL stub_clone, sys_clone, %r8
14035@@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14036 movq_cfi_restore R12+8, r12
14037 movq_cfi_restore RBP+8, rbp
14038 movq_cfi_restore RBX+8, rbx
14039+ pax_force_retaddr
14040 ret $REST_SKIP /* pop extended registers */
14041 CFI_ENDPROC
14042-END(ptregscall_common)
14043+ENDPROC(ptregscall_common)
14044
14045 ENTRY(stub_execve)
14046 CFI_STARTPROC
14047@@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14048 RESTORE_REST
14049 jmp int_ret_from_sys_call
14050 CFI_ENDPROC
14051-END(stub_execve)
14052+ENDPROC(stub_execve)
14053
14054 /*
14055 * sigreturn is special because it needs to restore all registers on return.
14056@@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14057 RESTORE_REST
14058 jmp int_ret_from_sys_call
14059 CFI_ENDPROC
14060-END(stub_rt_sigreturn)
14061+ENDPROC(stub_rt_sigreturn)
14062
14063 /*
14064 * Build the entry stubs and pointer table with some assembler magic.
14065@@ -773,7 +1072,7 @@ vector=vector+1
14066 2: jmp common_interrupt
14067 .endr
14068 CFI_ENDPROC
14069-END(irq_entries_start)
14070+ENDPROC(irq_entries_start)
14071
14072 .previous
14073 END(interrupt)
14074@@ -793,6 +1092,16 @@ END(interrupt)
14075 subq $ORIG_RAX-RBP, %rsp
14076 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14077 SAVE_ARGS_IRQ
14078+#ifdef CONFIG_PAX_MEMORY_UDEREF
14079+ testb $3, CS(%rdi)
14080+ jnz 1f
14081+ pax_enter_kernel
14082+ jmp 2f
14083+1: pax_enter_kernel_user
14084+2:
14085+#else
14086+ pax_enter_kernel
14087+#endif
14088 call \func
14089 .endm
14090
14091@@ -824,7 +1133,7 @@ ret_from_intr:
14092
14093 exit_intr:
14094 GET_THREAD_INFO(%rcx)
14095- testl $3,CS-ARGOFFSET(%rsp)
14096+ testb $3,CS-ARGOFFSET(%rsp)
14097 je retint_kernel
14098
14099 /* Interrupt came from user space */
14100@@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14101 * The iretq could re-enable interrupts:
14102 */
14103 DISABLE_INTERRUPTS(CLBR_ANY)
14104+ pax_exit_kernel_user
14105 TRACE_IRQS_IRETQ
14106 SWAPGS
14107 jmp restore_args
14108
14109 retint_restore_args: /* return to kernel space */
14110 DISABLE_INTERRUPTS(CLBR_ANY)
14111+ pax_exit_kernel
14112+ pax_force_retaddr RIP-ARGOFFSET
14113 /*
14114 * The iretq could re-enable interrupts:
14115 */
14116@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14117 #endif
14118
14119 CFI_ENDPROC
14120-END(common_interrupt)
14121+ENDPROC(common_interrupt)
14122 /*
14123 * End of kprobes section
14124 */
14125@@ -956,7 +1268,7 @@ ENTRY(\sym)
14126 interrupt \do_sym
14127 jmp ret_from_intr
14128 CFI_ENDPROC
14129-END(\sym)
14130+ENDPROC(\sym)
14131 .endm
14132
14133 #ifdef CONFIG_SMP
14134@@ -1021,12 +1333,22 @@ ENTRY(\sym)
14135 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14136 call error_entry
14137 DEFAULT_FRAME 0
14138+#ifdef CONFIG_PAX_MEMORY_UDEREF
14139+ testb $3, CS(%rsp)
14140+ jnz 1f
14141+ pax_enter_kernel
14142+ jmp 2f
14143+1: pax_enter_kernel_user
14144+2:
14145+#else
14146+ pax_enter_kernel
14147+#endif
14148 movq %rsp,%rdi /* pt_regs pointer */
14149 xorl %esi,%esi /* no error code */
14150 call \do_sym
14151 jmp error_exit /* %ebx: no swapgs flag */
14152 CFI_ENDPROC
14153-END(\sym)
14154+ENDPROC(\sym)
14155 .endm
14156
14157 .macro paranoidzeroentry sym do_sym
14158@@ -1038,15 +1360,25 @@ ENTRY(\sym)
14159 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14160 call save_paranoid
14161 TRACE_IRQS_OFF
14162+#ifdef CONFIG_PAX_MEMORY_UDEREF
14163+ testb $3, CS(%rsp)
14164+ jnz 1f
14165+ pax_enter_kernel
14166+ jmp 2f
14167+1: pax_enter_kernel_user
14168+2:
14169+#else
14170+ pax_enter_kernel
14171+#endif
14172 movq %rsp,%rdi /* pt_regs pointer */
14173 xorl %esi,%esi /* no error code */
14174 call \do_sym
14175 jmp paranoid_exit /* %ebx: no swapgs flag */
14176 CFI_ENDPROC
14177-END(\sym)
14178+ENDPROC(\sym)
14179 .endm
14180
14181-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14182+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14183 .macro paranoidzeroentry_ist sym do_sym ist
14184 ENTRY(\sym)
14185 INTR_FRAME
14186@@ -1056,14 +1388,30 @@ ENTRY(\sym)
14187 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14188 call save_paranoid
14189 TRACE_IRQS_OFF
14190+#ifdef CONFIG_PAX_MEMORY_UDEREF
14191+ testb $3, CS(%rsp)
14192+ jnz 1f
14193+ pax_enter_kernel
14194+ jmp 2f
14195+1: pax_enter_kernel_user
14196+2:
14197+#else
14198+ pax_enter_kernel
14199+#endif
14200 movq %rsp,%rdi /* pt_regs pointer */
14201 xorl %esi,%esi /* no error code */
14202+#ifdef CONFIG_SMP
14203+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14204+ lea init_tss(%r12), %r12
14205+#else
14206+ lea init_tss(%rip), %r12
14207+#endif
14208 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14209 call \do_sym
14210 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14211 jmp paranoid_exit /* %ebx: no swapgs flag */
14212 CFI_ENDPROC
14213-END(\sym)
14214+ENDPROC(\sym)
14215 .endm
14216
14217 .macro errorentry sym do_sym
14218@@ -1074,13 +1422,23 @@ ENTRY(\sym)
14219 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14220 call error_entry
14221 DEFAULT_FRAME 0
14222+#ifdef CONFIG_PAX_MEMORY_UDEREF
14223+ testb $3, CS(%rsp)
14224+ jnz 1f
14225+ pax_enter_kernel
14226+ jmp 2f
14227+1: pax_enter_kernel_user
14228+2:
14229+#else
14230+ pax_enter_kernel
14231+#endif
14232 movq %rsp,%rdi /* pt_regs pointer */
14233 movq ORIG_RAX(%rsp),%rsi /* get error code */
14234 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14235 call \do_sym
14236 jmp error_exit /* %ebx: no swapgs flag */
14237 CFI_ENDPROC
14238-END(\sym)
14239+ENDPROC(\sym)
14240 .endm
14241
14242 /* error code is on the stack already */
14243@@ -1093,13 +1451,23 @@ ENTRY(\sym)
14244 call save_paranoid
14245 DEFAULT_FRAME 0
14246 TRACE_IRQS_OFF
14247+#ifdef CONFIG_PAX_MEMORY_UDEREF
14248+ testb $3, CS(%rsp)
14249+ jnz 1f
14250+ pax_enter_kernel
14251+ jmp 2f
14252+1: pax_enter_kernel_user
14253+2:
14254+#else
14255+ pax_enter_kernel
14256+#endif
14257 movq %rsp,%rdi /* pt_regs pointer */
14258 movq ORIG_RAX(%rsp),%rsi /* get error code */
14259 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14260 call \do_sym
14261 jmp paranoid_exit /* %ebx: no swapgs flag */
14262 CFI_ENDPROC
14263-END(\sym)
14264+ENDPROC(\sym)
14265 .endm
14266
14267 zeroentry divide_error do_divide_error
14268@@ -1129,9 +1497,10 @@ gs_change:
14269 2: mfence /* workaround */
14270 SWAPGS
14271 popfq_cfi
14272+ pax_force_retaddr
14273 ret
14274 CFI_ENDPROC
14275-END(native_load_gs_index)
14276+ENDPROC(native_load_gs_index)
14277
14278 .section __ex_table,"a"
14279 .align 8
14280@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14281 * Here we are in the child and the registers are set as they were
14282 * at kernel_thread() invocation in the parent.
14283 */
14284+ pax_force_fptr %rsi
14285 call *%rsi
14286 # exit
14287 mov %eax, %edi
14288 call do_exit
14289 ud2 # padding for call trace
14290 CFI_ENDPROC
14291-END(kernel_thread_helper)
14292+ENDPROC(kernel_thread_helper)
14293
14294 /*
14295 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14296@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14297 RESTORE_REST
14298 testq %rax,%rax
14299 je int_ret_from_sys_call
14300- RESTORE_ARGS
14301 UNFAKE_STACK_FRAME
14302+ pax_force_retaddr
14303 ret
14304 CFI_ENDPROC
14305-END(kernel_execve)
14306+ENDPROC(kernel_execve)
14307
14308 /* Call softirq on interrupt stack. Interrupts are off. */
14309 ENTRY(call_softirq)
14310@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14311 CFI_DEF_CFA_REGISTER rsp
14312 CFI_ADJUST_CFA_OFFSET -8
14313 decl PER_CPU_VAR(irq_count)
14314+ pax_force_retaddr
14315 ret
14316 CFI_ENDPROC
14317-END(call_softirq)
14318+ENDPROC(call_softirq)
14319
14320 #ifdef CONFIG_XEN
14321 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14322@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14323 decl PER_CPU_VAR(irq_count)
14324 jmp error_exit
14325 CFI_ENDPROC
14326-END(xen_do_hypervisor_callback)
14327+ENDPROC(xen_do_hypervisor_callback)
14328
14329 /*
14330 * Hypervisor uses this for application faults while it executes.
14331@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14332 SAVE_ALL
14333 jmp error_exit
14334 CFI_ENDPROC
14335-END(xen_failsafe_callback)
14336+ENDPROC(xen_failsafe_callback)
14337
14338 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14339 xen_hvm_callback_vector xen_evtchn_do_upcall
14340@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14341 TRACE_IRQS_OFF
14342 testl %ebx,%ebx /* swapgs needed? */
14343 jnz paranoid_restore
14344- testl $3,CS(%rsp)
14345+ testb $3,CS(%rsp)
14346 jnz paranoid_userspace
14347+#ifdef CONFIG_PAX_MEMORY_UDEREF
14348+ pax_exit_kernel
14349+ TRACE_IRQS_IRETQ 0
14350+ SWAPGS_UNSAFE_STACK
14351+ RESTORE_ALL 8
14352+ pax_force_retaddr_bts
14353+ jmp irq_return
14354+#endif
14355 paranoid_swapgs:
14356+#ifdef CONFIG_PAX_MEMORY_UDEREF
14357+ pax_exit_kernel_user
14358+#else
14359+ pax_exit_kernel
14360+#endif
14361 TRACE_IRQS_IRETQ 0
14362 SWAPGS_UNSAFE_STACK
14363 RESTORE_ALL 8
14364 jmp irq_return
14365 paranoid_restore:
14366+ pax_exit_kernel
14367 TRACE_IRQS_IRETQ 0
14368 RESTORE_ALL 8
14369+ pax_force_retaddr_bts
14370 jmp irq_return
14371 paranoid_userspace:
14372 GET_THREAD_INFO(%rcx)
14373@@ -1394,7 +1780,7 @@ paranoid_schedule:
14374 TRACE_IRQS_OFF
14375 jmp paranoid_userspace
14376 CFI_ENDPROC
14377-END(paranoid_exit)
14378+ENDPROC(paranoid_exit)
14379
14380 /*
14381 * Exception entry point. This expects an error code/orig_rax on the stack.
14382@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14383 movq_cfi r14, R14+8
14384 movq_cfi r15, R15+8
14385 xorl %ebx,%ebx
14386- testl $3,CS+8(%rsp)
14387+ testb $3,CS+8(%rsp)
14388 je error_kernelspace
14389 error_swapgs:
14390 SWAPGS
14391 error_sti:
14392 TRACE_IRQS_OFF
14393+ pax_force_retaddr_bts
14394 ret
14395
14396 /*
14397@@ -1453,7 +1840,7 @@ bstep_iret:
14398 movq %rcx,RIP+8(%rsp)
14399 jmp error_swapgs
14400 CFI_ENDPROC
14401-END(error_entry)
14402+ENDPROC(error_entry)
14403
14404
14405 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14406@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14407 jnz retint_careful
14408 jmp retint_swapgs
14409 CFI_ENDPROC
14410-END(error_exit)
14411+ENDPROC(error_exit)
14412
14413
14414 /* runs on exception stack */
14415@@ -1485,6 +1872,16 @@ ENTRY(nmi)
14416 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14417 call save_paranoid
14418 DEFAULT_FRAME 0
14419+#ifdef CONFIG_PAX_MEMORY_UDEREF
14420+ testb $3, CS(%rsp)
14421+ jnz 1f
14422+ pax_enter_kernel
14423+ jmp 2f
14424+1: pax_enter_kernel_user
14425+2:
14426+#else
14427+ pax_enter_kernel
14428+#endif
14429 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14430 movq %rsp,%rdi
14431 movq $-1,%rsi
14432@@ -1495,12 +1892,28 @@ ENTRY(nmi)
14433 DISABLE_INTERRUPTS(CLBR_NONE)
14434 testl %ebx,%ebx /* swapgs needed? */
14435 jnz nmi_restore
14436- testl $3,CS(%rsp)
14437+ testb $3,CS(%rsp)
14438 jnz nmi_userspace
14439+#ifdef CONFIG_PAX_MEMORY_UDEREF
14440+ pax_exit_kernel
14441+ SWAPGS_UNSAFE_STACK
14442+ RESTORE_ALL 8
14443+ pax_force_retaddr_bts
14444+ jmp irq_return
14445+#endif
14446 nmi_swapgs:
14447+#ifdef CONFIG_PAX_MEMORY_UDEREF
14448+ pax_exit_kernel_user
14449+#else
14450+ pax_exit_kernel
14451+#endif
14452 SWAPGS_UNSAFE_STACK
14453+ RESTORE_ALL 8
14454+ jmp irq_return
14455 nmi_restore:
14456+ pax_exit_kernel
14457 RESTORE_ALL 8
14458+ pax_force_retaddr_bts
14459 jmp irq_return
14460 nmi_userspace:
14461 GET_THREAD_INFO(%rcx)
14462@@ -1529,14 +1942,14 @@ nmi_schedule:
14463 jmp paranoid_exit
14464 CFI_ENDPROC
14465 #endif
14466-END(nmi)
14467+ENDPROC(nmi)
14468
14469 ENTRY(ignore_sysret)
14470 CFI_STARTPROC
14471 mov $-ENOSYS,%eax
14472 sysret
14473 CFI_ENDPROC
14474-END(ignore_sysret)
14475+ENDPROC(ignore_sysret)
14476
14477 /*
14478 * End of kprobes section
14479diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14480index c9a281f..ce2f317 100644
14481--- a/arch/x86/kernel/ftrace.c
14482+++ b/arch/x86/kernel/ftrace.c
14483@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14484 static const void *mod_code_newcode; /* holds the text to write to the IP */
14485
14486 static unsigned nmi_wait_count;
14487-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14488+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14489
14490 int ftrace_arch_read_dyn_info(char *buf, int size)
14491 {
14492@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14493
14494 r = snprintf(buf, size, "%u %u",
14495 nmi_wait_count,
14496- atomic_read(&nmi_update_count));
14497+ atomic_read_unchecked(&nmi_update_count));
14498 return r;
14499 }
14500
14501@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14502
14503 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14504 smp_rmb();
14505+ pax_open_kernel();
14506 ftrace_mod_code();
14507- atomic_inc(&nmi_update_count);
14508+ pax_close_kernel();
14509+ atomic_inc_unchecked(&nmi_update_count);
14510 }
14511 /* Must have previous changes seen before executions */
14512 smp_mb();
14513@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14514 {
14515 unsigned char replaced[MCOUNT_INSN_SIZE];
14516
14517+ ip = ktla_ktva(ip);
14518+
14519 /*
14520 * Note: Due to modules and __init, code can
14521 * disappear and change, we need to protect against faulting
14522@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14523 unsigned char old[MCOUNT_INSN_SIZE], *new;
14524 int ret;
14525
14526- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14527+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14528 new = ftrace_call_replace(ip, (unsigned long)func);
14529 ret = ftrace_modify_code(ip, old, new);
14530
14531@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14532 {
14533 unsigned char code[MCOUNT_INSN_SIZE];
14534
14535+ ip = ktla_ktva(ip);
14536+
14537 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14538 return -EFAULT;
14539
14540diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14541index 3bb0850..55a56f4 100644
14542--- a/arch/x86/kernel/head32.c
14543+++ b/arch/x86/kernel/head32.c
14544@@ -19,6 +19,7 @@
14545 #include <asm/io_apic.h>
14546 #include <asm/bios_ebda.h>
14547 #include <asm/tlbflush.h>
14548+#include <asm/boot.h>
14549
14550 static void __init i386_default_early_setup(void)
14551 {
14552@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14553 {
14554 memblock_init();
14555
14556- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14557+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14558
14559 #ifdef CONFIG_BLK_DEV_INITRD
14560 /* Reserve INITRD */
14561diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14562index ce0be7c..c41476e 100644
14563--- a/arch/x86/kernel/head_32.S
14564+++ b/arch/x86/kernel/head_32.S
14565@@ -25,6 +25,12 @@
14566 /* Physical address */
14567 #define pa(X) ((X) - __PAGE_OFFSET)
14568
14569+#ifdef CONFIG_PAX_KERNEXEC
14570+#define ta(X) (X)
14571+#else
14572+#define ta(X) ((X) - __PAGE_OFFSET)
14573+#endif
14574+
14575 /*
14576 * References to members of the new_cpu_data structure.
14577 */
14578@@ -54,11 +60,7 @@
14579 * and small than max_low_pfn, otherwise will waste some page table entries
14580 */
14581
14582-#if PTRS_PER_PMD > 1
14583-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14584-#else
14585-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14586-#endif
14587+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14588
14589 /* Number of possible pages in the lowmem region */
14590 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14591@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14592 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14593
14594 /*
14595+ * Real beginning of normal "text" segment
14596+ */
14597+ENTRY(stext)
14598+ENTRY(_stext)
14599+
14600+/*
14601 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14602 * %esi points to the real-mode code as a 32-bit pointer.
14603 * CS and DS must be 4 GB flat segments, but we don't depend on
14604@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14605 * can.
14606 */
14607 __HEAD
14608+
14609+#ifdef CONFIG_PAX_KERNEXEC
14610+ jmp startup_32
14611+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14612+.fill PAGE_SIZE-5,1,0xcc
14613+#endif
14614+
14615 ENTRY(startup_32)
14616 movl pa(stack_start),%ecx
14617
14618@@ -105,6 +120,57 @@ ENTRY(startup_32)
14619 2:
14620 leal -__PAGE_OFFSET(%ecx),%esp
14621
14622+#ifdef CONFIG_SMP
14623+ movl $pa(cpu_gdt_table),%edi
14624+ movl $__per_cpu_load,%eax
14625+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14626+ rorl $16,%eax
14627+ movb %al,__KERNEL_PERCPU + 4(%edi)
14628+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14629+ movl $__per_cpu_end - 1,%eax
14630+ subl $__per_cpu_start,%eax
14631+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14632+#endif
14633+
14634+#ifdef CONFIG_PAX_MEMORY_UDEREF
14635+ movl $NR_CPUS,%ecx
14636+ movl $pa(cpu_gdt_table),%edi
14637+1:
14638+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14639+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14640+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14641+ addl $PAGE_SIZE_asm,%edi
14642+ loop 1b
14643+#endif
14644+
14645+#ifdef CONFIG_PAX_KERNEXEC
14646+ movl $pa(boot_gdt),%edi
14647+ movl $__LOAD_PHYSICAL_ADDR,%eax
14648+ movw %ax,__BOOT_CS + 2(%edi)
14649+ rorl $16,%eax
14650+ movb %al,__BOOT_CS + 4(%edi)
14651+ movb %ah,__BOOT_CS + 7(%edi)
14652+ rorl $16,%eax
14653+
14654+ ljmp $(__BOOT_CS),$1f
14655+1:
14656+
14657+ movl $NR_CPUS,%ecx
14658+ movl $pa(cpu_gdt_table),%edi
14659+ addl $__PAGE_OFFSET,%eax
14660+1:
14661+ movw %ax,__KERNEL_CS + 2(%edi)
14662+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14663+ rorl $16,%eax
14664+ movb %al,__KERNEL_CS + 4(%edi)
14665+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14666+ movb %ah,__KERNEL_CS + 7(%edi)
14667+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14668+ rorl $16,%eax
14669+ addl $PAGE_SIZE_asm,%edi
14670+ loop 1b
14671+#endif
14672+
14673 /*
14674 * Clear BSS first so that there are no surprises...
14675 */
14676@@ -195,8 +261,11 @@ ENTRY(startup_32)
14677 movl %eax, pa(max_pfn_mapped)
14678
14679 /* Do early initialization of the fixmap area */
14680- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14681- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14682+#ifdef CONFIG_COMPAT_VDSO
14683+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14684+#else
14685+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14686+#endif
14687 #else /* Not PAE */
14688
14689 page_pde_offset = (__PAGE_OFFSET >> 20);
14690@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14691 movl %eax, pa(max_pfn_mapped)
14692
14693 /* Do early initialization of the fixmap area */
14694- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14695- movl %eax,pa(initial_page_table+0xffc)
14696+#ifdef CONFIG_COMPAT_VDSO
14697+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14698+#else
14699+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14700+#endif
14701 #endif
14702
14703 #ifdef CONFIG_PARAVIRT
14704@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14705 cmpl $num_subarch_entries, %eax
14706 jae bad_subarch
14707
14708- movl pa(subarch_entries)(,%eax,4), %eax
14709- subl $__PAGE_OFFSET, %eax
14710- jmp *%eax
14711+ jmp *pa(subarch_entries)(,%eax,4)
14712
14713 bad_subarch:
14714 WEAK(lguest_entry)
14715@@ -255,10 +325,10 @@ WEAK(xen_entry)
14716 __INITDATA
14717
14718 subarch_entries:
14719- .long default_entry /* normal x86/PC */
14720- .long lguest_entry /* lguest hypervisor */
14721- .long xen_entry /* Xen hypervisor */
14722- .long default_entry /* Moorestown MID */
14723+ .long ta(default_entry) /* normal x86/PC */
14724+ .long ta(lguest_entry) /* lguest hypervisor */
14725+ .long ta(xen_entry) /* Xen hypervisor */
14726+ .long ta(default_entry) /* Moorestown MID */
14727 num_subarch_entries = (. - subarch_entries) / 4
14728 .previous
14729 #else
14730@@ -312,6 +382,7 @@ default_entry:
14731 orl %edx,%eax
14732 movl %eax,%cr4
14733
14734+#ifdef CONFIG_X86_PAE
14735 testb $X86_CR4_PAE, %al # check if PAE is enabled
14736 jz 6f
14737
14738@@ -340,6 +411,9 @@ default_entry:
14739 /* Make changes effective */
14740 wrmsr
14741
14742+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14743+#endif
14744+
14745 6:
14746
14747 /*
14748@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14749 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14750 movl %eax,%ss # after changing gdt.
14751
14752- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14753+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14754 movl %eax,%ds
14755 movl %eax,%es
14756
14757@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14758 */
14759 cmpb $0,ready
14760 jne 1f
14761- movl $gdt_page,%eax
14762+ movl $cpu_gdt_table,%eax
14763 movl $stack_canary,%ecx
14764+#ifdef CONFIG_SMP
14765+ addl $__per_cpu_load,%ecx
14766+#endif
14767 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14768 shrl $16, %ecx
14769 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14770 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14771 1:
14772-#endif
14773 movl $(__KERNEL_STACK_CANARY),%eax
14774+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14775+ movl $(__USER_DS),%eax
14776+#else
14777+ xorl %eax,%eax
14778+#endif
14779 movl %eax,%gs
14780
14781 xorl %eax,%eax # Clear LDT
14782@@ -558,22 +639,22 @@ early_page_fault:
14783 jmp early_fault
14784
14785 early_fault:
14786- cld
14787 #ifdef CONFIG_PRINTK
14788+ cmpl $1,%ss:early_recursion_flag
14789+ je hlt_loop
14790+ incl %ss:early_recursion_flag
14791+ cld
14792 pusha
14793 movl $(__KERNEL_DS),%eax
14794 movl %eax,%ds
14795 movl %eax,%es
14796- cmpl $2,early_recursion_flag
14797- je hlt_loop
14798- incl early_recursion_flag
14799 movl %cr2,%eax
14800 pushl %eax
14801 pushl %edx /* trapno */
14802 pushl $fault_msg
14803 call printk
14804+; call dump_stack
14805 #endif
14806- call dump_stack
14807 hlt_loop:
14808 hlt
14809 jmp hlt_loop
14810@@ -581,8 +662,11 @@ hlt_loop:
14811 /* This is the default interrupt "handler" :-) */
14812 ALIGN
14813 ignore_int:
14814- cld
14815 #ifdef CONFIG_PRINTK
14816+ cmpl $2,%ss:early_recursion_flag
14817+ je hlt_loop
14818+ incl %ss:early_recursion_flag
14819+ cld
14820 pushl %eax
14821 pushl %ecx
14822 pushl %edx
14823@@ -591,9 +675,6 @@ ignore_int:
14824 movl $(__KERNEL_DS),%eax
14825 movl %eax,%ds
14826 movl %eax,%es
14827- cmpl $2,early_recursion_flag
14828- je hlt_loop
14829- incl early_recursion_flag
14830 pushl 16(%esp)
14831 pushl 24(%esp)
14832 pushl 32(%esp)
14833@@ -622,29 +703,43 @@ ENTRY(initial_code)
14834 /*
14835 * BSS section
14836 */
14837-__PAGE_ALIGNED_BSS
14838- .align PAGE_SIZE
14839 #ifdef CONFIG_X86_PAE
14840+.section .initial_pg_pmd,"a",@progbits
14841 initial_pg_pmd:
14842 .fill 1024*KPMDS,4,0
14843 #else
14844+.section .initial_page_table,"a",@progbits
14845 ENTRY(initial_page_table)
14846 .fill 1024,4,0
14847 #endif
14848+.section .initial_pg_fixmap,"a",@progbits
14849 initial_pg_fixmap:
14850 .fill 1024,4,0
14851+.section .empty_zero_page,"a",@progbits
14852 ENTRY(empty_zero_page)
14853 .fill 4096,1,0
14854+.section .swapper_pg_dir,"a",@progbits
14855 ENTRY(swapper_pg_dir)
14856+#ifdef CONFIG_X86_PAE
14857+ .fill 4,8,0
14858+#else
14859 .fill 1024,4,0
14860+#endif
14861+
14862+/*
14863+ * The IDT has to be page-aligned to simplify the Pentium
14864+ * F0 0F bug workaround.. We have a special link segment
14865+ * for this.
14866+ */
14867+.section .idt,"a",@progbits
14868+ENTRY(idt_table)
14869+ .fill 256,8,0
14870
14871 /*
14872 * This starts the data section.
14873 */
14874 #ifdef CONFIG_X86_PAE
14875-__PAGE_ALIGNED_DATA
14876- /* Page-aligned for the benefit of paravirt? */
14877- .align PAGE_SIZE
14878+.section .initial_page_table,"a",@progbits
14879 ENTRY(initial_page_table)
14880 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14881 # if KPMDS == 3
14882@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14883 # error "Kernel PMDs should be 1, 2 or 3"
14884 # endif
14885 .align PAGE_SIZE /* needs to be page-sized too */
14886+
14887+#ifdef CONFIG_PAX_PER_CPU_PGD
14888+ENTRY(cpu_pgd)
14889+ .rept NR_CPUS
14890+ .fill 4,8,0
14891+ .endr
14892+#endif
14893+
14894 #endif
14895
14896 .data
14897 .balign 4
14898 ENTRY(stack_start)
14899- .long init_thread_union+THREAD_SIZE
14900+ .long init_thread_union+THREAD_SIZE-8
14901
14902+ready: .byte 0
14903+
14904+.section .rodata,"a",@progbits
14905 early_recursion_flag:
14906 .long 0
14907
14908-ready: .byte 0
14909-
14910 int_msg:
14911 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14912
14913@@ -707,7 +811,7 @@ fault_msg:
14914 .word 0 # 32 bit align gdt_desc.address
14915 boot_gdt_descr:
14916 .word __BOOT_DS+7
14917- .long boot_gdt - __PAGE_OFFSET
14918+ .long pa(boot_gdt)
14919
14920 .word 0 # 32-bit align idt_desc.address
14921 idt_descr:
14922@@ -718,7 +822,7 @@ idt_descr:
14923 .word 0 # 32 bit align gdt_desc.address
14924 ENTRY(early_gdt_descr)
14925 .word GDT_ENTRIES*8-1
14926- .long gdt_page /* Overwritten for secondary CPUs */
14927+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14928
14929 /*
14930 * The boot_gdt must mirror the equivalent in setup.S and is
14931@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14932 .align L1_CACHE_BYTES
14933 ENTRY(boot_gdt)
14934 .fill GDT_ENTRY_BOOT_CS,8,0
14935- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14936- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14937+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14938+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14939+
14940+ .align PAGE_SIZE_asm
14941+ENTRY(cpu_gdt_table)
14942+ .rept NR_CPUS
14943+ .quad 0x0000000000000000 /* NULL descriptor */
14944+ .quad 0x0000000000000000 /* 0x0b reserved */
14945+ .quad 0x0000000000000000 /* 0x13 reserved */
14946+ .quad 0x0000000000000000 /* 0x1b reserved */
14947+
14948+#ifdef CONFIG_PAX_KERNEXEC
14949+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14950+#else
14951+ .quad 0x0000000000000000 /* 0x20 unused */
14952+#endif
14953+
14954+ .quad 0x0000000000000000 /* 0x28 unused */
14955+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14956+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14957+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14958+ .quad 0x0000000000000000 /* 0x4b reserved */
14959+ .quad 0x0000000000000000 /* 0x53 reserved */
14960+ .quad 0x0000000000000000 /* 0x5b reserved */
14961+
14962+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14963+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14964+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14965+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14966+
14967+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14968+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14969+
14970+ /*
14971+ * Segments used for calling PnP BIOS have byte granularity.
14972+ * The code segments and data segments have fixed 64k limits,
14973+ * the transfer segment sizes are set at run time.
14974+ */
14975+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14976+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14977+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14978+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14979+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14980+
14981+ /*
14982+ * The APM segments have byte granularity and their bases
14983+ * are set at run time. All have 64k limits.
14984+ */
14985+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14986+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14987+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14988+
14989+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14990+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14991+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14992+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14993+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14994+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14995+
14996+ /* Be sure this is zeroed to avoid false validations in Xen */
14997+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14998+ .endr
14999diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15000index e11e394..9aebc5d 100644
15001--- a/arch/x86/kernel/head_64.S
15002+++ b/arch/x86/kernel/head_64.S
15003@@ -19,6 +19,8 @@
15004 #include <asm/cache.h>
15005 #include <asm/processor-flags.h>
15006 #include <asm/percpu.h>
15007+#include <asm/cpufeature.h>
15008+#include <asm/alternative-asm.h>
15009
15010 #ifdef CONFIG_PARAVIRT
15011 #include <asm/asm-offsets.h>
15012@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15013 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15014 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15015 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15016+L4_VMALLOC_START = pgd_index(VMALLOC_START)
15017+L3_VMALLOC_START = pud_index(VMALLOC_START)
15018+L4_VMALLOC_END = pgd_index(VMALLOC_END)
15019+L3_VMALLOC_END = pud_index(VMALLOC_END)
15020+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15021+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15022
15023 .text
15024 __HEAD
15025@@ -85,35 +93,23 @@ startup_64:
15026 */
15027 addq %rbp, init_level4_pgt + 0(%rip)
15028 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15029+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15030+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15031+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15032 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15033
15034 addq %rbp, level3_ident_pgt + 0(%rip)
15035+#ifndef CONFIG_XEN
15036+ addq %rbp, level3_ident_pgt + 8(%rip)
15037+#endif
15038
15039- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15040- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15041+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15042+
15043+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15044+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15045
15046 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15047-
15048- /* Add an Identity mapping if I am above 1G */
15049- leaq _text(%rip), %rdi
15050- andq $PMD_PAGE_MASK, %rdi
15051-
15052- movq %rdi, %rax
15053- shrq $PUD_SHIFT, %rax
15054- andq $(PTRS_PER_PUD - 1), %rax
15055- jz ident_complete
15056-
15057- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15058- leaq level3_ident_pgt(%rip), %rbx
15059- movq %rdx, 0(%rbx, %rax, 8)
15060-
15061- movq %rdi, %rax
15062- shrq $PMD_SHIFT, %rax
15063- andq $(PTRS_PER_PMD - 1), %rax
15064- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15065- leaq level2_spare_pgt(%rip), %rbx
15066- movq %rdx, 0(%rbx, %rax, 8)
15067-ident_complete:
15068+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15069
15070 /*
15071 * Fixup the kernel text+data virtual addresses. Note that
15072@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15073 * after the boot processor executes this code.
15074 */
15075
15076- /* Enable PAE mode and PGE */
15077- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15078+ /* Enable PAE mode and PSE/PGE */
15079+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15080 movq %rax, %cr4
15081
15082 /* Setup early boot stage 4 level pagetables. */
15083@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15084 movl $MSR_EFER, %ecx
15085 rdmsr
15086 btsl $_EFER_SCE, %eax /* Enable System Call */
15087- btl $20,%edi /* No Execute supported? */
15088+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15089 jnc 1f
15090 btsl $_EFER_NX, %eax
15091+ leaq init_level4_pgt(%rip), %rdi
15092+#ifndef CONFIG_EFI
15093+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15094+#endif
15095+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15096+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15097+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15098+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15099 1: wrmsr /* Make changes effective */
15100
15101 /* Setup cr0 */
15102@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15103 * jump. In addition we need to ensure %cs is set so we make this
15104 * a far return.
15105 */
15106+ pax_set_fptr_mask
15107 movq initial_code(%rip),%rax
15108 pushq $0 # fake return address to stop unwinder
15109 pushq $__KERNEL_CS # set correct cs
15110@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15111 bad_address:
15112 jmp bad_address
15113
15114- .section ".init.text","ax"
15115+ __INIT
15116 #ifdef CONFIG_EARLY_PRINTK
15117 .globl early_idt_handlers
15118 early_idt_handlers:
15119@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15120 #endif /* EARLY_PRINTK */
15121 1: hlt
15122 jmp 1b
15123+ .previous
15124
15125 #ifdef CONFIG_EARLY_PRINTK
15126+ __INITDATA
15127 early_recursion_flag:
15128 .long 0
15129+ .previous
15130
15131+ .section .rodata,"a",@progbits
15132 early_idt_msg:
15133 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15134 early_idt_ripmsg:
15135 .asciz "RIP %s\n"
15136+ .previous
15137 #endif /* CONFIG_EARLY_PRINTK */
15138- .previous
15139
15140+ .section .rodata,"a",@progbits
15141 #define NEXT_PAGE(name) \
15142 .balign PAGE_SIZE; \
15143 ENTRY(name)
15144@@ -338,7 +348,6 @@ ENTRY(name)
15145 i = i + 1 ; \
15146 .endr
15147
15148- .data
15149 /*
15150 * This default setting generates an ident mapping at address 0x100000
15151 * and a mapping for the kernel that precisely maps virtual address
15152@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15153 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15154 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15155 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15156+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15157+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15158+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15159+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15160+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15161+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15162 .org init_level4_pgt + L4_START_KERNEL*8, 0
15163 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15164 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15165
15166+#ifdef CONFIG_PAX_PER_CPU_PGD
15167+NEXT_PAGE(cpu_pgd)
15168+ .rept NR_CPUS
15169+ .fill 512,8,0
15170+ .endr
15171+#endif
15172+
15173 NEXT_PAGE(level3_ident_pgt)
15174 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15175+#ifdef CONFIG_XEN
15176 .fill 511,8,0
15177+#else
15178+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15179+ .fill 510,8,0
15180+#endif
15181+
15182+NEXT_PAGE(level3_vmalloc_start_pgt)
15183+ .fill 512,8,0
15184+
15185+NEXT_PAGE(level3_vmalloc_end_pgt)
15186+ .fill 512,8,0
15187+
15188+NEXT_PAGE(level3_vmemmap_pgt)
15189+ .fill L3_VMEMMAP_START,8,0
15190+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15191
15192 NEXT_PAGE(level3_kernel_pgt)
15193 .fill L3_START_KERNEL,8,0
15194@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15195 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15196 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15197
15198+NEXT_PAGE(level2_vmemmap_pgt)
15199+ .fill 512,8,0
15200+
15201 NEXT_PAGE(level2_fixmap_pgt)
15202- .fill 506,8,0
15203- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15204- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15205- .fill 5,8,0
15206+ .fill 507,8,0
15207+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15208+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15209+ .fill 4,8,0
15210
15211-NEXT_PAGE(level1_fixmap_pgt)
15212+NEXT_PAGE(level1_vsyscall_pgt)
15213 .fill 512,8,0
15214
15215-NEXT_PAGE(level2_ident_pgt)
15216- /* Since I easily can, map the first 1G.
15217+ /* Since I easily can, map the first 2G.
15218 * Don't set NX because code runs from these pages.
15219 */
15220- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15221+NEXT_PAGE(level2_ident_pgt)
15222+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15223
15224 NEXT_PAGE(level2_kernel_pgt)
15225 /*
15226@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15227 * If you want to increase this then increase MODULES_VADDR
15228 * too.)
15229 */
15230- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15231- KERNEL_IMAGE_SIZE/PMD_SIZE)
15232-
15233-NEXT_PAGE(level2_spare_pgt)
15234- .fill 512, 8, 0
15235+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15236
15237 #undef PMDS
15238 #undef NEXT_PAGE
15239
15240- .data
15241+ .align PAGE_SIZE
15242+ENTRY(cpu_gdt_table)
15243+ .rept NR_CPUS
15244+ .quad 0x0000000000000000 /* NULL descriptor */
15245+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15246+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15247+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15248+ .quad 0x00cffb000000ffff /* __USER32_CS */
15249+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15250+ .quad 0x00affb000000ffff /* __USER_CS */
15251+
15252+#ifdef CONFIG_PAX_KERNEXEC
15253+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15254+#else
15255+ .quad 0x0 /* unused */
15256+#endif
15257+
15258+ .quad 0,0 /* TSS */
15259+ .quad 0,0 /* LDT */
15260+ .quad 0,0,0 /* three TLS descriptors */
15261+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15262+ /* asm/segment.h:GDT_ENTRIES must match this */
15263+
15264+ /* zero the remaining page */
15265+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15266+ .endr
15267+
15268 .align 16
15269 .globl early_gdt_descr
15270 early_gdt_descr:
15271 .word GDT_ENTRIES*8-1
15272 early_gdt_descr_base:
15273- .quad INIT_PER_CPU_VAR(gdt_page)
15274+ .quad cpu_gdt_table
15275
15276 ENTRY(phys_base)
15277 /* This must match the first entry in level2_kernel_pgt */
15278 .quad 0x0000000000000000
15279
15280 #include "../../x86/xen/xen-head.S"
15281-
15282- .section .bss, "aw", @nobits
15283+
15284+ .section .rodata,"a",@progbits
15285 .align L1_CACHE_BYTES
15286 ENTRY(idt_table)
15287- .skip IDT_ENTRIES * 16
15288+ .fill 512,8,0
15289
15290 __PAGE_ALIGNED_BSS
15291 .align PAGE_SIZE
15292diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15293index 9c3bd4a..e1d9b35 100644
15294--- a/arch/x86/kernel/i386_ksyms_32.c
15295+++ b/arch/x86/kernel/i386_ksyms_32.c
15296@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15297 EXPORT_SYMBOL(cmpxchg8b_emu);
15298 #endif
15299
15300+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15301+
15302 /* Networking helper routines. */
15303 EXPORT_SYMBOL(csum_partial_copy_generic);
15304+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15305+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15306
15307 EXPORT_SYMBOL(__get_user_1);
15308 EXPORT_SYMBOL(__get_user_2);
15309@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15310
15311 EXPORT_SYMBOL(csum_partial);
15312 EXPORT_SYMBOL(empty_zero_page);
15313+
15314+#ifdef CONFIG_PAX_KERNEXEC
15315+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15316+#endif
15317diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15318index 6104852..6114160 100644
15319--- a/arch/x86/kernel/i8259.c
15320+++ b/arch/x86/kernel/i8259.c
15321@@ -210,7 +210,7 @@ spurious_8259A_irq:
15322 "spurious 8259A interrupt: IRQ%d.\n", irq);
15323 spurious_irq_mask |= irqmask;
15324 }
15325- atomic_inc(&irq_err_count);
15326+ atomic_inc_unchecked(&irq_err_count);
15327 /*
15328 * Theoretically we do not have to handle this IRQ,
15329 * but in Linux this does not cause problems and is
15330diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15331index 43e9ccf..44ccf6f 100644
15332--- a/arch/x86/kernel/init_task.c
15333+++ b/arch/x86/kernel/init_task.c
15334@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15335 * way process stacks are handled. This is done by having a special
15336 * "init_task" linker map entry..
15337 */
15338-union thread_union init_thread_union __init_task_data =
15339- { INIT_THREAD_INFO(init_task) };
15340+union thread_union init_thread_union __init_task_data;
15341
15342 /*
15343 * Initial task structure.
15344@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15345 * section. Since TSS's are completely CPU-local, we want them
15346 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15347 */
15348-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15349-
15350+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15351+EXPORT_SYMBOL(init_tss);
15352diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15353index 8c96897..be66bfa 100644
15354--- a/arch/x86/kernel/ioport.c
15355+++ b/arch/x86/kernel/ioport.c
15356@@ -6,6 +6,7 @@
15357 #include <linux/sched.h>
15358 #include <linux/kernel.h>
15359 #include <linux/capability.h>
15360+#include <linux/security.h>
15361 #include <linux/errno.h>
15362 #include <linux/types.h>
15363 #include <linux/ioport.h>
15364@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15365
15366 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15367 return -EINVAL;
15368+#ifdef CONFIG_GRKERNSEC_IO
15369+ if (turn_on && grsec_disable_privio) {
15370+ gr_handle_ioperm();
15371+ return -EPERM;
15372+ }
15373+#endif
15374 if (turn_on && !capable(CAP_SYS_RAWIO))
15375 return -EPERM;
15376
15377@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15378 * because the ->io_bitmap_max value must match the bitmap
15379 * contents:
15380 */
15381- tss = &per_cpu(init_tss, get_cpu());
15382+ tss = init_tss + get_cpu();
15383
15384 if (turn_on)
15385 bitmap_clear(t->io_bitmap_ptr, from, num);
15386@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15387 return -EINVAL;
15388 /* Trying to gain more privileges? */
15389 if (level > old) {
15390+#ifdef CONFIG_GRKERNSEC_IO
15391+ if (grsec_disable_privio) {
15392+ gr_handle_iopl();
15393+ return -EPERM;
15394+ }
15395+#endif
15396 if (!capable(CAP_SYS_RAWIO))
15397 return -EPERM;
15398 }
15399diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15400index 429e0c9..17b3ece 100644
15401--- a/arch/x86/kernel/irq.c
15402+++ b/arch/x86/kernel/irq.c
15403@@ -18,7 +18,7 @@
15404 #include <asm/mce.h>
15405 #include <asm/hw_irq.h>
15406
15407-atomic_t irq_err_count;
15408+atomic_unchecked_t irq_err_count;
15409
15410 /* Function pointer for generic interrupt vector handling */
15411 void (*x86_platform_ipi_callback)(void) = NULL;
15412@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15413 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15414 seq_printf(p, " Machine check polls\n");
15415 #endif
15416- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15417+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15418 #if defined(CONFIG_X86_IO_APIC)
15419- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15420+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15421 #endif
15422 return 0;
15423 }
15424@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15425
15426 u64 arch_irq_stat(void)
15427 {
15428- u64 sum = atomic_read(&irq_err_count);
15429+ u64 sum = atomic_read_unchecked(&irq_err_count);
15430
15431 #ifdef CONFIG_X86_IO_APIC
15432- sum += atomic_read(&irq_mis_count);
15433+ sum += atomic_read_unchecked(&irq_mis_count);
15434 #endif
15435 return sum;
15436 }
15437diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15438index 7209070..cbcd71a 100644
15439--- a/arch/x86/kernel/irq_32.c
15440+++ b/arch/x86/kernel/irq_32.c
15441@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15442 __asm__ __volatile__("andl %%esp,%0" :
15443 "=r" (sp) : "0" (THREAD_SIZE - 1));
15444
15445- return sp < (sizeof(struct thread_info) + STACK_WARN);
15446+ return sp < STACK_WARN;
15447 }
15448
15449 static void print_stack_overflow(void)
15450@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15451 * per-CPU IRQ handling contexts (thread information and stack)
15452 */
15453 union irq_ctx {
15454- struct thread_info tinfo;
15455- u32 stack[THREAD_SIZE/sizeof(u32)];
15456+ unsigned long previous_esp;
15457+ u32 stack[THREAD_SIZE/sizeof(u32)];
15458 } __attribute__((aligned(THREAD_SIZE)));
15459
15460 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15461@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15462 static inline int
15463 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15464 {
15465- union irq_ctx *curctx, *irqctx;
15466+ union irq_ctx *irqctx;
15467 u32 *isp, arg1, arg2;
15468
15469- curctx = (union irq_ctx *) current_thread_info();
15470 irqctx = __this_cpu_read(hardirq_ctx);
15471
15472 /*
15473@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15474 * handler) we can't do that and just have to keep using the
15475 * current stack (which is the irq stack already after all)
15476 */
15477- if (unlikely(curctx == irqctx))
15478+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15479 return 0;
15480
15481 /* build the stack frame on the IRQ stack */
15482- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15483- irqctx->tinfo.task = curctx->tinfo.task;
15484- irqctx->tinfo.previous_esp = current_stack_pointer;
15485+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15486+ irqctx->previous_esp = current_stack_pointer;
15487
15488- /*
15489- * Copy the softirq bits in preempt_count so that the
15490- * softirq checks work in the hardirq context.
15491- */
15492- irqctx->tinfo.preempt_count =
15493- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15494- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15495+#ifdef CONFIG_PAX_MEMORY_UDEREF
15496+ __set_fs(MAKE_MM_SEG(0));
15497+#endif
15498
15499 if (unlikely(overflow))
15500 call_on_stack(print_stack_overflow, isp);
15501@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15502 : "0" (irq), "1" (desc), "2" (isp),
15503 "D" (desc->handle_irq)
15504 : "memory", "cc", "ecx");
15505+
15506+#ifdef CONFIG_PAX_MEMORY_UDEREF
15507+ __set_fs(current_thread_info()->addr_limit);
15508+#endif
15509+
15510 return 1;
15511 }
15512
15513@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15514 */
15515 void __cpuinit irq_ctx_init(int cpu)
15516 {
15517- union irq_ctx *irqctx;
15518-
15519 if (per_cpu(hardirq_ctx, cpu))
15520 return;
15521
15522- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15523- THREAD_FLAGS,
15524- THREAD_ORDER));
15525- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15526- irqctx->tinfo.cpu = cpu;
15527- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15528- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15529-
15530- per_cpu(hardirq_ctx, cpu) = irqctx;
15531-
15532- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15533- THREAD_FLAGS,
15534- THREAD_ORDER));
15535- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15536- irqctx->tinfo.cpu = cpu;
15537- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15538-
15539- per_cpu(softirq_ctx, cpu) = irqctx;
15540+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15541+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15542
15543 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15544 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15545@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15546 asmlinkage void do_softirq(void)
15547 {
15548 unsigned long flags;
15549- struct thread_info *curctx;
15550 union irq_ctx *irqctx;
15551 u32 *isp;
15552
15553@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15554 local_irq_save(flags);
15555
15556 if (local_softirq_pending()) {
15557- curctx = current_thread_info();
15558 irqctx = __this_cpu_read(softirq_ctx);
15559- irqctx->tinfo.task = curctx->task;
15560- irqctx->tinfo.previous_esp = current_stack_pointer;
15561+ irqctx->previous_esp = current_stack_pointer;
15562
15563 /* build the stack frame on the softirq stack */
15564- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15565+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15566+
15567+#ifdef CONFIG_PAX_MEMORY_UDEREF
15568+ __set_fs(MAKE_MM_SEG(0));
15569+#endif
15570
15571 call_on_stack(__do_softirq, isp);
15572+
15573+#ifdef CONFIG_PAX_MEMORY_UDEREF
15574+ __set_fs(current_thread_info()->addr_limit);
15575+#endif
15576+
15577 /*
15578 * Shouldn't happen, we returned above if in_interrupt():
15579 */
15580diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15581index 69bca46..0bac999 100644
15582--- a/arch/x86/kernel/irq_64.c
15583+++ b/arch/x86/kernel/irq_64.c
15584@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15585 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15586 u64 curbase = (u64)task_stack_page(current);
15587
15588- if (user_mode_vm(regs))
15589+ if (user_mode(regs))
15590 return;
15591
15592 WARN_ONCE(regs->sp >= curbase &&
15593diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15594index faba577..93b9e71 100644
15595--- a/arch/x86/kernel/kgdb.c
15596+++ b/arch/x86/kernel/kgdb.c
15597@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15598 #ifdef CONFIG_X86_32
15599 switch (regno) {
15600 case GDB_SS:
15601- if (!user_mode_vm(regs))
15602+ if (!user_mode(regs))
15603 *(unsigned long *)mem = __KERNEL_DS;
15604 break;
15605 case GDB_SP:
15606- if (!user_mode_vm(regs))
15607+ if (!user_mode(regs))
15608 *(unsigned long *)mem = kernel_stack_pointer(regs);
15609 break;
15610 case GDB_GS:
15611@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15612 case 'k':
15613 /* clear the trace bit */
15614 linux_regs->flags &= ~X86_EFLAGS_TF;
15615- atomic_set(&kgdb_cpu_doing_single_step, -1);
15616+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15617
15618 /* set the trace bit if we're stepping */
15619 if (remcomInBuffer[0] == 's') {
15620 linux_regs->flags |= X86_EFLAGS_TF;
15621- atomic_set(&kgdb_cpu_doing_single_step,
15622+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15623 raw_smp_processor_id());
15624 }
15625
15626@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15627
15628 switch (cmd) {
15629 case DIE_DEBUG:
15630- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15631+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15632 if (user_mode(regs))
15633 return single_step_cont(regs, args);
15634 break;
15635diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15636index 7da647d..5d3c4c1 100644
15637--- a/arch/x86/kernel/kprobes.c
15638+++ b/arch/x86/kernel/kprobes.c
15639@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15640 } __attribute__((packed)) *insn;
15641
15642 insn = (struct __arch_relative_insn *)from;
15643+
15644+ pax_open_kernel();
15645 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15646 insn->op = op;
15647+ pax_close_kernel();
15648 }
15649
15650 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15651@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15652 kprobe_opcode_t opcode;
15653 kprobe_opcode_t *orig_opcodes = opcodes;
15654
15655- if (search_exception_tables((unsigned long)opcodes))
15656+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15657 return 0; /* Page fault may occur on this address. */
15658
15659 retry:
15660@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15661 }
15662 }
15663 insn_get_length(&insn);
15664+ pax_open_kernel();
15665 memcpy(dest, insn.kaddr, insn.length);
15666+ pax_close_kernel();
15667
15668 #ifdef CONFIG_X86_64
15669 if (insn_rip_relative(&insn)) {
15670@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15671 (u8 *) dest;
15672 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15673 disp = (u8 *) dest + insn_offset_displacement(&insn);
15674+ pax_open_kernel();
15675 *(s32 *) disp = (s32) newdisp;
15676+ pax_close_kernel();
15677 }
15678 #endif
15679 return insn.length;
15680@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15681 */
15682 __copy_instruction(p->ainsn.insn, p->addr, 0);
15683
15684- if (can_boost(p->addr))
15685+ if (can_boost(ktla_ktva(p->addr)))
15686 p->ainsn.boostable = 0;
15687 else
15688 p->ainsn.boostable = -1;
15689
15690- p->opcode = *p->addr;
15691+ p->opcode = *(ktla_ktva(p->addr));
15692 }
15693
15694 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15695@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15696 * nor set current_kprobe, because it doesn't use single
15697 * stepping.
15698 */
15699- regs->ip = (unsigned long)p->ainsn.insn;
15700+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15701 preempt_enable_no_resched();
15702 return;
15703 }
15704@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15705 if (p->opcode == BREAKPOINT_INSTRUCTION)
15706 regs->ip = (unsigned long)p->addr;
15707 else
15708- regs->ip = (unsigned long)p->ainsn.insn;
15709+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15710 }
15711
15712 /*
15713@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15714 setup_singlestep(p, regs, kcb, 0);
15715 return 1;
15716 }
15717- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15718+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15719 /*
15720 * The breakpoint instruction was removed right
15721 * after we hit it. Another cpu has removed
15722@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15723 " movq %rax, 152(%rsp)\n"
15724 RESTORE_REGS_STRING
15725 " popfq\n"
15726+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15727+ " btsq $63,(%rsp)\n"
15728+#endif
15729 #else
15730 " pushf\n"
15731 SAVE_REGS_STRING
15732@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15733 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15734 {
15735 unsigned long *tos = stack_addr(regs);
15736- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15737+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15738 unsigned long orig_ip = (unsigned long)p->addr;
15739 kprobe_opcode_t *insn = p->ainsn.insn;
15740
15741@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15742 struct die_args *args = data;
15743 int ret = NOTIFY_DONE;
15744
15745- if (args->regs && user_mode_vm(args->regs))
15746+ if (args->regs && user_mode(args->regs))
15747 return ret;
15748
15749 switch (val) {
15750@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15751 * Verify if the address gap is in 2GB range, because this uses
15752 * a relative jump.
15753 */
15754- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15755+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15756 if (abs(rel) > 0x7fffffff)
15757 return -ERANGE;
15758
15759@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15760 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15761
15762 /* Set probe function call */
15763- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15764+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15765
15766 /* Set returning jmp instruction at the tail of out-of-line buffer */
15767 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15768- (u8 *)op->kp.addr + op->optinsn.size);
15769+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15770
15771 flush_icache_range((unsigned long) buf,
15772 (unsigned long) buf + TMPL_END_IDX +
15773@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15774 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15775
15776 /* Backup instructions which will be replaced by jump address */
15777- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15778+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15779 RELATIVE_ADDR_SIZE);
15780
15781 insn_buf[0] = RELATIVEJUMP_OPCODE;
15782diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15783index a9c2116..a52d4fc 100644
15784--- a/arch/x86/kernel/kvm.c
15785+++ b/arch/x86/kernel/kvm.c
15786@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15787 pv_mmu_ops.set_pud = kvm_set_pud;
15788 #if PAGETABLE_LEVELS == 4
15789 pv_mmu_ops.set_pgd = kvm_set_pgd;
15790+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15791 #endif
15792 #endif
15793 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15794diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15795index ea69726..604d066 100644
15796--- a/arch/x86/kernel/ldt.c
15797+++ b/arch/x86/kernel/ldt.c
15798@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15799 if (reload) {
15800 #ifdef CONFIG_SMP
15801 preempt_disable();
15802- load_LDT(pc);
15803+ load_LDT_nolock(pc);
15804 if (!cpumask_equal(mm_cpumask(current->mm),
15805 cpumask_of(smp_processor_id())))
15806 smp_call_function(flush_ldt, current->mm, 1);
15807 preempt_enable();
15808 #else
15809- load_LDT(pc);
15810+ load_LDT_nolock(pc);
15811 #endif
15812 }
15813 if (oldsize) {
15814@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15815 return err;
15816
15817 for (i = 0; i < old->size; i++)
15818- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15819+ write_ldt_entry(new->ldt, i, old->ldt + i);
15820 return 0;
15821 }
15822
15823@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15824 retval = copy_ldt(&mm->context, &old_mm->context);
15825 mutex_unlock(&old_mm->context.lock);
15826 }
15827+
15828+ if (tsk == current) {
15829+ mm->context.vdso = 0;
15830+
15831+#ifdef CONFIG_X86_32
15832+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15833+ mm->context.user_cs_base = 0UL;
15834+ mm->context.user_cs_limit = ~0UL;
15835+
15836+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15837+ cpus_clear(mm->context.cpu_user_cs_mask);
15838+#endif
15839+
15840+#endif
15841+#endif
15842+
15843+ }
15844+
15845 return retval;
15846 }
15847
15848@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15849 }
15850 }
15851
15852+#ifdef CONFIG_PAX_SEGMEXEC
15853+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15854+ error = -EINVAL;
15855+ goto out_unlock;
15856+ }
15857+#endif
15858+
15859 fill_ldt(&ldt, &ldt_info);
15860 if (oldmode)
15861 ldt.avl = 0;
15862diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15863index a3fa43b..8966f4c 100644
15864--- a/arch/x86/kernel/machine_kexec_32.c
15865+++ b/arch/x86/kernel/machine_kexec_32.c
15866@@ -27,7 +27,7 @@
15867 #include <asm/cacheflush.h>
15868 #include <asm/debugreg.h>
15869
15870-static void set_idt(void *newidt, __u16 limit)
15871+static void set_idt(struct desc_struct *newidt, __u16 limit)
15872 {
15873 struct desc_ptr curidt;
15874
15875@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15876 }
15877
15878
15879-static void set_gdt(void *newgdt, __u16 limit)
15880+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15881 {
15882 struct desc_ptr curgdt;
15883
15884@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15885 }
15886
15887 control_page = page_address(image->control_code_page);
15888- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15889+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15890
15891 relocate_kernel_ptr = control_page;
15892 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15893diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15894index 3ca42d0..7cff8cc 100644
15895--- a/arch/x86/kernel/microcode_intel.c
15896+++ b/arch/x86/kernel/microcode_intel.c
15897@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15898
15899 static int get_ucode_user(void *to, const void *from, size_t n)
15900 {
15901- return copy_from_user(to, from, n);
15902+ return copy_from_user(to, (const void __force_user *)from, n);
15903 }
15904
15905 static enum ucode_state
15906 request_microcode_user(int cpu, const void __user *buf, size_t size)
15907 {
15908- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15909+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15910 }
15911
15912 static void microcode_fini_cpu(int cpu)
15913diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15914index 925179f..267ac7a 100644
15915--- a/arch/x86/kernel/module.c
15916+++ b/arch/x86/kernel/module.c
15917@@ -36,15 +36,60 @@
15918 #define DEBUGP(fmt...)
15919 #endif
15920
15921-void *module_alloc(unsigned long size)
15922+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15923 {
15924- if (PAGE_ALIGN(size) > MODULES_LEN)
15925+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15926 return NULL;
15927 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15928- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15929+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15930 -1, __builtin_return_address(0));
15931 }
15932
15933+void *module_alloc(unsigned long size)
15934+{
15935+
15936+#ifdef CONFIG_PAX_KERNEXEC
15937+ return __module_alloc(size, PAGE_KERNEL);
15938+#else
15939+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15940+#endif
15941+
15942+}
15943+
15944+#ifdef CONFIG_PAX_KERNEXEC
15945+#ifdef CONFIG_X86_32
15946+void *module_alloc_exec(unsigned long size)
15947+{
15948+ struct vm_struct *area;
15949+
15950+ if (size == 0)
15951+ return NULL;
15952+
15953+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15954+ return area ? area->addr : NULL;
15955+}
15956+EXPORT_SYMBOL(module_alloc_exec);
15957+
15958+void module_free_exec(struct module *mod, void *module_region)
15959+{
15960+ vunmap(module_region);
15961+}
15962+EXPORT_SYMBOL(module_free_exec);
15963+#else
15964+void module_free_exec(struct module *mod, void *module_region)
15965+{
15966+ module_free(mod, module_region);
15967+}
15968+EXPORT_SYMBOL(module_free_exec);
15969+
15970+void *module_alloc_exec(unsigned long size)
15971+{
15972+ return __module_alloc(size, PAGE_KERNEL_RX);
15973+}
15974+EXPORT_SYMBOL(module_alloc_exec);
15975+#endif
15976+#endif
15977+
15978 #ifdef CONFIG_X86_32
15979 int apply_relocate(Elf32_Shdr *sechdrs,
15980 const char *strtab,
15981@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15982 unsigned int i;
15983 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15984 Elf32_Sym *sym;
15985- uint32_t *location;
15986+ uint32_t *plocation, location;
15987
15988 DEBUGP("Applying relocate section %u to %u\n", relsec,
15989 sechdrs[relsec].sh_info);
15990 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15991 /* This is where to make the change */
15992- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15993- + rel[i].r_offset;
15994+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15995+ location = (uint32_t)plocation;
15996+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15997+ plocation = ktla_ktva((void *)plocation);
15998 /* This is the symbol it is referring to. Note that all
15999 undefined symbols have been resolved. */
16000 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16001@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16002 switch (ELF32_R_TYPE(rel[i].r_info)) {
16003 case R_386_32:
16004 /* We add the value into the location given */
16005- *location += sym->st_value;
16006+ pax_open_kernel();
16007+ *plocation += sym->st_value;
16008+ pax_close_kernel();
16009 break;
16010 case R_386_PC32:
16011 /* Add the value, subtract its postition */
16012- *location += sym->st_value - (uint32_t)location;
16013+ pax_open_kernel();
16014+ *plocation += sym->st_value - location;
16015+ pax_close_kernel();
16016 break;
16017 default:
16018 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16019@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16020 case R_X86_64_NONE:
16021 break;
16022 case R_X86_64_64:
16023+ pax_open_kernel();
16024 *(u64 *)loc = val;
16025+ pax_close_kernel();
16026 break;
16027 case R_X86_64_32:
16028+ pax_open_kernel();
16029 *(u32 *)loc = val;
16030+ pax_close_kernel();
16031 if (val != *(u32 *)loc)
16032 goto overflow;
16033 break;
16034 case R_X86_64_32S:
16035+ pax_open_kernel();
16036 *(s32 *)loc = val;
16037+ pax_close_kernel();
16038 if ((s64)val != *(s32 *)loc)
16039 goto overflow;
16040 break;
16041 case R_X86_64_PC32:
16042 val -= (u64)loc;
16043+ pax_open_kernel();
16044 *(u32 *)loc = val;
16045+ pax_close_kernel();
16046+
16047 #if 0
16048 if ((s64)val != *(s32 *)loc)
16049 goto overflow;
16050diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16051index e88f37b..1353db6 100644
16052--- a/arch/x86/kernel/nmi.c
16053+++ b/arch/x86/kernel/nmi.c
16054@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16055 dotraplinkage notrace __kprobes void
16056 do_nmi(struct pt_regs *regs, long error_code)
16057 {
16058+
16059+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16060+ if (!user_mode(regs)) {
16061+ unsigned long cs = regs->cs & 0xFFFF;
16062+ unsigned long ip = ktva_ktla(regs->ip);
16063+
16064+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16065+ regs->ip = ip;
16066+ }
16067+#endif
16068+
16069 nmi_enter();
16070
16071 inc_irq_stat(__nmi_count);
16072diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16073index 676b8c7..870ba04 100644
16074--- a/arch/x86/kernel/paravirt-spinlocks.c
16075+++ b/arch/x86/kernel/paravirt-spinlocks.c
16076@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16077 arch_spin_lock(lock);
16078 }
16079
16080-struct pv_lock_ops pv_lock_ops = {
16081+struct pv_lock_ops pv_lock_ops __read_only = {
16082 #ifdef CONFIG_SMP
16083 .spin_is_locked = __ticket_spin_is_locked,
16084 .spin_is_contended = __ticket_spin_is_contended,
16085diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16086index d90272e..6bb013b 100644
16087--- a/arch/x86/kernel/paravirt.c
16088+++ b/arch/x86/kernel/paravirt.c
16089@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16090 {
16091 return x;
16092 }
16093+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16094+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16095+#endif
16096
16097 void __init default_banner(void)
16098 {
16099@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16100 if (opfunc == NULL)
16101 /* If there's no function, patch it with a ud2a (BUG) */
16102 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16103- else if (opfunc == _paravirt_nop)
16104+ else if (opfunc == (void *)_paravirt_nop)
16105 /* If the operation is a nop, then nop the callsite */
16106 ret = paravirt_patch_nop();
16107
16108 /* identity functions just return their single argument */
16109- else if (opfunc == _paravirt_ident_32)
16110+ else if (opfunc == (void *)_paravirt_ident_32)
16111 ret = paravirt_patch_ident_32(insnbuf, len);
16112- else if (opfunc == _paravirt_ident_64)
16113+ else if (opfunc == (void *)_paravirt_ident_64)
16114 ret = paravirt_patch_ident_64(insnbuf, len);
16115+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16116+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16117+ ret = paravirt_patch_ident_64(insnbuf, len);
16118+#endif
16119
16120 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16121 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16122@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16123 if (insn_len > len || start == NULL)
16124 insn_len = len;
16125 else
16126- memcpy(insnbuf, start, insn_len);
16127+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16128
16129 return insn_len;
16130 }
16131@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16132 preempt_enable();
16133 }
16134
16135-struct pv_info pv_info = {
16136+struct pv_info pv_info __read_only = {
16137 .name = "bare hardware",
16138 .paravirt_enabled = 0,
16139 .kernel_rpl = 0,
16140@@ -313,16 +320,16 @@ struct pv_info pv_info = {
16141 #endif
16142 };
16143
16144-struct pv_init_ops pv_init_ops = {
16145+struct pv_init_ops pv_init_ops __read_only = {
16146 .patch = native_patch,
16147 };
16148
16149-struct pv_time_ops pv_time_ops = {
16150+struct pv_time_ops pv_time_ops __read_only = {
16151 .sched_clock = native_sched_clock,
16152 .steal_clock = native_steal_clock,
16153 };
16154
16155-struct pv_irq_ops pv_irq_ops = {
16156+struct pv_irq_ops pv_irq_ops __read_only = {
16157 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16158 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16159 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16160@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16161 #endif
16162 };
16163
16164-struct pv_cpu_ops pv_cpu_ops = {
16165+struct pv_cpu_ops pv_cpu_ops __read_only = {
16166 .cpuid = native_cpuid,
16167 .get_debugreg = native_get_debugreg,
16168 .set_debugreg = native_set_debugreg,
16169@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16170 .end_context_switch = paravirt_nop,
16171 };
16172
16173-struct pv_apic_ops pv_apic_ops = {
16174+struct pv_apic_ops pv_apic_ops __read_only = {
16175 #ifdef CONFIG_X86_LOCAL_APIC
16176 .startup_ipi_hook = paravirt_nop,
16177 #endif
16178 };
16179
16180-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16181+#ifdef CONFIG_X86_32
16182+#ifdef CONFIG_X86_PAE
16183+/* 64-bit pagetable entries */
16184+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16185+#else
16186 /* 32-bit pagetable entries */
16187 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16188+#endif
16189 #else
16190 /* 64-bit pagetable entries */
16191 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16192 #endif
16193
16194-struct pv_mmu_ops pv_mmu_ops = {
16195+struct pv_mmu_ops pv_mmu_ops __read_only = {
16196
16197 .read_cr2 = native_read_cr2,
16198 .write_cr2 = native_write_cr2,
16199@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16200 .make_pud = PTE_IDENT,
16201
16202 .set_pgd = native_set_pgd,
16203+ .set_pgd_batched = native_set_pgd_batched,
16204 #endif
16205 #endif /* PAGETABLE_LEVELS >= 3 */
16206
16207@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16208 },
16209
16210 .set_fixmap = native_set_fixmap,
16211+
16212+#ifdef CONFIG_PAX_KERNEXEC
16213+ .pax_open_kernel = native_pax_open_kernel,
16214+ .pax_close_kernel = native_pax_close_kernel,
16215+#endif
16216+
16217 };
16218
16219 EXPORT_SYMBOL_GPL(pv_time_ops);
16220diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16221index 35ccf75..7a15747 100644
16222--- a/arch/x86/kernel/pci-iommu_table.c
16223+++ b/arch/x86/kernel/pci-iommu_table.c
16224@@ -2,7 +2,7 @@
16225 #include <asm/iommu_table.h>
16226 #include <linux/string.h>
16227 #include <linux/kallsyms.h>
16228-
16229+#include <linux/sched.h>
16230
16231 #define DEBUG 1
16232
16233diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16234index ee5d4fb..426649b 100644
16235--- a/arch/x86/kernel/process.c
16236+++ b/arch/x86/kernel/process.c
16237@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16238
16239 void free_thread_info(struct thread_info *ti)
16240 {
16241- free_thread_xstate(ti->task);
16242 free_pages((unsigned long)ti, THREAD_ORDER);
16243 }
16244
16245+static struct kmem_cache *task_struct_cachep;
16246+
16247 void arch_task_cache_init(void)
16248 {
16249- task_xstate_cachep =
16250- kmem_cache_create("task_xstate", xstate_size,
16251+ /* create a slab on which task_structs can be allocated */
16252+ task_struct_cachep =
16253+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16254+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16255+
16256+ task_xstate_cachep =
16257+ kmem_cache_create("task_xstate", xstate_size,
16258 __alignof__(union thread_xstate),
16259- SLAB_PANIC | SLAB_NOTRACK, NULL);
16260+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16261+}
16262+
16263+struct task_struct *alloc_task_struct_node(int node)
16264+{
16265+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16266+}
16267+
16268+void free_task_struct(struct task_struct *task)
16269+{
16270+ free_thread_xstate(task);
16271+ kmem_cache_free(task_struct_cachep, task);
16272 }
16273
16274 /*
16275@@ -70,7 +87,7 @@ void exit_thread(void)
16276 unsigned long *bp = t->io_bitmap_ptr;
16277
16278 if (bp) {
16279- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16280+ struct tss_struct *tss = init_tss + get_cpu();
16281
16282 t->io_bitmap_ptr = NULL;
16283 clear_thread_flag(TIF_IO_BITMAP);
16284@@ -106,7 +123,7 @@ void show_regs_common(void)
16285
16286 printk(KERN_CONT "\n");
16287 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16288- current->pid, current->comm, print_tainted(),
16289+ task_pid_nr(current), current->comm, print_tainted(),
16290 init_utsname()->release,
16291 (int)strcspn(init_utsname()->version, " "),
16292 init_utsname()->version);
16293@@ -120,6 +137,9 @@ void flush_thread(void)
16294 {
16295 struct task_struct *tsk = current;
16296
16297+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16298+ loadsegment(gs, 0);
16299+#endif
16300 flush_ptrace_hw_breakpoint(tsk);
16301 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16302 /*
16303@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16304 regs.di = (unsigned long) arg;
16305
16306 #ifdef CONFIG_X86_32
16307- regs.ds = __USER_DS;
16308- regs.es = __USER_DS;
16309+ regs.ds = __KERNEL_DS;
16310+ regs.es = __KERNEL_DS;
16311 regs.fs = __KERNEL_PERCPU;
16312- regs.gs = __KERNEL_STACK_CANARY;
16313+ savesegment(gs, regs.gs);
16314 #else
16315 regs.ss = __KERNEL_DS;
16316 #endif
16317@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16318
16319 return ret;
16320 }
16321-void stop_this_cpu(void *dummy)
16322+__noreturn void stop_this_cpu(void *dummy)
16323 {
16324 local_irq_disable();
16325 /*
16326@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16327 }
16328 early_param("idle", idle_setup);
16329
16330-unsigned long arch_align_stack(unsigned long sp)
16331+#ifdef CONFIG_PAX_RANDKSTACK
16332+void pax_randomize_kstack(struct pt_regs *regs)
16333 {
16334- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16335- sp -= get_random_int() % 8192;
16336- return sp & ~0xf;
16337-}
16338+ struct thread_struct *thread = &current->thread;
16339+ unsigned long time;
16340
16341-unsigned long arch_randomize_brk(struct mm_struct *mm)
16342-{
16343- unsigned long range_end = mm->brk + 0x02000000;
16344- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16345-}
16346+ if (!randomize_va_space)
16347+ return;
16348+
16349+ if (v8086_mode(regs))
16350+ return;
16351
16352+ rdtscl(time);
16353+
16354+ /* P4 seems to return a 0 LSB, ignore it */
16355+#ifdef CONFIG_MPENTIUM4
16356+ time &= 0x3EUL;
16357+ time <<= 2;
16358+#elif defined(CONFIG_X86_64)
16359+ time &= 0xFUL;
16360+ time <<= 4;
16361+#else
16362+ time &= 0x1FUL;
16363+ time <<= 3;
16364+#endif
16365+
16366+ thread->sp0 ^= time;
16367+ load_sp0(init_tss + smp_processor_id(), thread);
16368+
16369+#ifdef CONFIG_X86_64
16370+ percpu_write(kernel_stack, thread->sp0);
16371+#endif
16372+}
16373+#endif
16374diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16375index 795b79f..063767a 100644
16376--- a/arch/x86/kernel/process_32.c
16377+++ b/arch/x86/kernel/process_32.c
16378@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16379 unsigned long thread_saved_pc(struct task_struct *tsk)
16380 {
16381 return ((unsigned long *)tsk->thread.sp)[3];
16382+//XXX return tsk->thread.eip;
16383 }
16384
16385 #ifndef CONFIG_SMP
16386@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16387 unsigned long sp;
16388 unsigned short ss, gs;
16389
16390- if (user_mode_vm(regs)) {
16391+ if (user_mode(regs)) {
16392 sp = regs->sp;
16393 ss = regs->ss & 0xffff;
16394- gs = get_user_gs(regs);
16395 } else {
16396 sp = kernel_stack_pointer(regs);
16397 savesegment(ss, ss);
16398- savesegment(gs, gs);
16399 }
16400+ gs = get_user_gs(regs);
16401
16402 show_regs_common();
16403
16404@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16405 struct task_struct *tsk;
16406 int err;
16407
16408- childregs = task_pt_regs(p);
16409+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16410 *childregs = *regs;
16411 childregs->ax = 0;
16412 childregs->sp = sp;
16413
16414 p->thread.sp = (unsigned long) childregs;
16415 p->thread.sp0 = (unsigned long) (childregs+1);
16416+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16417
16418 p->thread.ip = (unsigned long) ret_from_fork;
16419
16420@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16421 struct thread_struct *prev = &prev_p->thread,
16422 *next = &next_p->thread;
16423 int cpu = smp_processor_id();
16424- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16425+ struct tss_struct *tss = init_tss + cpu;
16426 bool preload_fpu;
16427
16428 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16429@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16430 */
16431 lazy_save_gs(prev->gs);
16432
16433+#ifdef CONFIG_PAX_MEMORY_UDEREF
16434+ __set_fs(task_thread_info(next_p)->addr_limit);
16435+#endif
16436+
16437 /*
16438 * Load the per-thread Thread-Local Storage descriptor.
16439 */
16440@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16441 */
16442 arch_end_context_switch(next_p);
16443
16444+ percpu_write(current_task, next_p);
16445+ percpu_write(current_tinfo, &next_p->tinfo);
16446+
16447 if (preload_fpu)
16448 __math_state_restore();
16449
16450@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16451 if (prev->gs | next->gs)
16452 lazy_load_gs(next->gs);
16453
16454- percpu_write(current_task, next_p);
16455-
16456 return prev_p;
16457 }
16458
16459@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16460 } while (count++ < 16);
16461 return 0;
16462 }
16463-
16464diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16465index 3bd7e6e..90b2bcf 100644
16466--- a/arch/x86/kernel/process_64.c
16467+++ b/arch/x86/kernel/process_64.c
16468@@ -89,7 +89,7 @@ static void __exit_idle(void)
16469 void exit_idle(void)
16470 {
16471 /* idle loop has pid 0 */
16472- if (current->pid)
16473+ if (task_pid_nr(current))
16474 return;
16475 __exit_idle();
16476 }
16477@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16478 struct pt_regs *childregs;
16479 struct task_struct *me = current;
16480
16481- childregs = ((struct pt_regs *)
16482- (THREAD_SIZE + task_stack_page(p))) - 1;
16483+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16484 *childregs = *regs;
16485
16486 childregs->ax = 0;
16487@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16488 p->thread.sp = (unsigned long) childregs;
16489 p->thread.sp0 = (unsigned long) (childregs+1);
16490 p->thread.usersp = me->thread.usersp;
16491+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16492
16493 set_tsk_thread_flag(p, TIF_FORK);
16494
16495@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16496 struct thread_struct *prev = &prev_p->thread;
16497 struct thread_struct *next = &next_p->thread;
16498 int cpu = smp_processor_id();
16499- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16500+ struct tss_struct *tss = init_tss + cpu;
16501 unsigned fsindex, gsindex;
16502 bool preload_fpu;
16503
16504@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16505 prev->usersp = percpu_read(old_rsp);
16506 percpu_write(old_rsp, next->usersp);
16507 percpu_write(current_task, next_p);
16508+ percpu_write(current_tinfo, &next_p->tinfo);
16509
16510- percpu_write(kernel_stack,
16511- (unsigned long)task_stack_page(next_p) +
16512- THREAD_SIZE - KERNEL_STACK_OFFSET);
16513+ percpu_write(kernel_stack, next->sp0);
16514
16515 /*
16516 * Now maybe reload the debug registers and handle I/O bitmaps
16517@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16518 if (!p || p == current || p->state == TASK_RUNNING)
16519 return 0;
16520 stack = (unsigned long)task_stack_page(p);
16521- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16522+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16523 return 0;
16524 fp = *(u64 *)(p->thread.sp);
16525 do {
16526- if (fp < (unsigned long)stack ||
16527- fp >= (unsigned long)stack+THREAD_SIZE)
16528+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16529 return 0;
16530 ip = *(u64 *)(fp+8);
16531 if (!in_sched_functions(ip))
16532diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16533index 8252879..d3219e0 100644
16534--- a/arch/x86/kernel/ptrace.c
16535+++ b/arch/x86/kernel/ptrace.c
16536@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16537 unsigned long addr, unsigned long data)
16538 {
16539 int ret;
16540- unsigned long __user *datap = (unsigned long __user *)data;
16541+ unsigned long __user *datap = (__force unsigned long __user *)data;
16542
16543 switch (request) {
16544 /* read the word at location addr in the USER area. */
16545@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16546 if ((int) addr < 0)
16547 return -EIO;
16548 ret = do_get_thread_area(child, addr,
16549- (struct user_desc __user *)data);
16550+ (__force struct user_desc __user *) data);
16551 break;
16552
16553 case PTRACE_SET_THREAD_AREA:
16554 if ((int) addr < 0)
16555 return -EIO;
16556 ret = do_set_thread_area(child, addr,
16557- (struct user_desc __user *)data, 0);
16558+ (__force struct user_desc __user *) data, 0);
16559 break;
16560 #endif
16561
16562@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16563 memset(info, 0, sizeof(*info));
16564 info->si_signo = SIGTRAP;
16565 info->si_code = si_code;
16566- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16567+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16568 }
16569
16570 void user_single_step_siginfo(struct task_struct *tsk,
16571diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16572index 42eb330..139955c 100644
16573--- a/arch/x86/kernel/pvclock.c
16574+++ b/arch/x86/kernel/pvclock.c
16575@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16576 return pv_tsc_khz;
16577 }
16578
16579-static atomic64_t last_value = ATOMIC64_INIT(0);
16580+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16581
16582 void pvclock_resume(void)
16583 {
16584- atomic64_set(&last_value, 0);
16585+ atomic64_set_unchecked(&last_value, 0);
16586 }
16587
16588 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16589@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16590 * updating at the same time, and one of them could be slightly behind,
16591 * making the assumption that last_value always go forward fail to hold.
16592 */
16593- last = atomic64_read(&last_value);
16594+ last = atomic64_read_unchecked(&last_value);
16595 do {
16596 if (ret < last)
16597 return last;
16598- last = atomic64_cmpxchg(&last_value, last, ret);
16599+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16600 } while (unlikely(last != ret));
16601
16602 return ret;
16603diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16604index 37a458b..e63d183 100644
16605--- a/arch/x86/kernel/reboot.c
16606+++ b/arch/x86/kernel/reboot.c
16607@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16608 EXPORT_SYMBOL(pm_power_off);
16609
16610 static const struct desc_ptr no_idt = {};
16611-static int reboot_mode;
16612+static unsigned short reboot_mode;
16613 enum reboot_type reboot_type = BOOT_ACPI;
16614 int reboot_force;
16615
16616@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16617 extern const unsigned char machine_real_restart_asm[];
16618 extern const u64 machine_real_restart_gdt[3];
16619
16620-void machine_real_restart(unsigned int type)
16621+__noreturn void machine_real_restart(unsigned int type)
16622 {
16623 void *restart_va;
16624 unsigned long restart_pa;
16625- void (*restart_lowmem)(unsigned int);
16626+ void (* __noreturn restart_lowmem)(unsigned int);
16627 u64 *lowmem_gdt;
16628
16629+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16630+ struct desc_struct *gdt;
16631+#endif
16632+
16633 local_irq_disable();
16634
16635 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16636@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16637 boot)". This seems like a fairly standard thing that gets set by
16638 REBOOT.COM programs, and the previous reset routine did this
16639 too. */
16640- *((unsigned short *)0x472) = reboot_mode;
16641+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16642
16643 /* Patch the GDT in the low memory trampoline */
16644 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16645
16646 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16647 restart_pa = virt_to_phys(restart_va);
16648- restart_lowmem = (void (*)(unsigned int))restart_pa;
16649+ restart_lowmem = (void *)restart_pa;
16650
16651 /* GDT[0]: GDT self-pointer */
16652 lowmem_gdt[0] =
16653@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16654 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16655
16656 /* Jump to the identity-mapped low memory code */
16657+
16658+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16659+ gdt = get_cpu_gdt_table(smp_processor_id());
16660+ pax_open_kernel();
16661+#ifdef CONFIG_PAX_MEMORY_UDEREF
16662+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16663+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16664+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16665+#endif
16666+#ifdef CONFIG_PAX_KERNEXEC
16667+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16668+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16669+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16670+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16671+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16672+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16673+#endif
16674+ pax_close_kernel();
16675+#endif
16676+
16677+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16678+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16679+ unreachable();
16680+#else
16681 restart_lowmem(type);
16682+#endif
16683+
16684 }
16685 #ifdef CONFIG_APM_MODULE
16686 EXPORT_SYMBOL(machine_real_restart);
16687@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16688 * try to force a triple fault and then cycle between hitting the keyboard
16689 * controller and doing that
16690 */
16691-static void native_machine_emergency_restart(void)
16692+__noreturn static void native_machine_emergency_restart(void)
16693 {
16694 int i;
16695 int attempt = 0;
16696@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16697 #endif
16698 }
16699
16700-static void __machine_emergency_restart(int emergency)
16701+static __noreturn void __machine_emergency_restart(int emergency)
16702 {
16703 reboot_emergency = emergency;
16704 machine_ops.emergency_restart();
16705 }
16706
16707-static void native_machine_restart(char *__unused)
16708+static __noreturn void native_machine_restart(char *__unused)
16709 {
16710 printk("machine restart\n");
16711
16712@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16713 __machine_emergency_restart(0);
16714 }
16715
16716-static void native_machine_halt(void)
16717+static __noreturn void native_machine_halt(void)
16718 {
16719 /* stop other cpus and apics */
16720 machine_shutdown();
16721@@ -690,7 +720,7 @@ static void native_machine_halt(void)
16722 stop_this_cpu(NULL);
16723 }
16724
16725-static void native_machine_power_off(void)
16726+__noreturn static void native_machine_power_off(void)
16727 {
16728 if (pm_power_off) {
16729 if (!reboot_force)
16730@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16731 }
16732 /* a fallback in case there is no PM info available */
16733 tboot_shutdown(TB_SHUTDOWN_HALT);
16734+ unreachable();
16735 }
16736
16737 struct machine_ops machine_ops = {
16738diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16739index 7a6f3b3..bed145d7 100644
16740--- a/arch/x86/kernel/relocate_kernel_64.S
16741+++ b/arch/x86/kernel/relocate_kernel_64.S
16742@@ -11,6 +11,7 @@
16743 #include <asm/kexec.h>
16744 #include <asm/processor-flags.h>
16745 #include <asm/pgtable_types.h>
16746+#include <asm/alternative-asm.h>
16747
16748 /*
16749 * Must be relocatable PIC code callable as a C function
16750@@ -160,13 +161,14 @@ identity_mapped:
16751 xorq %rbp, %rbp
16752 xorq %r8, %r8
16753 xorq %r9, %r9
16754- xorq %r10, %r9
16755+ xorq %r10, %r10
16756 xorq %r11, %r11
16757 xorq %r12, %r12
16758 xorq %r13, %r13
16759 xorq %r14, %r14
16760 xorq %r15, %r15
16761
16762+ pax_force_retaddr 0, 1
16763 ret
16764
16765 1:
16766diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16767index cf0ef98..e3f780b 100644
16768--- a/arch/x86/kernel/setup.c
16769+++ b/arch/x86/kernel/setup.c
16770@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16771
16772 switch (data->type) {
16773 case SETUP_E820_EXT:
16774- parse_e820_ext(data);
16775+ parse_e820_ext((struct setup_data __force_kernel *)data);
16776 break;
16777 case SETUP_DTB:
16778 add_dtb(pa_data);
16779@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16780 * area (640->1Mb) as ram even though it is not.
16781 * take them out.
16782 */
16783- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16784+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16785 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16786 }
16787
16788@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16789
16790 if (!boot_params.hdr.root_flags)
16791 root_mountflags &= ~MS_RDONLY;
16792- init_mm.start_code = (unsigned long) _text;
16793- init_mm.end_code = (unsigned long) _etext;
16794+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16795+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16796 init_mm.end_data = (unsigned long) _edata;
16797 init_mm.brk = _brk_end;
16798
16799- code_resource.start = virt_to_phys(_text);
16800- code_resource.end = virt_to_phys(_etext)-1;
16801- data_resource.start = virt_to_phys(_etext);
16802+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16803+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16804+ data_resource.start = virt_to_phys(_sdata);
16805 data_resource.end = virt_to_phys(_edata)-1;
16806 bss_resource.start = virt_to_phys(&__bss_start);
16807 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16808diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16809index 71f4727..16dc9f7 100644
16810--- a/arch/x86/kernel/setup_percpu.c
16811+++ b/arch/x86/kernel/setup_percpu.c
16812@@ -21,19 +21,17 @@
16813 #include <asm/cpu.h>
16814 #include <asm/stackprotector.h>
16815
16816-DEFINE_PER_CPU(int, cpu_number);
16817+#ifdef CONFIG_SMP
16818+DEFINE_PER_CPU(unsigned int, cpu_number);
16819 EXPORT_PER_CPU_SYMBOL(cpu_number);
16820+#endif
16821
16822-#ifdef CONFIG_X86_64
16823 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16824-#else
16825-#define BOOT_PERCPU_OFFSET 0
16826-#endif
16827
16828 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16829 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16830
16831-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16832+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16833 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16834 };
16835 EXPORT_SYMBOL(__per_cpu_offset);
16836@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16837 {
16838 #ifdef CONFIG_X86_32
16839 struct desc_struct gdt;
16840+ unsigned long base = per_cpu_offset(cpu);
16841
16842- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16843- 0x2 | DESCTYPE_S, 0x8);
16844- gdt.s = 1;
16845+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16846+ 0x83 | DESCTYPE_S, 0xC);
16847 write_gdt_entry(get_cpu_gdt_table(cpu),
16848 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16849 #endif
16850@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16851 /* alrighty, percpu areas up and running */
16852 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16853 for_each_possible_cpu(cpu) {
16854+#ifdef CONFIG_CC_STACKPROTECTOR
16855+#ifdef CONFIG_X86_32
16856+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16857+#endif
16858+#endif
16859 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16860 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16861 per_cpu(cpu_number, cpu) = cpu;
16862@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16863 */
16864 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16865 #endif
16866+#ifdef CONFIG_CC_STACKPROTECTOR
16867+#ifdef CONFIG_X86_32
16868+ if (!cpu)
16869+ per_cpu(stack_canary.canary, cpu) = canary;
16870+#endif
16871+#endif
16872 /*
16873 * Up to this point, the boot CPU has been using .init.data
16874 * area. Reload any changed state for the boot CPU.
16875diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16876index 54ddaeb2..22c3bdc 100644
16877--- a/arch/x86/kernel/signal.c
16878+++ b/arch/x86/kernel/signal.c
16879@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16880 * Align the stack pointer according to the i386 ABI,
16881 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16882 */
16883- sp = ((sp + 4) & -16ul) - 4;
16884+ sp = ((sp - 12) & -16ul) - 4;
16885 #else /* !CONFIG_X86_32 */
16886 sp = round_down(sp, 16) - 8;
16887 #endif
16888@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16889 * Return an always-bogus address instead so we will die with SIGSEGV.
16890 */
16891 if (onsigstack && !likely(on_sig_stack(sp)))
16892- return (void __user *)-1L;
16893+ return (__force void __user *)-1L;
16894
16895 /* save i387 state */
16896 if (used_math() && save_i387_xstate(*fpstate) < 0)
16897- return (void __user *)-1L;
16898+ return (__force void __user *)-1L;
16899
16900 return (void __user *)sp;
16901 }
16902@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16903 }
16904
16905 if (current->mm->context.vdso)
16906- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16907+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16908 else
16909- restorer = &frame->retcode;
16910+ restorer = (void __user *)&frame->retcode;
16911 if (ka->sa.sa_flags & SA_RESTORER)
16912 restorer = ka->sa.sa_restorer;
16913
16914@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16915 * reasons and because gdb uses it as a signature to notice
16916 * signal handler stack frames.
16917 */
16918- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16919+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16920
16921 if (err)
16922 return -EFAULT;
16923@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16924 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16925
16926 /* Set up to return from userspace. */
16927- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16928+ if (current->mm->context.vdso)
16929+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16930+ else
16931+ restorer = (void __user *)&frame->retcode;
16932 if (ka->sa.sa_flags & SA_RESTORER)
16933 restorer = ka->sa.sa_restorer;
16934 put_user_ex(restorer, &frame->pretcode);
16935@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16936 * reasons and because gdb uses it as a signature to notice
16937 * signal handler stack frames.
16938 */
16939- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16940+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16941 } put_user_catch(err);
16942
16943 if (err)
16944@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16945 * X86_32: vm86 regs switched out by assembly code before reaching
16946 * here, so testing against kernel CS suffices.
16947 */
16948- if (!user_mode(regs))
16949+ if (!user_mode_novm(regs))
16950 return;
16951
16952 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16953diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16954index 9f548cb..caf76f7 100644
16955--- a/arch/x86/kernel/smpboot.c
16956+++ b/arch/x86/kernel/smpboot.c
16957@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16958 set_idle_for_cpu(cpu, c_idle.idle);
16959 do_rest:
16960 per_cpu(current_task, cpu) = c_idle.idle;
16961+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16962 #ifdef CONFIG_X86_32
16963 /* Stack for startup_32 can be just as for start_secondary onwards */
16964 irq_ctx_init(cpu);
16965 #else
16966 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16967 initial_gs = per_cpu_offset(cpu);
16968- per_cpu(kernel_stack, cpu) =
16969- (unsigned long)task_stack_page(c_idle.idle) -
16970- KERNEL_STACK_OFFSET + THREAD_SIZE;
16971+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16972 #endif
16973+
16974+ pax_open_kernel();
16975 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16976+ pax_close_kernel();
16977+
16978 initial_code = (unsigned long)start_secondary;
16979 stack_start = c_idle.idle->thread.sp;
16980
16981@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16982
16983 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16984
16985+#ifdef CONFIG_PAX_PER_CPU_PGD
16986+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16987+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16988+ KERNEL_PGD_PTRS);
16989+#endif
16990+
16991 err = do_boot_cpu(apicid, cpu);
16992 if (err) {
16993 pr_debug("do_boot_cpu failed %d\n", err);
16994diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16995index c346d11..d43b163 100644
16996--- a/arch/x86/kernel/step.c
16997+++ b/arch/x86/kernel/step.c
16998@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16999 struct desc_struct *desc;
17000 unsigned long base;
17001
17002- seg &= ~7UL;
17003+ seg >>= 3;
17004
17005 mutex_lock(&child->mm->context.lock);
17006- if (unlikely((seg >> 3) >= child->mm->context.size))
17007+ if (unlikely(seg >= child->mm->context.size))
17008 addr = -1L; /* bogus selector, access would fault */
17009 else {
17010 desc = child->mm->context.ldt + seg;
17011@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17012 addr += base;
17013 }
17014 mutex_unlock(&child->mm->context.lock);
17015- }
17016+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17017+ addr = ktla_ktva(addr);
17018
17019 return addr;
17020 }
17021@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17022 unsigned char opcode[15];
17023 unsigned long addr = convert_ip_to_linear(child, regs);
17024
17025+ if (addr == -EINVAL)
17026+ return 0;
17027+
17028 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17029 for (i = 0; i < copied; i++) {
17030 switch (opcode[i]) {
17031diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17032index 0b0cb5f..db6b9ed 100644
17033--- a/arch/x86/kernel/sys_i386_32.c
17034+++ b/arch/x86/kernel/sys_i386_32.c
17035@@ -24,17 +24,224 @@
17036
17037 #include <asm/syscalls.h>
17038
17039-/*
17040- * Do a system call from kernel instead of calling sys_execve so we
17041- * end up with proper pt_regs.
17042- */
17043-int kernel_execve(const char *filename,
17044- const char *const argv[],
17045- const char *const envp[])
17046+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17047 {
17048- long __res;
17049- asm volatile ("int $0x80"
17050- : "=a" (__res)
17051- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17052- return __res;
17053+ unsigned long pax_task_size = TASK_SIZE;
17054+
17055+#ifdef CONFIG_PAX_SEGMEXEC
17056+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17057+ pax_task_size = SEGMEXEC_TASK_SIZE;
17058+#endif
17059+
17060+ if (len > pax_task_size || addr > pax_task_size - len)
17061+ return -EINVAL;
17062+
17063+ return 0;
17064+}
17065+
17066+unsigned long
17067+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17068+ unsigned long len, unsigned long pgoff, unsigned long flags)
17069+{
17070+ struct mm_struct *mm = current->mm;
17071+ struct vm_area_struct *vma;
17072+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17073+
17074+#ifdef CONFIG_PAX_SEGMEXEC
17075+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17076+ pax_task_size = SEGMEXEC_TASK_SIZE;
17077+#endif
17078+
17079+ pax_task_size -= PAGE_SIZE;
17080+
17081+ if (len > pax_task_size)
17082+ return -ENOMEM;
17083+
17084+ if (flags & MAP_FIXED)
17085+ return addr;
17086+
17087+#ifdef CONFIG_PAX_RANDMMAP
17088+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17089+#endif
17090+
17091+ if (addr) {
17092+ addr = PAGE_ALIGN(addr);
17093+ if (pax_task_size - len >= addr) {
17094+ vma = find_vma(mm, addr);
17095+ if (check_heap_stack_gap(vma, addr, len))
17096+ return addr;
17097+ }
17098+ }
17099+ if (len > mm->cached_hole_size) {
17100+ start_addr = addr = mm->free_area_cache;
17101+ } else {
17102+ start_addr = addr = mm->mmap_base;
17103+ mm->cached_hole_size = 0;
17104+ }
17105+
17106+#ifdef CONFIG_PAX_PAGEEXEC
17107+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17108+ start_addr = 0x00110000UL;
17109+
17110+#ifdef CONFIG_PAX_RANDMMAP
17111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17112+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17113+#endif
17114+
17115+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17116+ start_addr = addr = mm->mmap_base;
17117+ else
17118+ addr = start_addr;
17119+ }
17120+#endif
17121+
17122+full_search:
17123+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17124+ /* At this point: (!vma || addr < vma->vm_end). */
17125+ if (pax_task_size - len < addr) {
17126+ /*
17127+ * Start a new search - just in case we missed
17128+ * some holes.
17129+ */
17130+ if (start_addr != mm->mmap_base) {
17131+ start_addr = addr = mm->mmap_base;
17132+ mm->cached_hole_size = 0;
17133+ goto full_search;
17134+ }
17135+ return -ENOMEM;
17136+ }
17137+ if (check_heap_stack_gap(vma, addr, len))
17138+ break;
17139+ if (addr + mm->cached_hole_size < vma->vm_start)
17140+ mm->cached_hole_size = vma->vm_start - addr;
17141+ addr = vma->vm_end;
17142+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17143+ start_addr = addr = mm->mmap_base;
17144+ mm->cached_hole_size = 0;
17145+ goto full_search;
17146+ }
17147+ }
17148+
17149+ /*
17150+ * Remember the place where we stopped the search:
17151+ */
17152+ mm->free_area_cache = addr + len;
17153+ return addr;
17154+}
17155+
17156+unsigned long
17157+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17158+ const unsigned long len, const unsigned long pgoff,
17159+ const unsigned long flags)
17160+{
17161+ struct vm_area_struct *vma;
17162+ struct mm_struct *mm = current->mm;
17163+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17164+
17165+#ifdef CONFIG_PAX_SEGMEXEC
17166+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17167+ pax_task_size = SEGMEXEC_TASK_SIZE;
17168+#endif
17169+
17170+ pax_task_size -= PAGE_SIZE;
17171+
17172+ /* requested length too big for entire address space */
17173+ if (len > pax_task_size)
17174+ return -ENOMEM;
17175+
17176+ if (flags & MAP_FIXED)
17177+ return addr;
17178+
17179+#ifdef CONFIG_PAX_PAGEEXEC
17180+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17181+ goto bottomup;
17182+#endif
17183+
17184+#ifdef CONFIG_PAX_RANDMMAP
17185+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17186+#endif
17187+
17188+ /* requesting a specific address */
17189+ if (addr) {
17190+ addr = PAGE_ALIGN(addr);
17191+ if (pax_task_size - len >= addr) {
17192+ vma = find_vma(mm, addr);
17193+ if (check_heap_stack_gap(vma, addr, len))
17194+ return addr;
17195+ }
17196+ }
17197+
17198+ /* check if free_area_cache is useful for us */
17199+ if (len <= mm->cached_hole_size) {
17200+ mm->cached_hole_size = 0;
17201+ mm->free_area_cache = mm->mmap_base;
17202+ }
17203+
17204+ /* either no address requested or can't fit in requested address hole */
17205+ addr = mm->free_area_cache;
17206+
17207+ /* make sure it can fit in the remaining address space */
17208+ if (addr > len) {
17209+ vma = find_vma(mm, addr-len);
17210+ if (check_heap_stack_gap(vma, addr - len, len))
17211+ /* remember the address as a hint for next time */
17212+ return (mm->free_area_cache = addr-len);
17213+ }
17214+
17215+ if (mm->mmap_base < len)
17216+ goto bottomup;
17217+
17218+ addr = mm->mmap_base-len;
17219+
17220+ do {
17221+ /*
17222+ * Lookup failure means no vma is above this address,
17223+ * else if new region fits below vma->vm_start,
17224+ * return with success:
17225+ */
17226+ vma = find_vma(mm, addr);
17227+ if (check_heap_stack_gap(vma, addr, len))
17228+ /* remember the address as a hint for next time */
17229+ return (mm->free_area_cache = addr);
17230+
17231+ /* remember the largest hole we saw so far */
17232+ if (addr + mm->cached_hole_size < vma->vm_start)
17233+ mm->cached_hole_size = vma->vm_start - addr;
17234+
17235+ /* try just below the current vma->vm_start */
17236+ addr = skip_heap_stack_gap(vma, len);
17237+ } while (!IS_ERR_VALUE(addr));
17238+
17239+bottomup:
17240+ /*
17241+ * A failed mmap() very likely causes application failure,
17242+ * so fall back to the bottom-up function here. This scenario
17243+ * can happen with large stack limits and large mmap()
17244+ * allocations.
17245+ */
17246+
17247+#ifdef CONFIG_PAX_SEGMEXEC
17248+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17249+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17250+ else
17251+#endif
17252+
17253+ mm->mmap_base = TASK_UNMAPPED_BASE;
17254+
17255+#ifdef CONFIG_PAX_RANDMMAP
17256+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17257+ mm->mmap_base += mm->delta_mmap;
17258+#endif
17259+
17260+ mm->free_area_cache = mm->mmap_base;
17261+ mm->cached_hole_size = ~0UL;
17262+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17263+ /*
17264+ * Restore the topdown base:
17265+ */
17266+ mm->mmap_base = base;
17267+ mm->free_area_cache = base;
17268+ mm->cached_hole_size = ~0UL;
17269+
17270+ return addr;
17271 }
17272diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17273index 0514890..3dbebce 100644
17274--- a/arch/x86/kernel/sys_x86_64.c
17275+++ b/arch/x86/kernel/sys_x86_64.c
17276@@ -95,8 +95,8 @@ out:
17277 return error;
17278 }
17279
17280-static void find_start_end(unsigned long flags, unsigned long *begin,
17281- unsigned long *end)
17282+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17283+ unsigned long *begin, unsigned long *end)
17284 {
17285 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17286 unsigned long new_begin;
17287@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17288 *begin = new_begin;
17289 }
17290 } else {
17291- *begin = TASK_UNMAPPED_BASE;
17292+ *begin = mm->mmap_base;
17293 *end = TASK_SIZE;
17294 }
17295 }
17296@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17297 if (flags & MAP_FIXED)
17298 return addr;
17299
17300- find_start_end(flags, &begin, &end);
17301+ find_start_end(mm, flags, &begin, &end);
17302
17303 if (len > end)
17304 return -ENOMEM;
17305
17306+#ifdef CONFIG_PAX_RANDMMAP
17307+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17308+#endif
17309+
17310 if (addr) {
17311 addr = PAGE_ALIGN(addr);
17312 vma = find_vma(mm, addr);
17313- if (end - len >= addr &&
17314- (!vma || addr + len <= vma->vm_start))
17315+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17316 return addr;
17317 }
17318 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17319@@ -172,7 +175,7 @@ full_search:
17320 }
17321 return -ENOMEM;
17322 }
17323- if (!vma || addr + len <= vma->vm_start) {
17324+ if (check_heap_stack_gap(vma, addr, len)) {
17325 /*
17326 * Remember the place where we stopped the search:
17327 */
17328@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17329 {
17330 struct vm_area_struct *vma;
17331 struct mm_struct *mm = current->mm;
17332- unsigned long addr = addr0;
17333+ unsigned long base = mm->mmap_base, addr = addr0;
17334
17335 /* requested length too big for entire address space */
17336 if (len > TASK_SIZE)
17337@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17338 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17339 goto bottomup;
17340
17341+#ifdef CONFIG_PAX_RANDMMAP
17342+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17343+#endif
17344+
17345 /* requesting a specific address */
17346 if (addr) {
17347 addr = PAGE_ALIGN(addr);
17348- vma = find_vma(mm, addr);
17349- if (TASK_SIZE - len >= addr &&
17350- (!vma || addr + len <= vma->vm_start))
17351- return addr;
17352+ if (TASK_SIZE - len >= addr) {
17353+ vma = find_vma(mm, addr);
17354+ if (check_heap_stack_gap(vma, addr, len))
17355+ return addr;
17356+ }
17357 }
17358
17359 /* check if free_area_cache is useful for us */
17360@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17361 ALIGN_TOPDOWN);
17362
17363 vma = find_vma(mm, tmp_addr);
17364- if (!vma || tmp_addr + len <= vma->vm_start)
17365+ if (check_heap_stack_gap(vma, tmp_addr, len))
17366 /* remember the address as a hint for next time */
17367 return mm->free_area_cache = tmp_addr;
17368 }
17369@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17370 * return with success:
17371 */
17372 vma = find_vma(mm, addr);
17373- if (!vma || addr+len <= vma->vm_start)
17374+ if (check_heap_stack_gap(vma, addr, len))
17375 /* remember the address as a hint for next time */
17376 return mm->free_area_cache = addr;
17377
17378@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17379 mm->cached_hole_size = vma->vm_start - addr;
17380
17381 /* try just below the current vma->vm_start */
17382- addr = vma->vm_start-len;
17383- } while (len < vma->vm_start);
17384+ addr = skip_heap_stack_gap(vma, len);
17385+ } while (!IS_ERR_VALUE(addr));
17386
17387 bottomup:
17388 /*
17389@@ -270,13 +278,21 @@ bottomup:
17390 * can happen with large stack limits and large mmap()
17391 * allocations.
17392 */
17393+ mm->mmap_base = TASK_UNMAPPED_BASE;
17394+
17395+#ifdef CONFIG_PAX_RANDMMAP
17396+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17397+ mm->mmap_base += mm->delta_mmap;
17398+#endif
17399+
17400+ mm->free_area_cache = mm->mmap_base;
17401 mm->cached_hole_size = ~0UL;
17402- mm->free_area_cache = TASK_UNMAPPED_BASE;
17403 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17404 /*
17405 * Restore the topdown base:
17406 */
17407- mm->free_area_cache = mm->mmap_base;
17408+ mm->mmap_base = base;
17409+ mm->free_area_cache = base;
17410 mm->cached_hole_size = ~0UL;
17411
17412 return addr;
17413diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17414index 9a0e312..e6f66f2 100644
17415--- a/arch/x86/kernel/syscall_table_32.S
17416+++ b/arch/x86/kernel/syscall_table_32.S
17417@@ -1,3 +1,4 @@
17418+.section .rodata,"a",@progbits
17419 ENTRY(sys_call_table)
17420 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17421 .long sys_exit
17422diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17423index e2410e2..4fe3fbc 100644
17424--- a/arch/x86/kernel/tboot.c
17425+++ b/arch/x86/kernel/tboot.c
17426@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17427
17428 void tboot_shutdown(u32 shutdown_type)
17429 {
17430- void (*shutdown)(void);
17431+ void (* __noreturn shutdown)(void);
17432
17433 if (!tboot_enabled())
17434 return;
17435@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17436
17437 switch_to_tboot_pt();
17438
17439- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17440+ shutdown = (void *)tboot->shutdown_entry;
17441 shutdown();
17442
17443 /* should not reach here */
17444@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17445 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17446 }
17447
17448-static atomic_t ap_wfs_count;
17449+static atomic_unchecked_t ap_wfs_count;
17450
17451 static int tboot_wait_for_aps(int num_aps)
17452 {
17453@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17454 {
17455 switch (action) {
17456 case CPU_DYING:
17457- atomic_inc(&ap_wfs_count);
17458+ atomic_inc_unchecked(&ap_wfs_count);
17459 if (num_online_cpus() == 1)
17460- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17461+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17462 return NOTIFY_BAD;
17463 break;
17464 }
17465@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17466
17467 tboot_create_trampoline();
17468
17469- atomic_set(&ap_wfs_count, 0);
17470+ atomic_set_unchecked(&ap_wfs_count, 0);
17471 register_hotcpu_notifier(&tboot_cpu_notifier);
17472 return 0;
17473 }
17474diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17475index dd5fbf4..b7f2232 100644
17476--- a/arch/x86/kernel/time.c
17477+++ b/arch/x86/kernel/time.c
17478@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17479 {
17480 unsigned long pc = instruction_pointer(regs);
17481
17482- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17483+ if (!user_mode(regs) && in_lock_functions(pc)) {
17484 #ifdef CONFIG_FRAME_POINTER
17485- return *(unsigned long *)(regs->bp + sizeof(long));
17486+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17487 #else
17488 unsigned long *sp =
17489 (unsigned long *)kernel_stack_pointer(regs);
17490@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17491 * or above a saved flags. Eflags has bits 22-31 zero,
17492 * kernel addresses don't.
17493 */
17494+
17495+#ifdef CONFIG_PAX_KERNEXEC
17496+ return ktla_ktva(sp[0]);
17497+#else
17498 if (sp[0] >> 22)
17499 return sp[0];
17500 if (sp[1] >> 22)
17501 return sp[1];
17502 #endif
17503+
17504+#endif
17505 }
17506 return pc;
17507 }
17508diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17509index 6bb7b85..dd853e1 100644
17510--- a/arch/x86/kernel/tls.c
17511+++ b/arch/x86/kernel/tls.c
17512@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17513 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17514 return -EINVAL;
17515
17516+#ifdef CONFIG_PAX_SEGMEXEC
17517+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17518+ return -EINVAL;
17519+#endif
17520+
17521 set_tls_desc(p, idx, &info, 1);
17522
17523 return 0;
17524diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17525index 451c0a7..e57f551 100644
17526--- a/arch/x86/kernel/trampoline_32.S
17527+++ b/arch/x86/kernel/trampoline_32.S
17528@@ -32,6 +32,12 @@
17529 #include <asm/segment.h>
17530 #include <asm/page_types.h>
17531
17532+#ifdef CONFIG_PAX_KERNEXEC
17533+#define ta(X) (X)
17534+#else
17535+#define ta(X) ((X) - __PAGE_OFFSET)
17536+#endif
17537+
17538 #ifdef CONFIG_SMP
17539
17540 .section ".x86_trampoline","a"
17541@@ -62,7 +68,7 @@ r_base = .
17542 inc %ax # protected mode (PE) bit
17543 lmsw %ax # into protected mode
17544 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17545- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17546+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17547
17548 # These need to be in the same 64K segment as the above;
17549 # hence we don't use the boot_gdt_descr defined in head.S
17550diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17551index 09ff517..df19fbff 100644
17552--- a/arch/x86/kernel/trampoline_64.S
17553+++ b/arch/x86/kernel/trampoline_64.S
17554@@ -90,7 +90,7 @@ startup_32:
17555 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17556 movl %eax, %ds
17557
17558- movl $X86_CR4_PAE, %eax
17559+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17560 movl %eax, %cr4 # Enable PAE mode
17561
17562 # Setup trampoline 4 level pagetables
17563@@ -138,7 +138,7 @@ tidt:
17564 # so the kernel can live anywhere
17565 .balign 4
17566 tgdt:
17567- .short tgdt_end - tgdt # gdt limit
17568+ .short tgdt_end - tgdt - 1 # gdt limit
17569 .long tgdt - r_base
17570 .short 0
17571 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17572diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17573index a8e3eb8..c9dbd7d 100644
17574--- a/arch/x86/kernel/traps.c
17575+++ b/arch/x86/kernel/traps.c
17576@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17577
17578 /* Do we ignore FPU interrupts ? */
17579 char ignore_fpu_irq;
17580-
17581-/*
17582- * The IDT has to be page-aligned to simplify the Pentium
17583- * F0 0F bug workaround.
17584- */
17585-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17586 #endif
17587
17588 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17589@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17590 }
17591
17592 static void __kprobes
17593-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17594+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17595 long error_code, siginfo_t *info)
17596 {
17597 struct task_struct *tsk = current;
17598
17599 #ifdef CONFIG_X86_32
17600- if (regs->flags & X86_VM_MASK) {
17601+ if (v8086_mode(regs)) {
17602 /*
17603 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17604 * On nmi (interrupt 2), do_trap should not be called.
17605@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17606 }
17607 #endif
17608
17609- if (!user_mode(regs))
17610+ if (!user_mode_novm(regs))
17611 goto kernel_trap;
17612
17613 #ifdef CONFIG_X86_32
17614@@ -148,7 +142,7 @@ trap_signal:
17615 printk_ratelimit()) {
17616 printk(KERN_INFO
17617 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17618- tsk->comm, tsk->pid, str,
17619+ tsk->comm, task_pid_nr(tsk), str,
17620 regs->ip, regs->sp, error_code);
17621 print_vma_addr(" in ", regs->ip);
17622 printk("\n");
17623@@ -165,8 +159,20 @@ kernel_trap:
17624 if (!fixup_exception(regs)) {
17625 tsk->thread.error_code = error_code;
17626 tsk->thread.trap_no = trapnr;
17627+
17628+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17629+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17630+ str = "PAX: suspicious stack segment fault";
17631+#endif
17632+
17633 die(str, regs, error_code);
17634 }
17635+
17636+#ifdef CONFIG_PAX_REFCOUNT
17637+ if (trapnr == 4)
17638+ pax_report_refcount_overflow(regs);
17639+#endif
17640+
17641 return;
17642
17643 #ifdef CONFIG_X86_32
17644@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17645 conditional_sti(regs);
17646
17647 #ifdef CONFIG_X86_32
17648- if (regs->flags & X86_VM_MASK)
17649+ if (v8086_mode(regs))
17650 goto gp_in_vm86;
17651 #endif
17652
17653 tsk = current;
17654- if (!user_mode(regs))
17655+ if (!user_mode_novm(regs))
17656 goto gp_in_kernel;
17657
17658+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17659+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17660+ struct mm_struct *mm = tsk->mm;
17661+ unsigned long limit;
17662+
17663+ down_write(&mm->mmap_sem);
17664+ limit = mm->context.user_cs_limit;
17665+ if (limit < TASK_SIZE) {
17666+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17667+ up_write(&mm->mmap_sem);
17668+ return;
17669+ }
17670+ up_write(&mm->mmap_sem);
17671+ }
17672+#endif
17673+
17674 tsk->thread.error_code = error_code;
17675 tsk->thread.trap_no = 13;
17676
17677@@ -295,6 +317,13 @@ gp_in_kernel:
17678 if (notify_die(DIE_GPF, "general protection fault", regs,
17679 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17680 return;
17681+
17682+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17683+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17684+ die("PAX: suspicious general protection fault", regs, error_code);
17685+ else
17686+#endif
17687+
17688 die("general protection fault", regs, error_code);
17689 }
17690
17691@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17692 /* It's safe to allow irq's after DR6 has been saved */
17693 preempt_conditional_sti(regs);
17694
17695- if (regs->flags & X86_VM_MASK) {
17696+ if (v8086_mode(regs)) {
17697 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17698 error_code, 1);
17699 preempt_conditional_cli(regs);
17700@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17701 * We already checked v86 mode above, so we can check for kernel mode
17702 * by just checking the CPL of CS.
17703 */
17704- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17705+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17706 tsk->thread.debugreg6 &= ~DR_STEP;
17707 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17708 regs->flags &= ~X86_EFLAGS_TF;
17709@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17710 return;
17711 conditional_sti(regs);
17712
17713- if (!user_mode_vm(regs))
17714+ if (!user_mode(regs))
17715 {
17716 if (!fixup_exception(regs)) {
17717 task->thread.error_code = error_code;
17718@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17719 void __math_state_restore(void)
17720 {
17721 struct thread_info *thread = current_thread_info();
17722- struct task_struct *tsk = thread->task;
17723+ struct task_struct *tsk = current;
17724
17725 /*
17726 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17727@@ -595,8 +624,7 @@ void __math_state_restore(void)
17728 */
17729 asmlinkage void math_state_restore(void)
17730 {
17731- struct thread_info *thread = current_thread_info();
17732- struct task_struct *tsk = thread->task;
17733+ struct task_struct *tsk = current;
17734
17735 if (!tsk_used_math(tsk)) {
17736 local_irq_enable();
17737diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17738index b9242ba..50c5edd 100644
17739--- a/arch/x86/kernel/verify_cpu.S
17740+++ b/arch/x86/kernel/verify_cpu.S
17741@@ -20,6 +20,7 @@
17742 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17743 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17744 * arch/x86/kernel/head_32.S: processor startup
17745+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17746 *
17747 * verify_cpu, returns the status of longmode and SSE in register %eax.
17748 * 0: Success 1: Failure
17749diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17750index 863f875..4307295 100644
17751--- a/arch/x86/kernel/vm86_32.c
17752+++ b/arch/x86/kernel/vm86_32.c
17753@@ -41,6 +41,7 @@
17754 #include <linux/ptrace.h>
17755 #include <linux/audit.h>
17756 #include <linux/stddef.h>
17757+#include <linux/grsecurity.h>
17758
17759 #include <asm/uaccess.h>
17760 #include <asm/io.h>
17761@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17762 do_exit(SIGSEGV);
17763 }
17764
17765- tss = &per_cpu(init_tss, get_cpu());
17766+ tss = init_tss + get_cpu();
17767 current->thread.sp0 = current->thread.saved_sp0;
17768 current->thread.sysenter_cs = __KERNEL_CS;
17769 load_sp0(tss, &current->thread);
17770@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17771 struct task_struct *tsk;
17772 int tmp, ret = -EPERM;
17773
17774+#ifdef CONFIG_GRKERNSEC_VM86
17775+ if (!capable(CAP_SYS_RAWIO)) {
17776+ gr_handle_vm86();
17777+ goto out;
17778+ }
17779+#endif
17780+
17781 tsk = current;
17782 if (tsk->thread.saved_sp0)
17783 goto out;
17784@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17785 int tmp, ret;
17786 struct vm86plus_struct __user *v86;
17787
17788+#ifdef CONFIG_GRKERNSEC_VM86
17789+ if (!capable(CAP_SYS_RAWIO)) {
17790+ gr_handle_vm86();
17791+ ret = -EPERM;
17792+ goto out;
17793+ }
17794+#endif
17795+
17796 tsk = current;
17797 switch (cmd) {
17798 case VM86_REQUEST_IRQ:
17799@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17800 tsk->thread.saved_fs = info->regs32->fs;
17801 tsk->thread.saved_gs = get_user_gs(info->regs32);
17802
17803- tss = &per_cpu(init_tss, get_cpu());
17804+ tss = init_tss + get_cpu();
17805 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17806 if (cpu_has_sep)
17807 tsk->thread.sysenter_cs = 0;
17808@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17809 goto cannot_handle;
17810 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17811 goto cannot_handle;
17812- intr_ptr = (unsigned long __user *) (i << 2);
17813+ intr_ptr = (__force unsigned long __user *) (i << 2);
17814 if (get_user(segoffs, intr_ptr))
17815 goto cannot_handle;
17816 if ((segoffs >> 16) == BIOSSEG)
17817diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17818index 0f703f1..9e15f64 100644
17819--- a/arch/x86/kernel/vmlinux.lds.S
17820+++ b/arch/x86/kernel/vmlinux.lds.S
17821@@ -26,6 +26,13 @@
17822 #include <asm/page_types.h>
17823 #include <asm/cache.h>
17824 #include <asm/boot.h>
17825+#include <asm/segment.h>
17826+
17827+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17828+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17829+#else
17830+#define __KERNEL_TEXT_OFFSET 0
17831+#endif
17832
17833 #undef i386 /* in case the preprocessor is a 32bit one */
17834
17835@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17836
17837 PHDRS {
17838 text PT_LOAD FLAGS(5); /* R_E */
17839+#ifdef CONFIG_X86_32
17840+ module PT_LOAD FLAGS(5); /* R_E */
17841+#endif
17842+#ifdef CONFIG_XEN
17843+ rodata PT_LOAD FLAGS(5); /* R_E */
17844+#else
17845+ rodata PT_LOAD FLAGS(4); /* R__ */
17846+#endif
17847 data PT_LOAD FLAGS(6); /* RW_ */
17848-#ifdef CONFIG_X86_64
17849+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17850 #ifdef CONFIG_SMP
17851 percpu PT_LOAD FLAGS(6); /* RW_ */
17852 #endif
17853+ text.init PT_LOAD FLAGS(5); /* R_E */
17854+ text.exit PT_LOAD FLAGS(5); /* R_E */
17855 init PT_LOAD FLAGS(7); /* RWE */
17856-#endif
17857 note PT_NOTE FLAGS(0); /* ___ */
17858 }
17859
17860 SECTIONS
17861 {
17862 #ifdef CONFIG_X86_32
17863- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17864- phys_startup_32 = startup_32 - LOAD_OFFSET;
17865+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17866 #else
17867- . = __START_KERNEL;
17868- phys_startup_64 = startup_64 - LOAD_OFFSET;
17869+ . = __START_KERNEL;
17870 #endif
17871
17872 /* Text and read-only data */
17873- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17874- _text = .;
17875+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17876 /* bootstrapping code */
17877+#ifdef CONFIG_X86_32
17878+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17879+#else
17880+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17881+#endif
17882+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17883+ _text = .;
17884 HEAD_TEXT
17885 #ifdef CONFIG_X86_32
17886 . = ALIGN(PAGE_SIZE);
17887@@ -108,13 +128,47 @@ SECTIONS
17888 IRQENTRY_TEXT
17889 *(.fixup)
17890 *(.gnu.warning)
17891- /* End of text section */
17892- _etext = .;
17893 } :text = 0x9090
17894
17895- NOTES :text :note
17896+ . += __KERNEL_TEXT_OFFSET;
17897
17898- EXCEPTION_TABLE(16) :text = 0x9090
17899+#ifdef CONFIG_X86_32
17900+ . = ALIGN(PAGE_SIZE);
17901+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17902+
17903+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17904+ MODULES_EXEC_VADDR = .;
17905+ BYTE(0)
17906+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17907+ . = ALIGN(HPAGE_SIZE);
17908+ MODULES_EXEC_END = . - 1;
17909+#endif
17910+
17911+ } :module
17912+#endif
17913+
17914+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17915+ /* End of text section */
17916+ _etext = . - __KERNEL_TEXT_OFFSET;
17917+ }
17918+
17919+#ifdef CONFIG_X86_32
17920+ . = ALIGN(PAGE_SIZE);
17921+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17922+ *(.idt)
17923+ . = ALIGN(PAGE_SIZE);
17924+ *(.empty_zero_page)
17925+ *(.initial_pg_fixmap)
17926+ *(.initial_pg_pmd)
17927+ *(.initial_page_table)
17928+ *(.swapper_pg_dir)
17929+ } :rodata
17930+#endif
17931+
17932+ . = ALIGN(PAGE_SIZE);
17933+ NOTES :rodata :note
17934+
17935+ EXCEPTION_TABLE(16) :rodata
17936
17937 #if defined(CONFIG_DEBUG_RODATA)
17938 /* .text should occupy whole number of pages */
17939@@ -126,16 +180,20 @@ SECTIONS
17940
17941 /* Data */
17942 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17943+
17944+#ifdef CONFIG_PAX_KERNEXEC
17945+ . = ALIGN(HPAGE_SIZE);
17946+#else
17947+ . = ALIGN(PAGE_SIZE);
17948+#endif
17949+
17950 /* Start of data section */
17951 _sdata = .;
17952
17953 /* init_task */
17954 INIT_TASK_DATA(THREAD_SIZE)
17955
17956-#ifdef CONFIG_X86_32
17957- /* 32 bit has nosave before _edata */
17958 NOSAVE_DATA
17959-#endif
17960
17961 PAGE_ALIGNED_DATA(PAGE_SIZE)
17962
17963@@ -176,12 +234,19 @@ SECTIONS
17964 #endif /* CONFIG_X86_64 */
17965
17966 /* Init code and data - will be freed after init */
17967- . = ALIGN(PAGE_SIZE);
17968 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17969+ BYTE(0)
17970+
17971+#ifdef CONFIG_PAX_KERNEXEC
17972+ . = ALIGN(HPAGE_SIZE);
17973+#else
17974+ . = ALIGN(PAGE_SIZE);
17975+#endif
17976+
17977 __init_begin = .; /* paired with __init_end */
17978- }
17979+ } :init.begin
17980
17981-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17982+#ifdef CONFIG_SMP
17983 /*
17984 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17985 * output PHDR, so the next output section - .init.text - should
17986@@ -190,12 +255,27 @@ SECTIONS
17987 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17988 #endif
17989
17990- INIT_TEXT_SECTION(PAGE_SIZE)
17991-#ifdef CONFIG_X86_64
17992- :init
17993-#endif
17994+ . = ALIGN(PAGE_SIZE);
17995+ init_begin = .;
17996+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17997+ VMLINUX_SYMBOL(_sinittext) = .;
17998+ INIT_TEXT
17999+ VMLINUX_SYMBOL(_einittext) = .;
18000+ . = ALIGN(PAGE_SIZE);
18001+ } :text.init
18002
18003- INIT_DATA_SECTION(16)
18004+ /*
18005+ * .exit.text is discard at runtime, not link time, to deal with
18006+ * references from .altinstructions and .eh_frame
18007+ */
18008+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18009+ EXIT_TEXT
18010+ . = ALIGN(16);
18011+ } :text.exit
18012+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18013+
18014+ . = ALIGN(PAGE_SIZE);
18015+ INIT_DATA_SECTION(16) :init
18016
18017 /*
18018 * Code and data for a variety of lowlevel trampolines, to be
18019@@ -269,19 +349,12 @@ SECTIONS
18020 }
18021
18022 . = ALIGN(8);
18023- /*
18024- * .exit.text is discard at runtime, not link time, to deal with
18025- * references from .altinstructions and .eh_frame
18026- */
18027- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18028- EXIT_TEXT
18029- }
18030
18031 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18032 EXIT_DATA
18033 }
18034
18035-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18036+#ifndef CONFIG_SMP
18037 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18038 #endif
18039
18040@@ -300,16 +373,10 @@ SECTIONS
18041 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18042 __smp_locks = .;
18043 *(.smp_locks)
18044- . = ALIGN(PAGE_SIZE);
18045 __smp_locks_end = .;
18046+ . = ALIGN(PAGE_SIZE);
18047 }
18048
18049-#ifdef CONFIG_X86_64
18050- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18051- NOSAVE_DATA
18052- }
18053-#endif
18054-
18055 /* BSS */
18056 . = ALIGN(PAGE_SIZE);
18057 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18058@@ -325,6 +392,7 @@ SECTIONS
18059 __brk_base = .;
18060 . += 64 * 1024; /* 64k alignment slop space */
18061 *(.brk_reservation) /* areas brk users have reserved */
18062+ . = ALIGN(HPAGE_SIZE);
18063 __brk_limit = .;
18064 }
18065
18066@@ -351,13 +419,12 @@ SECTIONS
18067 * for the boot processor.
18068 */
18069 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18070-INIT_PER_CPU(gdt_page);
18071 INIT_PER_CPU(irq_stack_union);
18072
18073 /*
18074 * Build-time check on the image size:
18075 */
18076-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18077+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18078 "kernel image bigger than KERNEL_IMAGE_SIZE");
18079
18080 #ifdef CONFIG_SMP
18081diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18082index e4d4a22..47ee71f 100644
18083--- a/arch/x86/kernel/vsyscall_64.c
18084+++ b/arch/x86/kernel/vsyscall_64.c
18085@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18086 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18087 };
18088
18089-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18090+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18091
18092 static int __init vsyscall_setup(char *str)
18093 {
18094 if (str) {
18095 if (!strcmp("emulate", str))
18096 vsyscall_mode = EMULATE;
18097- else if (!strcmp("native", str))
18098- vsyscall_mode = NATIVE;
18099 else if (!strcmp("none", str))
18100 vsyscall_mode = NONE;
18101 else
18102@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18103
18104 tsk = current;
18105 if (seccomp_mode(&tsk->seccomp))
18106- do_exit(SIGKILL);
18107+ do_group_exit(SIGKILL);
18108
18109 switch (vsyscall_nr) {
18110 case 0:
18111@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18112 return true;
18113
18114 sigsegv:
18115- force_sig(SIGSEGV, current);
18116- return true;
18117+ do_group_exit(SIGKILL);
18118 }
18119
18120 /*
18121@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18122 extern char __vvar_page;
18123 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18124
18125- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18126- vsyscall_mode == NATIVE
18127- ? PAGE_KERNEL_VSYSCALL
18128- : PAGE_KERNEL_VVAR);
18129+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18130 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18131 (unsigned long)VSYSCALL_START);
18132
18133diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18134index 9796c2f..f686fbf 100644
18135--- a/arch/x86/kernel/x8664_ksyms_64.c
18136+++ b/arch/x86/kernel/x8664_ksyms_64.c
18137@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18138 EXPORT_SYMBOL(copy_user_generic_string);
18139 EXPORT_SYMBOL(copy_user_generic_unrolled);
18140 EXPORT_SYMBOL(__copy_user_nocache);
18141-EXPORT_SYMBOL(_copy_from_user);
18142-EXPORT_SYMBOL(_copy_to_user);
18143
18144 EXPORT_SYMBOL(copy_page);
18145 EXPORT_SYMBOL(clear_page);
18146diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18147index a391134..d0b63b6e 100644
18148--- a/arch/x86/kernel/xsave.c
18149+++ b/arch/x86/kernel/xsave.c
18150@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18151 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18152 return -EINVAL;
18153
18154- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18155+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18156 fx_sw_user->extended_size -
18157 FP_XSTATE_MAGIC2_SIZE));
18158 if (err)
18159@@ -267,7 +267,7 @@ fx_only:
18160 * the other extended state.
18161 */
18162 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18163- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18164+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18165 }
18166
18167 /*
18168@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18169 if (use_xsave())
18170 err = restore_user_xstate(buf);
18171 else
18172- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18173+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18174 buf);
18175 if (unlikely(err)) {
18176 /*
18177diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18178index f1e3be1..588efc8 100644
18179--- a/arch/x86/kvm/emulate.c
18180+++ b/arch/x86/kvm/emulate.c
18181@@ -249,6 +249,7 @@ struct gprefix {
18182
18183 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18184 do { \
18185+ unsigned long _tmp; \
18186 __asm__ __volatile__ ( \
18187 _PRE_EFLAGS("0", "4", "2") \
18188 _op _suffix " %"_x"3,%1; " \
18189@@ -263,8 +264,6 @@ struct gprefix {
18190 /* Raw emulation: instruction has two explicit operands. */
18191 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18192 do { \
18193- unsigned long _tmp; \
18194- \
18195 switch ((ctxt)->dst.bytes) { \
18196 case 2: \
18197 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18198@@ -280,7 +279,6 @@ struct gprefix {
18199
18200 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18201 do { \
18202- unsigned long _tmp; \
18203 switch ((ctxt)->dst.bytes) { \
18204 case 1: \
18205 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18206diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18207index 54abb40..a192606 100644
18208--- a/arch/x86/kvm/lapic.c
18209+++ b/arch/x86/kvm/lapic.c
18210@@ -53,7 +53,7 @@
18211 #define APIC_BUS_CYCLE_NS 1
18212
18213 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18214-#define apic_debug(fmt, arg...)
18215+#define apic_debug(fmt, arg...) do {} while (0)
18216
18217 #define APIC_LVT_NUM 6
18218 /* 14 is the version for Xeon and Pentium 8.4.8*/
18219diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18220index f1b36cf..af8a124 100644
18221--- a/arch/x86/kvm/mmu.c
18222+++ b/arch/x86/kvm/mmu.c
18223@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18224
18225 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18226
18227- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18228+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18229
18230 /*
18231 * Assume that the pte write on a page table of the same type
18232@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18233 }
18234
18235 spin_lock(&vcpu->kvm->mmu_lock);
18236- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18237+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18238 gentry = 0;
18239 kvm_mmu_free_some_pages(vcpu);
18240 ++vcpu->kvm->stat.mmu_pte_write;
18241diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18242index 9299410..ade2f9b 100644
18243--- a/arch/x86/kvm/paging_tmpl.h
18244+++ b/arch/x86/kvm/paging_tmpl.h
18245@@ -197,7 +197,7 @@ retry_walk:
18246 if (unlikely(kvm_is_error_hva(host_addr)))
18247 goto error;
18248
18249- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18250+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18251 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18252 goto error;
18253
18254@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18255 if (need_flush)
18256 kvm_flush_remote_tlbs(vcpu->kvm);
18257
18258- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18259+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18260
18261 spin_unlock(&vcpu->kvm->mmu_lock);
18262
18263diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18264index e32243e..a6e6172 100644
18265--- a/arch/x86/kvm/svm.c
18266+++ b/arch/x86/kvm/svm.c
18267@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18268 int cpu = raw_smp_processor_id();
18269
18270 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18271+
18272+ pax_open_kernel();
18273 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18274+ pax_close_kernel();
18275+
18276 load_TR_desc();
18277 }
18278
18279@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18280 #endif
18281 #endif
18282
18283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18284+ __set_fs(current_thread_info()->addr_limit);
18285+#endif
18286+
18287 reload_tss(vcpu);
18288
18289 local_irq_disable();
18290diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18291index 579a0b5..ed7bbf9 100644
18292--- a/arch/x86/kvm/vmx.c
18293+++ b/arch/x86/kvm/vmx.c
18294@@ -1305,7 +1305,11 @@ static void reload_tss(void)
18295 struct desc_struct *descs;
18296
18297 descs = (void *)gdt->address;
18298+
18299+ pax_open_kernel();
18300 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18301+ pax_close_kernel();
18302+
18303 load_TR_desc();
18304 }
18305
18306@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18307 if (!cpu_has_vmx_flexpriority())
18308 flexpriority_enabled = 0;
18309
18310- if (!cpu_has_vmx_tpr_shadow())
18311- kvm_x86_ops->update_cr8_intercept = NULL;
18312+ if (!cpu_has_vmx_tpr_shadow()) {
18313+ pax_open_kernel();
18314+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18315+ pax_close_kernel();
18316+ }
18317
18318 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18319 kvm_disable_largepages();
18320@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18321 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18322
18323 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18324- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18325+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18326
18327 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18328 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18329@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18330 "jmp .Lkvm_vmx_return \n\t"
18331 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18332 ".Lkvm_vmx_return: "
18333+
18334+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18335+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18336+ ".Lkvm_vmx_return2: "
18337+#endif
18338+
18339 /* Save guest registers, load host registers, keep flags */
18340 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18341 "pop %0 \n\t"
18342@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18343 #endif
18344 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18345 [wordsize]"i"(sizeof(ulong))
18346+
18347+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18348+ ,[cs]"i"(__KERNEL_CS)
18349+#endif
18350+
18351 : "cc", "memory"
18352 , R"ax", R"bx", R"di", R"si"
18353 #ifdef CONFIG_X86_64
18354@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18355 }
18356 }
18357
18358- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18359+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18360+
18361+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18362+ loadsegment(fs, __KERNEL_PERCPU);
18363+#endif
18364+
18365+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18366+ __set_fs(current_thread_info()->addr_limit);
18367+#endif
18368+
18369 vmx->loaded_vmcs->launched = 1;
18370
18371 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18372diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18373index 4c938da..4ddef65 100644
18374--- a/arch/x86/kvm/x86.c
18375+++ b/arch/x86/kvm/x86.c
18376@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18377 {
18378 struct kvm *kvm = vcpu->kvm;
18379 int lm = is_long_mode(vcpu);
18380- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18381- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18382+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18383+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18384 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18385 : kvm->arch.xen_hvm_config.blob_size_32;
18386 u32 page_num = data & ~PAGE_MASK;
18387@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18388 if (n < msr_list.nmsrs)
18389 goto out;
18390 r = -EFAULT;
18391+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18392+ goto out;
18393 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18394 num_msrs_to_save * sizeof(u32)))
18395 goto out;
18396@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18397 struct kvm_cpuid2 *cpuid,
18398 struct kvm_cpuid_entry2 __user *entries)
18399 {
18400- int r;
18401+ int r, i;
18402
18403 r = -E2BIG;
18404 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18405 goto out;
18406 r = -EFAULT;
18407- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18408- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18409+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18410 goto out;
18411+ for (i = 0; i < cpuid->nent; ++i) {
18412+ struct kvm_cpuid_entry2 cpuid_entry;
18413+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18414+ goto out;
18415+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18416+ }
18417 vcpu->arch.cpuid_nent = cpuid->nent;
18418 kvm_apic_set_version(vcpu);
18419 kvm_x86_ops->cpuid_update(vcpu);
18420@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18421 struct kvm_cpuid2 *cpuid,
18422 struct kvm_cpuid_entry2 __user *entries)
18423 {
18424- int r;
18425+ int r, i;
18426
18427 r = -E2BIG;
18428 if (cpuid->nent < vcpu->arch.cpuid_nent)
18429 goto out;
18430 r = -EFAULT;
18431- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18432- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18433+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18434 goto out;
18435+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18436+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18437+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18438+ goto out;
18439+ }
18440 return 0;
18441
18442 out:
18443@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18444 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18445 struct kvm_interrupt *irq)
18446 {
18447- if (irq->irq < 0 || irq->irq >= 256)
18448+ if (irq->irq >= 256)
18449 return -EINVAL;
18450 if (irqchip_in_kernel(vcpu->kvm))
18451 return -ENXIO;
18452@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18453 kvm_mmu_set_mmio_spte_mask(mask);
18454 }
18455
18456-int kvm_arch_init(void *opaque)
18457+int kvm_arch_init(const void *opaque)
18458 {
18459 int r;
18460 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18461diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18462index cf4603b..7cdde38 100644
18463--- a/arch/x86/lguest/boot.c
18464+++ b/arch/x86/lguest/boot.c
18465@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18466 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18467 * Launcher to reboot us.
18468 */
18469-static void lguest_restart(char *reason)
18470+static __noreturn void lguest_restart(char *reason)
18471 {
18472 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18473+ BUG();
18474 }
18475
18476 /*G:050
18477diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18478index 042f682..c92afb6 100644
18479--- a/arch/x86/lib/atomic64_32.c
18480+++ b/arch/x86/lib/atomic64_32.c
18481@@ -8,18 +8,30 @@
18482
18483 long long atomic64_read_cx8(long long, const atomic64_t *v);
18484 EXPORT_SYMBOL(atomic64_read_cx8);
18485+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18486+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18487 long long atomic64_set_cx8(long long, const atomic64_t *v);
18488 EXPORT_SYMBOL(atomic64_set_cx8);
18489+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18490+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18491 long long atomic64_xchg_cx8(long long, unsigned high);
18492 EXPORT_SYMBOL(atomic64_xchg_cx8);
18493 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18494 EXPORT_SYMBOL(atomic64_add_return_cx8);
18495+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18496+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18497 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18498 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18499+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18500+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18501 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18502 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18503+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18504+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18505 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18506 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18507+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18508+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18509 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18510 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18511 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18512@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18513 #ifndef CONFIG_X86_CMPXCHG64
18514 long long atomic64_read_386(long long, const atomic64_t *v);
18515 EXPORT_SYMBOL(atomic64_read_386);
18516+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18517+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18518 long long atomic64_set_386(long long, const atomic64_t *v);
18519 EXPORT_SYMBOL(atomic64_set_386);
18520+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18521+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18522 long long atomic64_xchg_386(long long, unsigned high);
18523 EXPORT_SYMBOL(atomic64_xchg_386);
18524 long long atomic64_add_return_386(long long a, atomic64_t *v);
18525 EXPORT_SYMBOL(atomic64_add_return_386);
18526+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18527+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18528 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18529 EXPORT_SYMBOL(atomic64_sub_return_386);
18530+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18531+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18532 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18533 EXPORT_SYMBOL(atomic64_inc_return_386);
18534+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18535+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18536 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18537 EXPORT_SYMBOL(atomic64_dec_return_386);
18538+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18539+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18540 long long atomic64_add_386(long long a, atomic64_t *v);
18541 EXPORT_SYMBOL(atomic64_add_386);
18542+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18543+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18544 long long atomic64_sub_386(long long a, atomic64_t *v);
18545 EXPORT_SYMBOL(atomic64_sub_386);
18546+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18547+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18548 long long atomic64_inc_386(long long a, atomic64_t *v);
18549 EXPORT_SYMBOL(atomic64_inc_386);
18550+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18551+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18552 long long atomic64_dec_386(long long a, atomic64_t *v);
18553 EXPORT_SYMBOL(atomic64_dec_386);
18554+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18555+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18556 long long atomic64_dec_if_positive_386(atomic64_t *v);
18557 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18558 int atomic64_inc_not_zero_386(atomic64_t *v);
18559diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18560index e8e7e0d..56fd1b0 100644
18561--- a/arch/x86/lib/atomic64_386_32.S
18562+++ b/arch/x86/lib/atomic64_386_32.S
18563@@ -48,6 +48,10 @@ BEGIN(read)
18564 movl (v), %eax
18565 movl 4(v), %edx
18566 RET_ENDP
18567+BEGIN(read_unchecked)
18568+ movl (v), %eax
18569+ movl 4(v), %edx
18570+RET_ENDP
18571 #undef v
18572
18573 #define v %esi
18574@@ -55,6 +59,10 @@ BEGIN(set)
18575 movl %ebx, (v)
18576 movl %ecx, 4(v)
18577 RET_ENDP
18578+BEGIN(set_unchecked)
18579+ movl %ebx, (v)
18580+ movl %ecx, 4(v)
18581+RET_ENDP
18582 #undef v
18583
18584 #define v %esi
18585@@ -70,6 +78,20 @@ RET_ENDP
18586 BEGIN(add)
18587 addl %eax, (v)
18588 adcl %edx, 4(v)
18589+
18590+#ifdef CONFIG_PAX_REFCOUNT
18591+ jno 0f
18592+ subl %eax, (v)
18593+ sbbl %edx, 4(v)
18594+ int $4
18595+0:
18596+ _ASM_EXTABLE(0b, 0b)
18597+#endif
18598+
18599+RET_ENDP
18600+BEGIN(add_unchecked)
18601+ addl %eax, (v)
18602+ adcl %edx, 4(v)
18603 RET_ENDP
18604 #undef v
18605
18606@@ -77,6 +99,24 @@ RET_ENDP
18607 BEGIN(add_return)
18608 addl (v), %eax
18609 adcl 4(v), %edx
18610+
18611+#ifdef CONFIG_PAX_REFCOUNT
18612+ into
18613+1234:
18614+ _ASM_EXTABLE(1234b, 2f)
18615+#endif
18616+
18617+ movl %eax, (v)
18618+ movl %edx, 4(v)
18619+
18620+#ifdef CONFIG_PAX_REFCOUNT
18621+2:
18622+#endif
18623+
18624+RET_ENDP
18625+BEGIN(add_return_unchecked)
18626+ addl (v), %eax
18627+ adcl 4(v), %edx
18628 movl %eax, (v)
18629 movl %edx, 4(v)
18630 RET_ENDP
18631@@ -86,6 +126,20 @@ RET_ENDP
18632 BEGIN(sub)
18633 subl %eax, (v)
18634 sbbl %edx, 4(v)
18635+
18636+#ifdef CONFIG_PAX_REFCOUNT
18637+ jno 0f
18638+ addl %eax, (v)
18639+ adcl %edx, 4(v)
18640+ int $4
18641+0:
18642+ _ASM_EXTABLE(0b, 0b)
18643+#endif
18644+
18645+RET_ENDP
18646+BEGIN(sub_unchecked)
18647+ subl %eax, (v)
18648+ sbbl %edx, 4(v)
18649 RET_ENDP
18650 #undef v
18651
18652@@ -96,6 +150,27 @@ BEGIN(sub_return)
18653 sbbl $0, %edx
18654 addl (v), %eax
18655 adcl 4(v), %edx
18656+
18657+#ifdef CONFIG_PAX_REFCOUNT
18658+ into
18659+1234:
18660+ _ASM_EXTABLE(1234b, 2f)
18661+#endif
18662+
18663+ movl %eax, (v)
18664+ movl %edx, 4(v)
18665+
18666+#ifdef CONFIG_PAX_REFCOUNT
18667+2:
18668+#endif
18669+
18670+RET_ENDP
18671+BEGIN(sub_return_unchecked)
18672+ negl %edx
18673+ negl %eax
18674+ sbbl $0, %edx
18675+ addl (v), %eax
18676+ adcl 4(v), %edx
18677 movl %eax, (v)
18678 movl %edx, 4(v)
18679 RET_ENDP
18680@@ -105,6 +180,20 @@ RET_ENDP
18681 BEGIN(inc)
18682 addl $1, (v)
18683 adcl $0, 4(v)
18684+
18685+#ifdef CONFIG_PAX_REFCOUNT
18686+ jno 0f
18687+ subl $1, (v)
18688+ sbbl $0, 4(v)
18689+ int $4
18690+0:
18691+ _ASM_EXTABLE(0b, 0b)
18692+#endif
18693+
18694+RET_ENDP
18695+BEGIN(inc_unchecked)
18696+ addl $1, (v)
18697+ adcl $0, 4(v)
18698 RET_ENDP
18699 #undef v
18700
18701@@ -114,6 +203,26 @@ BEGIN(inc_return)
18702 movl 4(v), %edx
18703 addl $1, %eax
18704 adcl $0, %edx
18705+
18706+#ifdef CONFIG_PAX_REFCOUNT
18707+ into
18708+1234:
18709+ _ASM_EXTABLE(1234b, 2f)
18710+#endif
18711+
18712+ movl %eax, (v)
18713+ movl %edx, 4(v)
18714+
18715+#ifdef CONFIG_PAX_REFCOUNT
18716+2:
18717+#endif
18718+
18719+RET_ENDP
18720+BEGIN(inc_return_unchecked)
18721+ movl (v), %eax
18722+ movl 4(v), %edx
18723+ addl $1, %eax
18724+ adcl $0, %edx
18725 movl %eax, (v)
18726 movl %edx, 4(v)
18727 RET_ENDP
18728@@ -123,6 +232,20 @@ RET_ENDP
18729 BEGIN(dec)
18730 subl $1, (v)
18731 sbbl $0, 4(v)
18732+
18733+#ifdef CONFIG_PAX_REFCOUNT
18734+ jno 0f
18735+ addl $1, (v)
18736+ adcl $0, 4(v)
18737+ int $4
18738+0:
18739+ _ASM_EXTABLE(0b, 0b)
18740+#endif
18741+
18742+RET_ENDP
18743+BEGIN(dec_unchecked)
18744+ subl $1, (v)
18745+ sbbl $0, 4(v)
18746 RET_ENDP
18747 #undef v
18748
18749@@ -132,6 +255,26 @@ BEGIN(dec_return)
18750 movl 4(v), %edx
18751 subl $1, %eax
18752 sbbl $0, %edx
18753+
18754+#ifdef CONFIG_PAX_REFCOUNT
18755+ into
18756+1234:
18757+ _ASM_EXTABLE(1234b, 2f)
18758+#endif
18759+
18760+ movl %eax, (v)
18761+ movl %edx, 4(v)
18762+
18763+#ifdef CONFIG_PAX_REFCOUNT
18764+2:
18765+#endif
18766+
18767+RET_ENDP
18768+BEGIN(dec_return_unchecked)
18769+ movl (v), %eax
18770+ movl 4(v), %edx
18771+ subl $1, %eax
18772+ sbbl $0, %edx
18773 movl %eax, (v)
18774 movl %edx, 4(v)
18775 RET_ENDP
18776@@ -143,6 +286,13 @@ BEGIN(add_unless)
18777 adcl %edx, %edi
18778 addl (v), %eax
18779 adcl 4(v), %edx
18780+
18781+#ifdef CONFIG_PAX_REFCOUNT
18782+ into
18783+1234:
18784+ _ASM_EXTABLE(1234b, 2f)
18785+#endif
18786+
18787 cmpl %eax, %esi
18788 je 3f
18789 1:
18790@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18791 1:
18792 addl $1, %eax
18793 adcl $0, %edx
18794+
18795+#ifdef CONFIG_PAX_REFCOUNT
18796+ into
18797+1234:
18798+ _ASM_EXTABLE(1234b, 2f)
18799+#endif
18800+
18801 movl %eax, (v)
18802 movl %edx, 4(v)
18803 movl $1, %eax
18804@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18805 movl 4(v), %edx
18806 subl $1, %eax
18807 sbbl $0, %edx
18808+
18809+#ifdef CONFIG_PAX_REFCOUNT
18810+ into
18811+1234:
18812+ _ASM_EXTABLE(1234b, 1f)
18813+#endif
18814+
18815 js 1f
18816 movl %eax, (v)
18817 movl %edx, 4(v)
18818diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18819index 391a083..d658e9f 100644
18820--- a/arch/x86/lib/atomic64_cx8_32.S
18821+++ b/arch/x86/lib/atomic64_cx8_32.S
18822@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18823 CFI_STARTPROC
18824
18825 read64 %ecx
18826+ pax_force_retaddr
18827 ret
18828 CFI_ENDPROC
18829 ENDPROC(atomic64_read_cx8)
18830
18831+ENTRY(atomic64_read_unchecked_cx8)
18832+ CFI_STARTPROC
18833+
18834+ read64 %ecx
18835+ pax_force_retaddr
18836+ ret
18837+ CFI_ENDPROC
18838+ENDPROC(atomic64_read_unchecked_cx8)
18839+
18840 ENTRY(atomic64_set_cx8)
18841 CFI_STARTPROC
18842
18843@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18844 cmpxchg8b (%esi)
18845 jne 1b
18846
18847+ pax_force_retaddr
18848 ret
18849 CFI_ENDPROC
18850 ENDPROC(atomic64_set_cx8)
18851
18852+ENTRY(atomic64_set_unchecked_cx8)
18853+ CFI_STARTPROC
18854+
18855+1:
18856+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18857+ * are atomic on 586 and newer */
18858+ cmpxchg8b (%esi)
18859+ jne 1b
18860+
18861+ pax_force_retaddr
18862+ ret
18863+ CFI_ENDPROC
18864+ENDPROC(atomic64_set_unchecked_cx8)
18865+
18866 ENTRY(atomic64_xchg_cx8)
18867 CFI_STARTPROC
18868
18869@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18870 cmpxchg8b (%esi)
18871 jne 1b
18872
18873+ pax_force_retaddr
18874 ret
18875 CFI_ENDPROC
18876 ENDPROC(atomic64_xchg_cx8)
18877
18878-.macro addsub_return func ins insc
18879-ENTRY(atomic64_\func\()_return_cx8)
18880+.macro addsub_return func ins insc unchecked=""
18881+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18882 CFI_STARTPROC
18883 SAVE ebp
18884 SAVE ebx
18885@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18886 movl %edx, %ecx
18887 \ins\()l %esi, %ebx
18888 \insc\()l %edi, %ecx
18889+
18890+.ifb \unchecked
18891+#ifdef CONFIG_PAX_REFCOUNT
18892+ into
18893+2:
18894+ _ASM_EXTABLE(2b, 3f)
18895+#endif
18896+.endif
18897+
18898 LOCK_PREFIX
18899 cmpxchg8b (%ebp)
18900 jne 1b
18901-
18902-10:
18903 movl %ebx, %eax
18904 movl %ecx, %edx
18905+
18906+.ifb \unchecked
18907+#ifdef CONFIG_PAX_REFCOUNT
18908+3:
18909+#endif
18910+.endif
18911+
18912 RESTORE edi
18913 RESTORE esi
18914 RESTORE ebx
18915 RESTORE ebp
18916+ pax_force_retaddr
18917 ret
18918 CFI_ENDPROC
18919-ENDPROC(atomic64_\func\()_return_cx8)
18920+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18921 .endm
18922
18923 addsub_return add add adc
18924 addsub_return sub sub sbb
18925+addsub_return add add adc _unchecked
18926+addsub_return sub sub sbb _unchecked
18927
18928-.macro incdec_return func ins insc
18929-ENTRY(atomic64_\func\()_return_cx8)
18930+.macro incdec_return func ins insc unchecked
18931+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18932 CFI_STARTPROC
18933 SAVE ebx
18934
18935@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18936 movl %edx, %ecx
18937 \ins\()l $1, %ebx
18938 \insc\()l $0, %ecx
18939+
18940+.ifb \unchecked
18941+#ifdef CONFIG_PAX_REFCOUNT
18942+ into
18943+2:
18944+ _ASM_EXTABLE(2b, 3f)
18945+#endif
18946+.endif
18947+
18948 LOCK_PREFIX
18949 cmpxchg8b (%esi)
18950 jne 1b
18951
18952-10:
18953 movl %ebx, %eax
18954 movl %ecx, %edx
18955+
18956+.ifb \unchecked
18957+#ifdef CONFIG_PAX_REFCOUNT
18958+3:
18959+#endif
18960+.endif
18961+
18962 RESTORE ebx
18963+ pax_force_retaddr
18964 ret
18965 CFI_ENDPROC
18966-ENDPROC(atomic64_\func\()_return_cx8)
18967+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18968 .endm
18969
18970 incdec_return inc add adc
18971 incdec_return dec sub sbb
18972+incdec_return inc add adc _unchecked
18973+incdec_return dec sub sbb _unchecked
18974
18975 ENTRY(atomic64_dec_if_positive_cx8)
18976 CFI_STARTPROC
18977@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18978 movl %edx, %ecx
18979 subl $1, %ebx
18980 sbb $0, %ecx
18981+
18982+#ifdef CONFIG_PAX_REFCOUNT
18983+ into
18984+1234:
18985+ _ASM_EXTABLE(1234b, 2f)
18986+#endif
18987+
18988 js 2f
18989 LOCK_PREFIX
18990 cmpxchg8b (%esi)
18991@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18992 movl %ebx, %eax
18993 movl %ecx, %edx
18994 RESTORE ebx
18995+ pax_force_retaddr
18996 ret
18997 CFI_ENDPROC
18998 ENDPROC(atomic64_dec_if_positive_cx8)
18999@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19000 movl %edx, %ecx
19001 addl %esi, %ebx
19002 adcl %edi, %ecx
19003+
19004+#ifdef CONFIG_PAX_REFCOUNT
19005+ into
19006+1234:
19007+ _ASM_EXTABLE(1234b, 3f)
19008+#endif
19009+
19010 LOCK_PREFIX
19011 cmpxchg8b (%ebp)
19012 jne 1b
19013@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19014 CFI_ADJUST_CFA_OFFSET -8
19015 RESTORE ebx
19016 RESTORE ebp
19017+ pax_force_retaddr
19018 ret
19019 4:
19020 cmpl %edx, 4(%esp)
19021@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19022 movl %edx, %ecx
19023 addl $1, %ebx
19024 adcl $0, %ecx
19025+
19026+#ifdef CONFIG_PAX_REFCOUNT
19027+ into
19028+1234:
19029+ _ASM_EXTABLE(1234b, 3f)
19030+#endif
19031+
19032 LOCK_PREFIX
19033 cmpxchg8b (%esi)
19034 jne 1b
19035@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19036 movl $1, %eax
19037 3:
19038 RESTORE ebx
19039+ pax_force_retaddr
19040 ret
19041 4:
19042 testl %edx, %edx
19043diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19044index 78d16a5..fbcf666 100644
19045--- a/arch/x86/lib/checksum_32.S
19046+++ b/arch/x86/lib/checksum_32.S
19047@@ -28,7 +28,8 @@
19048 #include <linux/linkage.h>
19049 #include <asm/dwarf2.h>
19050 #include <asm/errno.h>
19051-
19052+#include <asm/segment.h>
19053+
19054 /*
19055 * computes a partial checksum, e.g. for TCP/UDP fragments
19056 */
19057@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19058
19059 #define ARGBASE 16
19060 #define FP 12
19061-
19062-ENTRY(csum_partial_copy_generic)
19063+
19064+ENTRY(csum_partial_copy_generic_to_user)
19065 CFI_STARTPROC
19066+
19067+#ifdef CONFIG_PAX_MEMORY_UDEREF
19068+ pushl_cfi %gs
19069+ popl_cfi %es
19070+ jmp csum_partial_copy_generic
19071+#endif
19072+
19073+ENTRY(csum_partial_copy_generic_from_user)
19074+
19075+#ifdef CONFIG_PAX_MEMORY_UDEREF
19076+ pushl_cfi %gs
19077+ popl_cfi %ds
19078+#endif
19079+
19080+ENTRY(csum_partial_copy_generic)
19081 subl $4,%esp
19082 CFI_ADJUST_CFA_OFFSET 4
19083 pushl_cfi %edi
19084@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19085 jmp 4f
19086 SRC(1: movw (%esi), %bx )
19087 addl $2, %esi
19088-DST( movw %bx, (%edi) )
19089+DST( movw %bx, %es:(%edi) )
19090 addl $2, %edi
19091 addw %bx, %ax
19092 adcl $0, %eax
19093@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19094 SRC(1: movl (%esi), %ebx )
19095 SRC( movl 4(%esi), %edx )
19096 adcl %ebx, %eax
19097-DST( movl %ebx, (%edi) )
19098+DST( movl %ebx, %es:(%edi) )
19099 adcl %edx, %eax
19100-DST( movl %edx, 4(%edi) )
19101+DST( movl %edx, %es:4(%edi) )
19102
19103 SRC( movl 8(%esi), %ebx )
19104 SRC( movl 12(%esi), %edx )
19105 adcl %ebx, %eax
19106-DST( movl %ebx, 8(%edi) )
19107+DST( movl %ebx, %es:8(%edi) )
19108 adcl %edx, %eax
19109-DST( movl %edx, 12(%edi) )
19110+DST( movl %edx, %es:12(%edi) )
19111
19112 SRC( movl 16(%esi), %ebx )
19113 SRC( movl 20(%esi), %edx )
19114 adcl %ebx, %eax
19115-DST( movl %ebx, 16(%edi) )
19116+DST( movl %ebx, %es:16(%edi) )
19117 adcl %edx, %eax
19118-DST( movl %edx, 20(%edi) )
19119+DST( movl %edx, %es:20(%edi) )
19120
19121 SRC( movl 24(%esi), %ebx )
19122 SRC( movl 28(%esi), %edx )
19123 adcl %ebx, %eax
19124-DST( movl %ebx, 24(%edi) )
19125+DST( movl %ebx, %es:24(%edi) )
19126 adcl %edx, %eax
19127-DST( movl %edx, 28(%edi) )
19128+DST( movl %edx, %es:28(%edi) )
19129
19130 lea 32(%esi), %esi
19131 lea 32(%edi), %edi
19132@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19133 shrl $2, %edx # This clears CF
19134 SRC(3: movl (%esi), %ebx )
19135 adcl %ebx, %eax
19136-DST( movl %ebx, (%edi) )
19137+DST( movl %ebx, %es:(%edi) )
19138 lea 4(%esi), %esi
19139 lea 4(%edi), %edi
19140 dec %edx
19141@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19142 jb 5f
19143 SRC( movw (%esi), %cx )
19144 leal 2(%esi), %esi
19145-DST( movw %cx, (%edi) )
19146+DST( movw %cx, %es:(%edi) )
19147 leal 2(%edi), %edi
19148 je 6f
19149 shll $16,%ecx
19150 SRC(5: movb (%esi), %cl )
19151-DST( movb %cl, (%edi) )
19152+DST( movb %cl, %es:(%edi) )
19153 6: addl %ecx, %eax
19154 adcl $0, %eax
19155 7:
19156@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19157
19158 6001:
19159 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19160- movl $-EFAULT, (%ebx)
19161+ movl $-EFAULT, %ss:(%ebx)
19162
19163 # zero the complete destination - computing the rest
19164 # is too much work
19165@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19166
19167 6002:
19168 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19169- movl $-EFAULT,(%ebx)
19170+ movl $-EFAULT,%ss:(%ebx)
19171 jmp 5000b
19172
19173 .previous
19174
19175+ pushl_cfi %ss
19176+ popl_cfi %ds
19177+ pushl_cfi %ss
19178+ popl_cfi %es
19179 popl_cfi %ebx
19180 CFI_RESTORE ebx
19181 popl_cfi %esi
19182@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19183 popl_cfi %ecx # equivalent to addl $4,%esp
19184 ret
19185 CFI_ENDPROC
19186-ENDPROC(csum_partial_copy_generic)
19187+ENDPROC(csum_partial_copy_generic_to_user)
19188
19189 #else
19190
19191 /* Version for PentiumII/PPro */
19192
19193 #define ROUND1(x) \
19194+ nop; nop; nop; \
19195 SRC(movl x(%esi), %ebx ) ; \
19196 addl %ebx, %eax ; \
19197- DST(movl %ebx, x(%edi) ) ;
19198+ DST(movl %ebx, %es:x(%edi)) ;
19199
19200 #define ROUND(x) \
19201+ nop; nop; nop; \
19202 SRC(movl x(%esi), %ebx ) ; \
19203 adcl %ebx, %eax ; \
19204- DST(movl %ebx, x(%edi) ) ;
19205+ DST(movl %ebx, %es:x(%edi)) ;
19206
19207 #define ARGBASE 12
19208-
19209-ENTRY(csum_partial_copy_generic)
19210+
19211+ENTRY(csum_partial_copy_generic_to_user)
19212 CFI_STARTPROC
19213+
19214+#ifdef CONFIG_PAX_MEMORY_UDEREF
19215+ pushl_cfi %gs
19216+ popl_cfi %es
19217+ jmp csum_partial_copy_generic
19218+#endif
19219+
19220+ENTRY(csum_partial_copy_generic_from_user)
19221+
19222+#ifdef CONFIG_PAX_MEMORY_UDEREF
19223+ pushl_cfi %gs
19224+ popl_cfi %ds
19225+#endif
19226+
19227+ENTRY(csum_partial_copy_generic)
19228 pushl_cfi %ebx
19229 CFI_REL_OFFSET ebx, 0
19230 pushl_cfi %edi
19231@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19232 subl %ebx, %edi
19233 lea -1(%esi),%edx
19234 andl $-32,%edx
19235- lea 3f(%ebx,%ebx), %ebx
19236+ lea 3f(%ebx,%ebx,2), %ebx
19237 testl %esi, %esi
19238 jmp *%ebx
19239 1: addl $64,%esi
19240@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19241 jb 5f
19242 SRC( movw (%esi), %dx )
19243 leal 2(%esi), %esi
19244-DST( movw %dx, (%edi) )
19245+DST( movw %dx, %es:(%edi) )
19246 leal 2(%edi), %edi
19247 je 6f
19248 shll $16,%edx
19249 5:
19250 SRC( movb (%esi), %dl )
19251-DST( movb %dl, (%edi) )
19252+DST( movb %dl, %es:(%edi) )
19253 6: addl %edx, %eax
19254 adcl $0, %eax
19255 7:
19256 .section .fixup, "ax"
19257 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19258- movl $-EFAULT, (%ebx)
19259+ movl $-EFAULT, %ss:(%ebx)
19260 # zero the complete destination (computing the rest is too much work)
19261 movl ARGBASE+8(%esp),%edi # dst
19262 movl ARGBASE+12(%esp),%ecx # len
19263@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19264 rep; stosb
19265 jmp 7b
19266 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19267- movl $-EFAULT, (%ebx)
19268+ movl $-EFAULT, %ss:(%ebx)
19269 jmp 7b
19270 .previous
19271
19272+#ifdef CONFIG_PAX_MEMORY_UDEREF
19273+ pushl_cfi %ss
19274+ popl_cfi %ds
19275+ pushl_cfi %ss
19276+ popl_cfi %es
19277+#endif
19278+
19279 popl_cfi %esi
19280 CFI_RESTORE esi
19281 popl_cfi %edi
19282@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19283 CFI_RESTORE ebx
19284 ret
19285 CFI_ENDPROC
19286-ENDPROC(csum_partial_copy_generic)
19287+ENDPROC(csum_partial_copy_generic_to_user)
19288
19289 #undef ROUND
19290 #undef ROUND1
19291diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19292index f2145cf..cea889d 100644
19293--- a/arch/x86/lib/clear_page_64.S
19294+++ b/arch/x86/lib/clear_page_64.S
19295@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19296 movl $4096/8,%ecx
19297 xorl %eax,%eax
19298 rep stosq
19299+ pax_force_retaddr
19300 ret
19301 CFI_ENDPROC
19302 ENDPROC(clear_page_c)
19303@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19304 movl $4096,%ecx
19305 xorl %eax,%eax
19306 rep stosb
19307+ pax_force_retaddr
19308 ret
19309 CFI_ENDPROC
19310 ENDPROC(clear_page_c_e)
19311@@ -43,6 +45,7 @@ ENTRY(clear_page)
19312 leaq 64(%rdi),%rdi
19313 jnz .Lloop
19314 nop
19315+ pax_force_retaddr
19316 ret
19317 CFI_ENDPROC
19318 .Lclear_page_end:
19319@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19320
19321 #include <asm/cpufeature.h>
19322
19323- .section .altinstr_replacement,"ax"
19324+ .section .altinstr_replacement,"a"
19325 1: .byte 0xeb /* jmp <disp8> */
19326 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19327 2: .byte 0xeb /* jmp <disp8> */
19328diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19329index 1e572c5..2a162cd 100644
19330--- a/arch/x86/lib/cmpxchg16b_emu.S
19331+++ b/arch/x86/lib/cmpxchg16b_emu.S
19332@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19333
19334 popf
19335 mov $1, %al
19336+ pax_force_retaddr
19337 ret
19338
19339 not_same:
19340 popf
19341 xor %al,%al
19342+ pax_force_retaddr
19343 ret
19344
19345 CFI_ENDPROC
19346diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19347index 01c805b..dccb07f 100644
19348--- a/arch/x86/lib/copy_page_64.S
19349+++ b/arch/x86/lib/copy_page_64.S
19350@@ -9,6 +9,7 @@ copy_page_c:
19351 CFI_STARTPROC
19352 movl $4096/8,%ecx
19353 rep movsq
19354+ pax_force_retaddr
19355 ret
19356 CFI_ENDPROC
19357 ENDPROC(copy_page_c)
19358@@ -39,7 +40,7 @@ ENTRY(copy_page)
19359 movq 16 (%rsi), %rdx
19360 movq 24 (%rsi), %r8
19361 movq 32 (%rsi), %r9
19362- movq 40 (%rsi), %r10
19363+ movq 40 (%rsi), %r13
19364 movq 48 (%rsi), %r11
19365 movq 56 (%rsi), %r12
19366
19367@@ -50,7 +51,7 @@ ENTRY(copy_page)
19368 movq %rdx, 16 (%rdi)
19369 movq %r8, 24 (%rdi)
19370 movq %r9, 32 (%rdi)
19371- movq %r10, 40 (%rdi)
19372+ movq %r13, 40 (%rdi)
19373 movq %r11, 48 (%rdi)
19374 movq %r12, 56 (%rdi)
19375
19376@@ -69,7 +70,7 @@ ENTRY(copy_page)
19377 movq 16 (%rsi), %rdx
19378 movq 24 (%rsi), %r8
19379 movq 32 (%rsi), %r9
19380- movq 40 (%rsi), %r10
19381+ movq 40 (%rsi), %r13
19382 movq 48 (%rsi), %r11
19383 movq 56 (%rsi), %r12
19384
19385@@ -78,7 +79,7 @@ ENTRY(copy_page)
19386 movq %rdx, 16 (%rdi)
19387 movq %r8, 24 (%rdi)
19388 movq %r9, 32 (%rdi)
19389- movq %r10, 40 (%rdi)
19390+ movq %r13, 40 (%rdi)
19391 movq %r11, 48 (%rdi)
19392 movq %r12, 56 (%rdi)
19393
19394@@ -95,6 +96,7 @@ ENTRY(copy_page)
19395 CFI_RESTORE r13
19396 addq $3*8,%rsp
19397 CFI_ADJUST_CFA_OFFSET -3*8
19398+ pax_force_retaddr
19399 ret
19400 .Lcopy_page_end:
19401 CFI_ENDPROC
19402@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19403
19404 #include <asm/cpufeature.h>
19405
19406- .section .altinstr_replacement,"ax"
19407+ .section .altinstr_replacement,"a"
19408 1: .byte 0xeb /* jmp <disp8> */
19409 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19410 2:
19411diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19412index 0248402..821c786 100644
19413--- a/arch/x86/lib/copy_user_64.S
19414+++ b/arch/x86/lib/copy_user_64.S
19415@@ -16,6 +16,7 @@
19416 #include <asm/thread_info.h>
19417 #include <asm/cpufeature.h>
19418 #include <asm/alternative-asm.h>
19419+#include <asm/pgtable.h>
19420
19421 /*
19422 * By placing feature2 after feature1 in altinstructions section, we logically
19423@@ -29,7 +30,7 @@
19424 .byte 0xe9 /* 32bit jump */
19425 .long \orig-1f /* by default jump to orig */
19426 1:
19427- .section .altinstr_replacement,"ax"
19428+ .section .altinstr_replacement,"a"
19429 2: .byte 0xe9 /* near jump with 32bit immediate */
19430 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19431 3: .byte 0xe9 /* near jump with 32bit immediate */
19432@@ -71,47 +72,20 @@
19433 #endif
19434 .endm
19435
19436-/* Standard copy_to_user with segment limit checking */
19437-ENTRY(_copy_to_user)
19438- CFI_STARTPROC
19439- GET_THREAD_INFO(%rax)
19440- movq %rdi,%rcx
19441- addq %rdx,%rcx
19442- jc bad_to_user
19443- cmpq TI_addr_limit(%rax),%rcx
19444- ja bad_to_user
19445- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19446- copy_user_generic_unrolled,copy_user_generic_string, \
19447- copy_user_enhanced_fast_string
19448- CFI_ENDPROC
19449-ENDPROC(_copy_to_user)
19450-
19451-/* Standard copy_from_user with segment limit checking */
19452-ENTRY(_copy_from_user)
19453- CFI_STARTPROC
19454- GET_THREAD_INFO(%rax)
19455- movq %rsi,%rcx
19456- addq %rdx,%rcx
19457- jc bad_from_user
19458- cmpq TI_addr_limit(%rax),%rcx
19459- ja bad_from_user
19460- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19461- copy_user_generic_unrolled,copy_user_generic_string, \
19462- copy_user_enhanced_fast_string
19463- CFI_ENDPROC
19464-ENDPROC(_copy_from_user)
19465-
19466 .section .fixup,"ax"
19467 /* must zero dest */
19468 ENTRY(bad_from_user)
19469 bad_from_user:
19470 CFI_STARTPROC
19471+ testl %edx,%edx
19472+ js bad_to_user
19473 movl %edx,%ecx
19474 xorl %eax,%eax
19475 rep
19476 stosb
19477 bad_to_user:
19478 movl %edx,%eax
19479+ pax_force_retaddr
19480 ret
19481 CFI_ENDPROC
19482 ENDPROC(bad_from_user)
19483@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19484 jz 17f
19485 1: movq (%rsi),%r8
19486 2: movq 1*8(%rsi),%r9
19487-3: movq 2*8(%rsi),%r10
19488+3: movq 2*8(%rsi),%rax
19489 4: movq 3*8(%rsi),%r11
19490 5: movq %r8,(%rdi)
19491 6: movq %r9,1*8(%rdi)
19492-7: movq %r10,2*8(%rdi)
19493+7: movq %rax,2*8(%rdi)
19494 8: movq %r11,3*8(%rdi)
19495 9: movq 4*8(%rsi),%r8
19496 10: movq 5*8(%rsi),%r9
19497-11: movq 6*8(%rsi),%r10
19498+11: movq 6*8(%rsi),%rax
19499 12: movq 7*8(%rsi),%r11
19500 13: movq %r8,4*8(%rdi)
19501 14: movq %r9,5*8(%rdi)
19502-15: movq %r10,6*8(%rdi)
19503+15: movq %rax,6*8(%rdi)
19504 16: movq %r11,7*8(%rdi)
19505 leaq 64(%rsi),%rsi
19506 leaq 64(%rdi),%rdi
19507@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19508 decl %ecx
19509 jnz 21b
19510 23: xor %eax,%eax
19511+ pax_force_retaddr
19512 ret
19513
19514 .section .fixup,"ax"
19515@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19516 3: rep
19517 movsb
19518 4: xorl %eax,%eax
19519+ pax_force_retaddr
19520 ret
19521
19522 .section .fixup,"ax"
19523@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19524 1: rep
19525 movsb
19526 2: xorl %eax,%eax
19527+ pax_force_retaddr
19528 ret
19529
19530 .section .fixup,"ax"
19531diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19532index cb0c112..e3a6895 100644
19533--- a/arch/x86/lib/copy_user_nocache_64.S
19534+++ b/arch/x86/lib/copy_user_nocache_64.S
19535@@ -8,12 +8,14 @@
19536
19537 #include <linux/linkage.h>
19538 #include <asm/dwarf2.h>
19539+#include <asm/alternative-asm.h>
19540
19541 #define FIX_ALIGNMENT 1
19542
19543 #include <asm/current.h>
19544 #include <asm/asm-offsets.h>
19545 #include <asm/thread_info.h>
19546+#include <asm/pgtable.h>
19547
19548 .macro ALIGN_DESTINATION
19549 #ifdef FIX_ALIGNMENT
19550@@ -50,6 +52,15 @@
19551 */
19552 ENTRY(__copy_user_nocache)
19553 CFI_STARTPROC
19554+
19555+#ifdef CONFIG_PAX_MEMORY_UDEREF
19556+ mov $PAX_USER_SHADOW_BASE,%rcx
19557+ cmp %rcx,%rsi
19558+ jae 1f
19559+ add %rcx,%rsi
19560+1:
19561+#endif
19562+
19563 cmpl $8,%edx
19564 jb 20f /* less then 8 bytes, go to byte copy loop */
19565 ALIGN_DESTINATION
19566@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19567 jz 17f
19568 1: movq (%rsi),%r8
19569 2: movq 1*8(%rsi),%r9
19570-3: movq 2*8(%rsi),%r10
19571+3: movq 2*8(%rsi),%rax
19572 4: movq 3*8(%rsi),%r11
19573 5: movnti %r8,(%rdi)
19574 6: movnti %r9,1*8(%rdi)
19575-7: movnti %r10,2*8(%rdi)
19576+7: movnti %rax,2*8(%rdi)
19577 8: movnti %r11,3*8(%rdi)
19578 9: movq 4*8(%rsi),%r8
19579 10: movq 5*8(%rsi),%r9
19580-11: movq 6*8(%rsi),%r10
19581+11: movq 6*8(%rsi),%rax
19582 12: movq 7*8(%rsi),%r11
19583 13: movnti %r8,4*8(%rdi)
19584 14: movnti %r9,5*8(%rdi)
19585-15: movnti %r10,6*8(%rdi)
19586+15: movnti %rax,6*8(%rdi)
19587 16: movnti %r11,7*8(%rdi)
19588 leaq 64(%rsi),%rsi
19589 leaq 64(%rdi),%rdi
19590@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19591 jnz 21b
19592 23: xorl %eax,%eax
19593 sfence
19594+ pax_force_retaddr
19595 ret
19596
19597 .section .fixup,"ax"
19598diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19599index fb903b7..c92b7f7 100644
19600--- a/arch/x86/lib/csum-copy_64.S
19601+++ b/arch/x86/lib/csum-copy_64.S
19602@@ -8,6 +8,7 @@
19603 #include <linux/linkage.h>
19604 #include <asm/dwarf2.h>
19605 #include <asm/errno.h>
19606+#include <asm/alternative-asm.h>
19607
19608 /*
19609 * Checksum copy with exception handling.
19610@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19611 CFI_RESTORE rbp
19612 addq $7*8, %rsp
19613 CFI_ADJUST_CFA_OFFSET -7*8
19614+ pax_force_retaddr 0, 1
19615 ret
19616 CFI_RESTORE_STATE
19617
19618diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19619index 459b58a..9570bc7 100644
19620--- a/arch/x86/lib/csum-wrappers_64.c
19621+++ b/arch/x86/lib/csum-wrappers_64.c
19622@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19623 len -= 2;
19624 }
19625 }
19626- isum = csum_partial_copy_generic((__force const void *)src,
19627+
19628+#ifdef CONFIG_PAX_MEMORY_UDEREF
19629+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19630+ src += PAX_USER_SHADOW_BASE;
19631+#endif
19632+
19633+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19634 dst, len, isum, errp, NULL);
19635 if (unlikely(*errp))
19636 goto out_err;
19637@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19638 }
19639
19640 *errp = 0;
19641- return csum_partial_copy_generic(src, (void __force *)dst,
19642+
19643+#ifdef CONFIG_PAX_MEMORY_UDEREF
19644+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19645+ dst += PAX_USER_SHADOW_BASE;
19646+#endif
19647+
19648+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19649 len, isum, NULL, errp);
19650 }
19651 EXPORT_SYMBOL(csum_partial_copy_to_user);
19652diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19653index 51f1504..ddac4c1 100644
19654--- a/arch/x86/lib/getuser.S
19655+++ b/arch/x86/lib/getuser.S
19656@@ -33,15 +33,38 @@
19657 #include <asm/asm-offsets.h>
19658 #include <asm/thread_info.h>
19659 #include <asm/asm.h>
19660+#include <asm/segment.h>
19661+#include <asm/pgtable.h>
19662+#include <asm/alternative-asm.h>
19663+
19664+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19665+#define __copyuser_seg gs;
19666+#else
19667+#define __copyuser_seg
19668+#endif
19669
19670 .text
19671 ENTRY(__get_user_1)
19672 CFI_STARTPROC
19673+
19674+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19675 GET_THREAD_INFO(%_ASM_DX)
19676 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19677 jae bad_get_user
19678-1: movzb (%_ASM_AX),%edx
19679+
19680+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19681+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19682+ cmp %_ASM_DX,%_ASM_AX
19683+ jae 1234f
19684+ add %_ASM_DX,%_ASM_AX
19685+1234:
19686+#endif
19687+
19688+#endif
19689+
19690+1: __copyuser_seg movzb (%_ASM_AX),%edx
19691 xor %eax,%eax
19692+ pax_force_retaddr
19693 ret
19694 CFI_ENDPROC
19695 ENDPROC(__get_user_1)
19696@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19697 ENTRY(__get_user_2)
19698 CFI_STARTPROC
19699 add $1,%_ASM_AX
19700+
19701+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19702 jc bad_get_user
19703 GET_THREAD_INFO(%_ASM_DX)
19704 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19705 jae bad_get_user
19706-2: movzwl -1(%_ASM_AX),%edx
19707+
19708+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19709+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19710+ cmp %_ASM_DX,%_ASM_AX
19711+ jae 1234f
19712+ add %_ASM_DX,%_ASM_AX
19713+1234:
19714+#endif
19715+
19716+#endif
19717+
19718+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19719 xor %eax,%eax
19720+ pax_force_retaddr
19721 ret
19722 CFI_ENDPROC
19723 ENDPROC(__get_user_2)
19724@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19725 ENTRY(__get_user_4)
19726 CFI_STARTPROC
19727 add $3,%_ASM_AX
19728+
19729+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19730 jc bad_get_user
19731 GET_THREAD_INFO(%_ASM_DX)
19732 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19733 jae bad_get_user
19734-3: mov -3(%_ASM_AX),%edx
19735+
19736+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19738+ cmp %_ASM_DX,%_ASM_AX
19739+ jae 1234f
19740+ add %_ASM_DX,%_ASM_AX
19741+1234:
19742+#endif
19743+
19744+#endif
19745+
19746+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19747 xor %eax,%eax
19748+ pax_force_retaddr
19749 ret
19750 CFI_ENDPROC
19751 ENDPROC(__get_user_4)
19752@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19753 GET_THREAD_INFO(%_ASM_DX)
19754 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19755 jae bad_get_user
19756+
19757+#ifdef CONFIG_PAX_MEMORY_UDEREF
19758+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19759+ cmp %_ASM_DX,%_ASM_AX
19760+ jae 1234f
19761+ add %_ASM_DX,%_ASM_AX
19762+1234:
19763+#endif
19764+
19765 4: movq -7(%_ASM_AX),%_ASM_DX
19766 xor %eax,%eax
19767+ pax_force_retaddr
19768 ret
19769 CFI_ENDPROC
19770 ENDPROC(__get_user_8)
19771@@ -91,6 +152,7 @@ bad_get_user:
19772 CFI_STARTPROC
19773 xor %edx,%edx
19774 mov $(-EFAULT),%_ASM_AX
19775+ pax_force_retaddr
19776 ret
19777 CFI_ENDPROC
19778 END(bad_get_user)
19779diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19780index 374562e..a75830b 100644
19781--- a/arch/x86/lib/insn.c
19782+++ b/arch/x86/lib/insn.c
19783@@ -21,6 +21,11 @@
19784 #include <linux/string.h>
19785 #include <asm/inat.h>
19786 #include <asm/insn.h>
19787+#ifdef __KERNEL__
19788+#include <asm/pgtable_types.h>
19789+#else
19790+#define ktla_ktva(addr) addr
19791+#endif
19792
19793 /* Verify next sizeof(t) bytes can be on the same instruction */
19794 #define validate_next(t, insn, n) \
19795@@ -49,8 +54,8 @@
19796 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19797 {
19798 memset(insn, 0, sizeof(*insn));
19799- insn->kaddr = kaddr;
19800- insn->next_byte = kaddr;
19801+ insn->kaddr = ktla_ktva(kaddr);
19802+ insn->next_byte = ktla_ktva(kaddr);
19803 insn->x86_64 = x86_64 ? 1 : 0;
19804 insn->opnd_bytes = 4;
19805 if (x86_64)
19806diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19807index 05a95e7..326f2fa 100644
19808--- a/arch/x86/lib/iomap_copy_64.S
19809+++ b/arch/x86/lib/iomap_copy_64.S
19810@@ -17,6 +17,7 @@
19811
19812 #include <linux/linkage.h>
19813 #include <asm/dwarf2.h>
19814+#include <asm/alternative-asm.h>
19815
19816 /*
19817 * override generic version in lib/iomap_copy.c
19818@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19819 CFI_STARTPROC
19820 movl %edx,%ecx
19821 rep movsd
19822+ pax_force_retaddr
19823 ret
19824 CFI_ENDPROC
19825 ENDPROC(__iowrite32_copy)
19826diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19827index efbf2a0..8893637 100644
19828--- a/arch/x86/lib/memcpy_64.S
19829+++ b/arch/x86/lib/memcpy_64.S
19830@@ -34,6 +34,7 @@
19831 rep movsq
19832 movl %edx, %ecx
19833 rep movsb
19834+ pax_force_retaddr
19835 ret
19836 .Lmemcpy_e:
19837 .previous
19838@@ -51,6 +52,7 @@
19839
19840 movl %edx, %ecx
19841 rep movsb
19842+ pax_force_retaddr
19843 ret
19844 .Lmemcpy_e_e:
19845 .previous
19846@@ -81,13 +83,13 @@ ENTRY(memcpy)
19847 */
19848 movq 0*8(%rsi), %r8
19849 movq 1*8(%rsi), %r9
19850- movq 2*8(%rsi), %r10
19851+ movq 2*8(%rsi), %rcx
19852 movq 3*8(%rsi), %r11
19853 leaq 4*8(%rsi), %rsi
19854
19855 movq %r8, 0*8(%rdi)
19856 movq %r9, 1*8(%rdi)
19857- movq %r10, 2*8(%rdi)
19858+ movq %rcx, 2*8(%rdi)
19859 movq %r11, 3*8(%rdi)
19860 leaq 4*8(%rdi), %rdi
19861 jae .Lcopy_forward_loop
19862@@ -110,12 +112,12 @@ ENTRY(memcpy)
19863 subq $0x20, %rdx
19864 movq -1*8(%rsi), %r8
19865 movq -2*8(%rsi), %r9
19866- movq -3*8(%rsi), %r10
19867+ movq -3*8(%rsi), %rcx
19868 movq -4*8(%rsi), %r11
19869 leaq -4*8(%rsi), %rsi
19870 movq %r8, -1*8(%rdi)
19871 movq %r9, -2*8(%rdi)
19872- movq %r10, -3*8(%rdi)
19873+ movq %rcx, -3*8(%rdi)
19874 movq %r11, -4*8(%rdi)
19875 leaq -4*8(%rdi), %rdi
19876 jae .Lcopy_backward_loop
19877@@ -135,12 +137,13 @@ ENTRY(memcpy)
19878 */
19879 movq 0*8(%rsi), %r8
19880 movq 1*8(%rsi), %r9
19881- movq -2*8(%rsi, %rdx), %r10
19882+ movq -2*8(%rsi, %rdx), %rcx
19883 movq -1*8(%rsi, %rdx), %r11
19884 movq %r8, 0*8(%rdi)
19885 movq %r9, 1*8(%rdi)
19886- movq %r10, -2*8(%rdi, %rdx)
19887+ movq %rcx, -2*8(%rdi, %rdx)
19888 movq %r11, -1*8(%rdi, %rdx)
19889+ pax_force_retaddr
19890 retq
19891 .p2align 4
19892 .Lless_16bytes:
19893@@ -153,6 +156,7 @@ ENTRY(memcpy)
19894 movq -1*8(%rsi, %rdx), %r9
19895 movq %r8, 0*8(%rdi)
19896 movq %r9, -1*8(%rdi, %rdx)
19897+ pax_force_retaddr
19898 retq
19899 .p2align 4
19900 .Lless_8bytes:
19901@@ -166,6 +170,7 @@ ENTRY(memcpy)
19902 movl -4(%rsi, %rdx), %r8d
19903 movl %ecx, (%rdi)
19904 movl %r8d, -4(%rdi, %rdx)
19905+ pax_force_retaddr
19906 retq
19907 .p2align 4
19908 .Lless_3bytes:
19909@@ -183,6 +188,7 @@ ENTRY(memcpy)
19910 jnz .Lloop_1
19911
19912 .Lend:
19913+ pax_force_retaddr
19914 retq
19915 CFI_ENDPROC
19916 ENDPROC(memcpy)
19917diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19918index ee16461..c39c199 100644
19919--- a/arch/x86/lib/memmove_64.S
19920+++ b/arch/x86/lib/memmove_64.S
19921@@ -61,13 +61,13 @@ ENTRY(memmove)
19922 5:
19923 sub $0x20, %rdx
19924 movq 0*8(%rsi), %r11
19925- movq 1*8(%rsi), %r10
19926+ movq 1*8(%rsi), %rcx
19927 movq 2*8(%rsi), %r9
19928 movq 3*8(%rsi), %r8
19929 leaq 4*8(%rsi), %rsi
19930
19931 movq %r11, 0*8(%rdi)
19932- movq %r10, 1*8(%rdi)
19933+ movq %rcx, 1*8(%rdi)
19934 movq %r9, 2*8(%rdi)
19935 movq %r8, 3*8(%rdi)
19936 leaq 4*8(%rdi), %rdi
19937@@ -81,10 +81,10 @@ ENTRY(memmove)
19938 4:
19939 movq %rdx, %rcx
19940 movq -8(%rsi, %rdx), %r11
19941- lea -8(%rdi, %rdx), %r10
19942+ lea -8(%rdi, %rdx), %r9
19943 shrq $3, %rcx
19944 rep movsq
19945- movq %r11, (%r10)
19946+ movq %r11, (%r9)
19947 jmp 13f
19948 .Lmemmove_end_forward:
19949
19950@@ -95,14 +95,14 @@ ENTRY(memmove)
19951 7:
19952 movq %rdx, %rcx
19953 movq (%rsi), %r11
19954- movq %rdi, %r10
19955+ movq %rdi, %r9
19956 leaq -8(%rsi, %rdx), %rsi
19957 leaq -8(%rdi, %rdx), %rdi
19958 shrq $3, %rcx
19959 std
19960 rep movsq
19961 cld
19962- movq %r11, (%r10)
19963+ movq %r11, (%r9)
19964 jmp 13f
19965
19966 /*
19967@@ -127,13 +127,13 @@ ENTRY(memmove)
19968 8:
19969 subq $0x20, %rdx
19970 movq -1*8(%rsi), %r11
19971- movq -2*8(%rsi), %r10
19972+ movq -2*8(%rsi), %rcx
19973 movq -3*8(%rsi), %r9
19974 movq -4*8(%rsi), %r8
19975 leaq -4*8(%rsi), %rsi
19976
19977 movq %r11, -1*8(%rdi)
19978- movq %r10, -2*8(%rdi)
19979+ movq %rcx, -2*8(%rdi)
19980 movq %r9, -3*8(%rdi)
19981 movq %r8, -4*8(%rdi)
19982 leaq -4*8(%rdi), %rdi
19983@@ -151,11 +151,11 @@ ENTRY(memmove)
19984 * Move data from 16 bytes to 31 bytes.
19985 */
19986 movq 0*8(%rsi), %r11
19987- movq 1*8(%rsi), %r10
19988+ movq 1*8(%rsi), %rcx
19989 movq -2*8(%rsi, %rdx), %r9
19990 movq -1*8(%rsi, %rdx), %r8
19991 movq %r11, 0*8(%rdi)
19992- movq %r10, 1*8(%rdi)
19993+ movq %rcx, 1*8(%rdi)
19994 movq %r9, -2*8(%rdi, %rdx)
19995 movq %r8, -1*8(%rdi, %rdx)
19996 jmp 13f
19997@@ -167,9 +167,9 @@ ENTRY(memmove)
19998 * Move data from 8 bytes to 15 bytes.
19999 */
20000 movq 0*8(%rsi), %r11
20001- movq -1*8(%rsi, %rdx), %r10
20002+ movq -1*8(%rsi, %rdx), %r9
20003 movq %r11, 0*8(%rdi)
20004- movq %r10, -1*8(%rdi, %rdx)
20005+ movq %r9, -1*8(%rdi, %rdx)
20006 jmp 13f
20007 10:
20008 cmpq $4, %rdx
20009@@ -178,9 +178,9 @@ ENTRY(memmove)
20010 * Move data from 4 bytes to 7 bytes.
20011 */
20012 movl (%rsi), %r11d
20013- movl -4(%rsi, %rdx), %r10d
20014+ movl -4(%rsi, %rdx), %r9d
20015 movl %r11d, (%rdi)
20016- movl %r10d, -4(%rdi, %rdx)
20017+ movl %r9d, -4(%rdi, %rdx)
20018 jmp 13f
20019 11:
20020 cmp $2, %rdx
20021@@ -189,9 +189,9 @@ ENTRY(memmove)
20022 * Move data from 2 bytes to 3 bytes.
20023 */
20024 movw (%rsi), %r11w
20025- movw -2(%rsi, %rdx), %r10w
20026+ movw -2(%rsi, %rdx), %r9w
20027 movw %r11w, (%rdi)
20028- movw %r10w, -2(%rdi, %rdx)
20029+ movw %r9w, -2(%rdi, %rdx)
20030 jmp 13f
20031 12:
20032 cmp $1, %rdx
20033@@ -202,6 +202,7 @@ ENTRY(memmove)
20034 movb (%rsi), %r11b
20035 movb %r11b, (%rdi)
20036 13:
20037+ pax_force_retaddr
20038 retq
20039 CFI_ENDPROC
20040
20041@@ -210,6 +211,7 @@ ENTRY(memmove)
20042 /* Forward moving data. */
20043 movq %rdx, %rcx
20044 rep movsb
20045+ pax_force_retaddr
20046 retq
20047 .Lmemmove_end_forward_efs:
20048 .previous
20049diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20050index 79bd454..dff325a 100644
20051--- a/arch/x86/lib/memset_64.S
20052+++ b/arch/x86/lib/memset_64.S
20053@@ -31,6 +31,7 @@
20054 movl %r8d,%ecx
20055 rep stosb
20056 movq %r9,%rax
20057+ pax_force_retaddr
20058 ret
20059 .Lmemset_e:
20060 .previous
20061@@ -53,6 +54,7 @@
20062 movl %edx,%ecx
20063 rep stosb
20064 movq %r9,%rax
20065+ pax_force_retaddr
20066 ret
20067 .Lmemset_e_e:
20068 .previous
20069@@ -60,13 +62,13 @@
20070 ENTRY(memset)
20071 ENTRY(__memset)
20072 CFI_STARTPROC
20073- movq %rdi,%r10
20074 movq %rdx,%r11
20075
20076 /* expand byte value */
20077 movzbl %sil,%ecx
20078 movabs $0x0101010101010101,%rax
20079 mul %rcx /* with rax, clobbers rdx */
20080+ movq %rdi,%rdx
20081
20082 /* align dst */
20083 movl %edi,%r9d
20084@@ -120,7 +122,8 @@ ENTRY(__memset)
20085 jnz .Lloop_1
20086
20087 .Lende:
20088- movq %r10,%rax
20089+ movq %rdx,%rax
20090+ pax_force_retaddr
20091 ret
20092
20093 CFI_RESTORE_STATE
20094diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20095index c9f2d9b..e7fd2c0 100644
20096--- a/arch/x86/lib/mmx_32.c
20097+++ b/arch/x86/lib/mmx_32.c
20098@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20099 {
20100 void *p;
20101 int i;
20102+ unsigned long cr0;
20103
20104 if (unlikely(in_interrupt()))
20105 return __memcpy(to, from, len);
20106@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20107 kernel_fpu_begin();
20108
20109 __asm__ __volatile__ (
20110- "1: prefetch (%0)\n" /* This set is 28 bytes */
20111- " prefetch 64(%0)\n"
20112- " prefetch 128(%0)\n"
20113- " prefetch 192(%0)\n"
20114- " prefetch 256(%0)\n"
20115+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20116+ " prefetch 64(%1)\n"
20117+ " prefetch 128(%1)\n"
20118+ " prefetch 192(%1)\n"
20119+ " prefetch 256(%1)\n"
20120 "2: \n"
20121 ".section .fixup, \"ax\"\n"
20122- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20123+ "3: \n"
20124+
20125+#ifdef CONFIG_PAX_KERNEXEC
20126+ " movl %%cr0, %0\n"
20127+ " movl %0, %%eax\n"
20128+ " andl $0xFFFEFFFF, %%eax\n"
20129+ " movl %%eax, %%cr0\n"
20130+#endif
20131+
20132+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20133+
20134+#ifdef CONFIG_PAX_KERNEXEC
20135+ " movl %0, %%cr0\n"
20136+#endif
20137+
20138 " jmp 2b\n"
20139 ".previous\n"
20140 _ASM_EXTABLE(1b, 3b)
20141- : : "r" (from));
20142+ : "=&r" (cr0) : "r" (from) : "ax");
20143
20144 for ( ; i > 5; i--) {
20145 __asm__ __volatile__ (
20146- "1: prefetch 320(%0)\n"
20147- "2: movq (%0), %%mm0\n"
20148- " movq 8(%0), %%mm1\n"
20149- " movq 16(%0), %%mm2\n"
20150- " movq 24(%0), %%mm3\n"
20151- " movq %%mm0, (%1)\n"
20152- " movq %%mm1, 8(%1)\n"
20153- " movq %%mm2, 16(%1)\n"
20154- " movq %%mm3, 24(%1)\n"
20155- " movq 32(%0), %%mm0\n"
20156- " movq 40(%0), %%mm1\n"
20157- " movq 48(%0), %%mm2\n"
20158- " movq 56(%0), %%mm3\n"
20159- " movq %%mm0, 32(%1)\n"
20160- " movq %%mm1, 40(%1)\n"
20161- " movq %%mm2, 48(%1)\n"
20162- " movq %%mm3, 56(%1)\n"
20163+ "1: prefetch 320(%1)\n"
20164+ "2: movq (%1), %%mm0\n"
20165+ " movq 8(%1), %%mm1\n"
20166+ " movq 16(%1), %%mm2\n"
20167+ " movq 24(%1), %%mm3\n"
20168+ " movq %%mm0, (%2)\n"
20169+ " movq %%mm1, 8(%2)\n"
20170+ " movq %%mm2, 16(%2)\n"
20171+ " movq %%mm3, 24(%2)\n"
20172+ " movq 32(%1), %%mm0\n"
20173+ " movq 40(%1), %%mm1\n"
20174+ " movq 48(%1), %%mm2\n"
20175+ " movq 56(%1), %%mm3\n"
20176+ " movq %%mm0, 32(%2)\n"
20177+ " movq %%mm1, 40(%2)\n"
20178+ " movq %%mm2, 48(%2)\n"
20179+ " movq %%mm3, 56(%2)\n"
20180 ".section .fixup, \"ax\"\n"
20181- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20182+ "3:\n"
20183+
20184+#ifdef CONFIG_PAX_KERNEXEC
20185+ " movl %%cr0, %0\n"
20186+ " movl %0, %%eax\n"
20187+ " andl $0xFFFEFFFF, %%eax\n"
20188+ " movl %%eax, %%cr0\n"
20189+#endif
20190+
20191+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20192+
20193+#ifdef CONFIG_PAX_KERNEXEC
20194+ " movl %0, %%cr0\n"
20195+#endif
20196+
20197 " jmp 2b\n"
20198 ".previous\n"
20199 _ASM_EXTABLE(1b, 3b)
20200- : : "r" (from), "r" (to) : "memory");
20201+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20202
20203 from += 64;
20204 to += 64;
20205@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20206 static void fast_copy_page(void *to, void *from)
20207 {
20208 int i;
20209+ unsigned long cr0;
20210
20211 kernel_fpu_begin();
20212
20213@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20214 * but that is for later. -AV
20215 */
20216 __asm__ __volatile__(
20217- "1: prefetch (%0)\n"
20218- " prefetch 64(%0)\n"
20219- " prefetch 128(%0)\n"
20220- " prefetch 192(%0)\n"
20221- " prefetch 256(%0)\n"
20222+ "1: prefetch (%1)\n"
20223+ " prefetch 64(%1)\n"
20224+ " prefetch 128(%1)\n"
20225+ " prefetch 192(%1)\n"
20226+ " prefetch 256(%1)\n"
20227 "2: \n"
20228 ".section .fixup, \"ax\"\n"
20229- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20230+ "3: \n"
20231+
20232+#ifdef CONFIG_PAX_KERNEXEC
20233+ " movl %%cr0, %0\n"
20234+ " movl %0, %%eax\n"
20235+ " andl $0xFFFEFFFF, %%eax\n"
20236+ " movl %%eax, %%cr0\n"
20237+#endif
20238+
20239+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20240+
20241+#ifdef CONFIG_PAX_KERNEXEC
20242+ " movl %0, %%cr0\n"
20243+#endif
20244+
20245 " jmp 2b\n"
20246 ".previous\n"
20247- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20248+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20249
20250 for (i = 0; i < (4096-320)/64; i++) {
20251 __asm__ __volatile__ (
20252- "1: prefetch 320(%0)\n"
20253- "2: movq (%0), %%mm0\n"
20254- " movntq %%mm0, (%1)\n"
20255- " movq 8(%0), %%mm1\n"
20256- " movntq %%mm1, 8(%1)\n"
20257- " movq 16(%0), %%mm2\n"
20258- " movntq %%mm2, 16(%1)\n"
20259- " movq 24(%0), %%mm3\n"
20260- " movntq %%mm3, 24(%1)\n"
20261- " movq 32(%0), %%mm4\n"
20262- " movntq %%mm4, 32(%1)\n"
20263- " movq 40(%0), %%mm5\n"
20264- " movntq %%mm5, 40(%1)\n"
20265- " movq 48(%0), %%mm6\n"
20266- " movntq %%mm6, 48(%1)\n"
20267- " movq 56(%0), %%mm7\n"
20268- " movntq %%mm7, 56(%1)\n"
20269+ "1: prefetch 320(%1)\n"
20270+ "2: movq (%1), %%mm0\n"
20271+ " movntq %%mm0, (%2)\n"
20272+ " movq 8(%1), %%mm1\n"
20273+ " movntq %%mm1, 8(%2)\n"
20274+ " movq 16(%1), %%mm2\n"
20275+ " movntq %%mm2, 16(%2)\n"
20276+ " movq 24(%1), %%mm3\n"
20277+ " movntq %%mm3, 24(%2)\n"
20278+ " movq 32(%1), %%mm4\n"
20279+ " movntq %%mm4, 32(%2)\n"
20280+ " movq 40(%1), %%mm5\n"
20281+ " movntq %%mm5, 40(%2)\n"
20282+ " movq 48(%1), %%mm6\n"
20283+ " movntq %%mm6, 48(%2)\n"
20284+ " movq 56(%1), %%mm7\n"
20285+ " movntq %%mm7, 56(%2)\n"
20286 ".section .fixup, \"ax\"\n"
20287- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20288+ "3:\n"
20289+
20290+#ifdef CONFIG_PAX_KERNEXEC
20291+ " movl %%cr0, %0\n"
20292+ " movl %0, %%eax\n"
20293+ " andl $0xFFFEFFFF, %%eax\n"
20294+ " movl %%eax, %%cr0\n"
20295+#endif
20296+
20297+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20298+
20299+#ifdef CONFIG_PAX_KERNEXEC
20300+ " movl %0, %%cr0\n"
20301+#endif
20302+
20303 " jmp 2b\n"
20304 ".previous\n"
20305- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20306+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20307
20308 from += 64;
20309 to += 64;
20310@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20311 static void fast_copy_page(void *to, void *from)
20312 {
20313 int i;
20314+ unsigned long cr0;
20315
20316 kernel_fpu_begin();
20317
20318 __asm__ __volatile__ (
20319- "1: prefetch (%0)\n"
20320- " prefetch 64(%0)\n"
20321- " prefetch 128(%0)\n"
20322- " prefetch 192(%0)\n"
20323- " prefetch 256(%0)\n"
20324+ "1: prefetch (%1)\n"
20325+ " prefetch 64(%1)\n"
20326+ " prefetch 128(%1)\n"
20327+ " prefetch 192(%1)\n"
20328+ " prefetch 256(%1)\n"
20329 "2: \n"
20330 ".section .fixup, \"ax\"\n"
20331- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20332+ "3: \n"
20333+
20334+#ifdef CONFIG_PAX_KERNEXEC
20335+ " movl %%cr0, %0\n"
20336+ " movl %0, %%eax\n"
20337+ " andl $0xFFFEFFFF, %%eax\n"
20338+ " movl %%eax, %%cr0\n"
20339+#endif
20340+
20341+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20342+
20343+#ifdef CONFIG_PAX_KERNEXEC
20344+ " movl %0, %%cr0\n"
20345+#endif
20346+
20347 " jmp 2b\n"
20348 ".previous\n"
20349- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20350+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20351
20352 for (i = 0; i < 4096/64; i++) {
20353 __asm__ __volatile__ (
20354- "1: prefetch 320(%0)\n"
20355- "2: movq (%0), %%mm0\n"
20356- " movq 8(%0), %%mm1\n"
20357- " movq 16(%0), %%mm2\n"
20358- " movq 24(%0), %%mm3\n"
20359- " movq %%mm0, (%1)\n"
20360- " movq %%mm1, 8(%1)\n"
20361- " movq %%mm2, 16(%1)\n"
20362- " movq %%mm3, 24(%1)\n"
20363- " movq 32(%0), %%mm0\n"
20364- " movq 40(%0), %%mm1\n"
20365- " movq 48(%0), %%mm2\n"
20366- " movq 56(%0), %%mm3\n"
20367- " movq %%mm0, 32(%1)\n"
20368- " movq %%mm1, 40(%1)\n"
20369- " movq %%mm2, 48(%1)\n"
20370- " movq %%mm3, 56(%1)\n"
20371+ "1: prefetch 320(%1)\n"
20372+ "2: movq (%1), %%mm0\n"
20373+ " movq 8(%1), %%mm1\n"
20374+ " movq 16(%1), %%mm2\n"
20375+ " movq 24(%1), %%mm3\n"
20376+ " movq %%mm0, (%2)\n"
20377+ " movq %%mm1, 8(%2)\n"
20378+ " movq %%mm2, 16(%2)\n"
20379+ " movq %%mm3, 24(%2)\n"
20380+ " movq 32(%1), %%mm0\n"
20381+ " movq 40(%1), %%mm1\n"
20382+ " movq 48(%1), %%mm2\n"
20383+ " movq 56(%1), %%mm3\n"
20384+ " movq %%mm0, 32(%2)\n"
20385+ " movq %%mm1, 40(%2)\n"
20386+ " movq %%mm2, 48(%2)\n"
20387+ " movq %%mm3, 56(%2)\n"
20388 ".section .fixup, \"ax\"\n"
20389- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20390+ "3:\n"
20391+
20392+#ifdef CONFIG_PAX_KERNEXEC
20393+ " movl %%cr0, %0\n"
20394+ " movl %0, %%eax\n"
20395+ " andl $0xFFFEFFFF, %%eax\n"
20396+ " movl %%eax, %%cr0\n"
20397+#endif
20398+
20399+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20400+
20401+#ifdef CONFIG_PAX_KERNEXEC
20402+ " movl %0, %%cr0\n"
20403+#endif
20404+
20405 " jmp 2b\n"
20406 ".previous\n"
20407 _ASM_EXTABLE(1b, 3b)
20408- : : "r" (from), "r" (to) : "memory");
20409+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20410
20411 from += 64;
20412 to += 64;
20413diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20414index 69fa106..adda88b 100644
20415--- a/arch/x86/lib/msr-reg.S
20416+++ b/arch/x86/lib/msr-reg.S
20417@@ -3,6 +3,7 @@
20418 #include <asm/dwarf2.h>
20419 #include <asm/asm.h>
20420 #include <asm/msr.h>
20421+#include <asm/alternative-asm.h>
20422
20423 #ifdef CONFIG_X86_64
20424 /*
20425@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20426 CFI_STARTPROC
20427 pushq_cfi %rbx
20428 pushq_cfi %rbp
20429- movq %rdi, %r10 /* Save pointer */
20430+ movq %rdi, %r9 /* Save pointer */
20431 xorl %r11d, %r11d /* Return value */
20432 movl (%rdi), %eax
20433 movl 4(%rdi), %ecx
20434@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20435 movl 28(%rdi), %edi
20436 CFI_REMEMBER_STATE
20437 1: \op
20438-2: movl %eax, (%r10)
20439+2: movl %eax, (%r9)
20440 movl %r11d, %eax /* Return value */
20441- movl %ecx, 4(%r10)
20442- movl %edx, 8(%r10)
20443- movl %ebx, 12(%r10)
20444- movl %ebp, 20(%r10)
20445- movl %esi, 24(%r10)
20446- movl %edi, 28(%r10)
20447+ movl %ecx, 4(%r9)
20448+ movl %edx, 8(%r9)
20449+ movl %ebx, 12(%r9)
20450+ movl %ebp, 20(%r9)
20451+ movl %esi, 24(%r9)
20452+ movl %edi, 28(%r9)
20453 popq_cfi %rbp
20454 popq_cfi %rbx
20455+ pax_force_retaddr
20456 ret
20457 3:
20458 CFI_RESTORE_STATE
20459diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20460index 36b0d15..d381858 100644
20461--- a/arch/x86/lib/putuser.S
20462+++ b/arch/x86/lib/putuser.S
20463@@ -15,7 +15,9 @@
20464 #include <asm/thread_info.h>
20465 #include <asm/errno.h>
20466 #include <asm/asm.h>
20467-
20468+#include <asm/segment.h>
20469+#include <asm/pgtable.h>
20470+#include <asm/alternative-asm.h>
20471
20472 /*
20473 * __put_user_X
20474@@ -29,52 +31,119 @@
20475 * as they get called from within inline assembly.
20476 */
20477
20478-#define ENTER CFI_STARTPROC ; \
20479- GET_THREAD_INFO(%_ASM_BX)
20480-#define EXIT ret ; \
20481+#define ENTER CFI_STARTPROC
20482+#define EXIT pax_force_retaddr; ret ; \
20483 CFI_ENDPROC
20484
20485+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20486+#define _DEST %_ASM_CX,%_ASM_BX
20487+#else
20488+#define _DEST %_ASM_CX
20489+#endif
20490+
20491+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20492+#define __copyuser_seg gs;
20493+#else
20494+#define __copyuser_seg
20495+#endif
20496+
20497 .text
20498 ENTRY(__put_user_1)
20499 ENTER
20500+
20501+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20502+ GET_THREAD_INFO(%_ASM_BX)
20503 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20504 jae bad_put_user
20505-1: movb %al,(%_ASM_CX)
20506+
20507+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20508+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20509+ cmp %_ASM_BX,%_ASM_CX
20510+ jb 1234f
20511+ xor %ebx,%ebx
20512+1234:
20513+#endif
20514+
20515+#endif
20516+
20517+1: __copyuser_seg movb %al,(_DEST)
20518 xor %eax,%eax
20519 EXIT
20520 ENDPROC(__put_user_1)
20521
20522 ENTRY(__put_user_2)
20523 ENTER
20524+
20525+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20526+ GET_THREAD_INFO(%_ASM_BX)
20527 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20528 sub $1,%_ASM_BX
20529 cmp %_ASM_BX,%_ASM_CX
20530 jae bad_put_user
20531-2: movw %ax,(%_ASM_CX)
20532+
20533+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20534+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20535+ cmp %_ASM_BX,%_ASM_CX
20536+ jb 1234f
20537+ xor %ebx,%ebx
20538+1234:
20539+#endif
20540+
20541+#endif
20542+
20543+2: __copyuser_seg movw %ax,(_DEST)
20544 xor %eax,%eax
20545 EXIT
20546 ENDPROC(__put_user_2)
20547
20548 ENTRY(__put_user_4)
20549 ENTER
20550+
20551+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20552+ GET_THREAD_INFO(%_ASM_BX)
20553 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20554 sub $3,%_ASM_BX
20555 cmp %_ASM_BX,%_ASM_CX
20556 jae bad_put_user
20557-3: movl %eax,(%_ASM_CX)
20558+
20559+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20560+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20561+ cmp %_ASM_BX,%_ASM_CX
20562+ jb 1234f
20563+ xor %ebx,%ebx
20564+1234:
20565+#endif
20566+
20567+#endif
20568+
20569+3: __copyuser_seg movl %eax,(_DEST)
20570 xor %eax,%eax
20571 EXIT
20572 ENDPROC(__put_user_4)
20573
20574 ENTRY(__put_user_8)
20575 ENTER
20576+
20577+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20578+ GET_THREAD_INFO(%_ASM_BX)
20579 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20580 sub $7,%_ASM_BX
20581 cmp %_ASM_BX,%_ASM_CX
20582 jae bad_put_user
20583-4: mov %_ASM_AX,(%_ASM_CX)
20584+
20585+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20586+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20587+ cmp %_ASM_BX,%_ASM_CX
20588+ jb 1234f
20589+ xor %ebx,%ebx
20590+1234:
20591+#endif
20592+
20593+#endif
20594+
20595+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20596 #ifdef CONFIG_X86_32
20597-5: movl %edx,4(%_ASM_CX)
20598+5: __copyuser_seg movl %edx,4(_DEST)
20599 #endif
20600 xor %eax,%eax
20601 EXIT
20602diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20603index 1cad221..de671ee 100644
20604--- a/arch/x86/lib/rwlock.S
20605+++ b/arch/x86/lib/rwlock.S
20606@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20607 FRAME
20608 0: LOCK_PREFIX
20609 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20610+
20611+#ifdef CONFIG_PAX_REFCOUNT
20612+ jno 1234f
20613+ LOCK_PREFIX
20614+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20615+ int $4
20616+1234:
20617+ _ASM_EXTABLE(1234b, 1234b)
20618+#endif
20619+
20620 1: rep; nop
20621 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20622 jne 1b
20623 LOCK_PREFIX
20624 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20625+
20626+#ifdef CONFIG_PAX_REFCOUNT
20627+ jno 1234f
20628+ LOCK_PREFIX
20629+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20630+ int $4
20631+1234:
20632+ _ASM_EXTABLE(1234b, 1234b)
20633+#endif
20634+
20635 jnz 0b
20636 ENDFRAME
20637+ pax_force_retaddr
20638 ret
20639 CFI_ENDPROC
20640 END(__write_lock_failed)
20641@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20642 FRAME
20643 0: LOCK_PREFIX
20644 READ_LOCK_SIZE(inc) (%__lock_ptr)
20645+
20646+#ifdef CONFIG_PAX_REFCOUNT
20647+ jno 1234f
20648+ LOCK_PREFIX
20649+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20650+ int $4
20651+1234:
20652+ _ASM_EXTABLE(1234b, 1234b)
20653+#endif
20654+
20655 1: rep; nop
20656 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20657 js 1b
20658 LOCK_PREFIX
20659 READ_LOCK_SIZE(dec) (%__lock_ptr)
20660+
20661+#ifdef CONFIG_PAX_REFCOUNT
20662+ jno 1234f
20663+ LOCK_PREFIX
20664+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20665+ int $4
20666+1234:
20667+ _ASM_EXTABLE(1234b, 1234b)
20668+#endif
20669+
20670 js 0b
20671 ENDFRAME
20672+ pax_force_retaddr
20673 ret
20674 CFI_ENDPROC
20675 END(__read_lock_failed)
20676diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20677index 5dff5f0..cadebf4 100644
20678--- a/arch/x86/lib/rwsem.S
20679+++ b/arch/x86/lib/rwsem.S
20680@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20681 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20682 CFI_RESTORE __ASM_REG(dx)
20683 restore_common_regs
20684+ pax_force_retaddr
20685 ret
20686 CFI_ENDPROC
20687 ENDPROC(call_rwsem_down_read_failed)
20688@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20689 movq %rax,%rdi
20690 call rwsem_down_write_failed
20691 restore_common_regs
20692+ pax_force_retaddr
20693 ret
20694 CFI_ENDPROC
20695 ENDPROC(call_rwsem_down_write_failed)
20696@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20697 movq %rax,%rdi
20698 call rwsem_wake
20699 restore_common_regs
20700-1: ret
20701+1: pax_force_retaddr
20702+ ret
20703 CFI_ENDPROC
20704 ENDPROC(call_rwsem_wake)
20705
20706@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20707 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20708 CFI_RESTORE __ASM_REG(dx)
20709 restore_common_regs
20710+ pax_force_retaddr
20711 ret
20712 CFI_ENDPROC
20713 ENDPROC(call_rwsem_downgrade_wake)
20714diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20715index a63efd6..ccecad8 100644
20716--- a/arch/x86/lib/thunk_64.S
20717+++ b/arch/x86/lib/thunk_64.S
20718@@ -8,6 +8,7 @@
20719 #include <linux/linkage.h>
20720 #include <asm/dwarf2.h>
20721 #include <asm/calling.h>
20722+#include <asm/alternative-asm.h>
20723
20724 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20725 .macro THUNK name, func, put_ret_addr_in_rdi=0
20726@@ -41,5 +42,6 @@
20727 SAVE_ARGS
20728 restore:
20729 RESTORE_ARGS
20730+ pax_force_retaddr
20731 ret
20732 CFI_ENDPROC
20733diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20734index e218d5d..35679b4 100644
20735--- a/arch/x86/lib/usercopy_32.c
20736+++ b/arch/x86/lib/usercopy_32.c
20737@@ -43,7 +43,7 @@ do { \
20738 __asm__ __volatile__( \
20739 " testl %1,%1\n" \
20740 " jz 2f\n" \
20741- "0: lodsb\n" \
20742+ "0: "__copyuser_seg"lodsb\n" \
20743 " stosb\n" \
20744 " testb %%al,%%al\n" \
20745 " jz 1f\n" \
20746@@ -128,10 +128,12 @@ do { \
20747 int __d0; \
20748 might_fault(); \
20749 __asm__ __volatile__( \
20750+ __COPYUSER_SET_ES \
20751 "0: rep; stosl\n" \
20752 " movl %2,%0\n" \
20753 "1: rep; stosb\n" \
20754 "2:\n" \
20755+ __COPYUSER_RESTORE_ES \
20756 ".section .fixup,\"ax\"\n" \
20757 "3: lea 0(%2,%0,4),%0\n" \
20758 " jmp 2b\n" \
20759@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20760 might_fault();
20761
20762 __asm__ __volatile__(
20763+ __COPYUSER_SET_ES
20764 " testl %0, %0\n"
20765 " jz 3f\n"
20766 " andl %0,%%ecx\n"
20767@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20768 " subl %%ecx,%0\n"
20769 " addl %0,%%eax\n"
20770 "1:\n"
20771+ __COPYUSER_RESTORE_ES
20772 ".section .fixup,\"ax\"\n"
20773 "2: xorl %%eax,%%eax\n"
20774 " jmp 1b\n"
20775@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20776
20777 #ifdef CONFIG_X86_INTEL_USERCOPY
20778 static unsigned long
20779-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20780+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20781 {
20782 int d0, d1;
20783 __asm__ __volatile__(
20784@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20785 " .align 2,0x90\n"
20786 "3: movl 0(%4), %%eax\n"
20787 "4: movl 4(%4), %%edx\n"
20788- "5: movl %%eax, 0(%3)\n"
20789- "6: movl %%edx, 4(%3)\n"
20790+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20791+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20792 "7: movl 8(%4), %%eax\n"
20793 "8: movl 12(%4),%%edx\n"
20794- "9: movl %%eax, 8(%3)\n"
20795- "10: movl %%edx, 12(%3)\n"
20796+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20797+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20798 "11: movl 16(%4), %%eax\n"
20799 "12: movl 20(%4), %%edx\n"
20800- "13: movl %%eax, 16(%3)\n"
20801- "14: movl %%edx, 20(%3)\n"
20802+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20803+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20804 "15: movl 24(%4), %%eax\n"
20805 "16: movl 28(%4), %%edx\n"
20806- "17: movl %%eax, 24(%3)\n"
20807- "18: movl %%edx, 28(%3)\n"
20808+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20809+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20810 "19: movl 32(%4), %%eax\n"
20811 "20: movl 36(%4), %%edx\n"
20812- "21: movl %%eax, 32(%3)\n"
20813- "22: movl %%edx, 36(%3)\n"
20814+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20815+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20816 "23: movl 40(%4), %%eax\n"
20817 "24: movl 44(%4), %%edx\n"
20818- "25: movl %%eax, 40(%3)\n"
20819- "26: movl %%edx, 44(%3)\n"
20820+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20821+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20822 "27: movl 48(%4), %%eax\n"
20823 "28: movl 52(%4), %%edx\n"
20824- "29: movl %%eax, 48(%3)\n"
20825- "30: movl %%edx, 52(%3)\n"
20826+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20827+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20828 "31: movl 56(%4), %%eax\n"
20829 "32: movl 60(%4), %%edx\n"
20830- "33: movl %%eax, 56(%3)\n"
20831- "34: movl %%edx, 60(%3)\n"
20832+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20833+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20834 " addl $-64, %0\n"
20835 " addl $64, %4\n"
20836 " addl $64, %3\n"
20837@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20838 " shrl $2, %0\n"
20839 " andl $3, %%eax\n"
20840 " cld\n"
20841+ __COPYUSER_SET_ES
20842 "99: rep; movsl\n"
20843 "36: movl %%eax, %0\n"
20844 "37: rep; movsb\n"
20845 "100:\n"
20846+ __COPYUSER_RESTORE_ES
20847+ ".section .fixup,\"ax\"\n"
20848+ "101: lea 0(%%eax,%0,4),%0\n"
20849+ " jmp 100b\n"
20850+ ".previous\n"
20851+ ".section __ex_table,\"a\"\n"
20852+ " .align 4\n"
20853+ " .long 1b,100b\n"
20854+ " .long 2b,100b\n"
20855+ " .long 3b,100b\n"
20856+ " .long 4b,100b\n"
20857+ " .long 5b,100b\n"
20858+ " .long 6b,100b\n"
20859+ " .long 7b,100b\n"
20860+ " .long 8b,100b\n"
20861+ " .long 9b,100b\n"
20862+ " .long 10b,100b\n"
20863+ " .long 11b,100b\n"
20864+ " .long 12b,100b\n"
20865+ " .long 13b,100b\n"
20866+ " .long 14b,100b\n"
20867+ " .long 15b,100b\n"
20868+ " .long 16b,100b\n"
20869+ " .long 17b,100b\n"
20870+ " .long 18b,100b\n"
20871+ " .long 19b,100b\n"
20872+ " .long 20b,100b\n"
20873+ " .long 21b,100b\n"
20874+ " .long 22b,100b\n"
20875+ " .long 23b,100b\n"
20876+ " .long 24b,100b\n"
20877+ " .long 25b,100b\n"
20878+ " .long 26b,100b\n"
20879+ " .long 27b,100b\n"
20880+ " .long 28b,100b\n"
20881+ " .long 29b,100b\n"
20882+ " .long 30b,100b\n"
20883+ " .long 31b,100b\n"
20884+ " .long 32b,100b\n"
20885+ " .long 33b,100b\n"
20886+ " .long 34b,100b\n"
20887+ " .long 35b,100b\n"
20888+ " .long 36b,100b\n"
20889+ " .long 37b,100b\n"
20890+ " .long 99b,101b\n"
20891+ ".previous"
20892+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20893+ : "1"(to), "2"(from), "0"(size)
20894+ : "eax", "edx", "memory");
20895+ return size;
20896+}
20897+
20898+static unsigned long
20899+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20900+{
20901+ int d0, d1;
20902+ __asm__ __volatile__(
20903+ " .align 2,0x90\n"
20904+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20905+ " cmpl $67, %0\n"
20906+ " jbe 3f\n"
20907+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20908+ " .align 2,0x90\n"
20909+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20910+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20911+ "5: movl %%eax, 0(%3)\n"
20912+ "6: movl %%edx, 4(%3)\n"
20913+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20914+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20915+ "9: movl %%eax, 8(%3)\n"
20916+ "10: movl %%edx, 12(%3)\n"
20917+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20918+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20919+ "13: movl %%eax, 16(%3)\n"
20920+ "14: movl %%edx, 20(%3)\n"
20921+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20922+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20923+ "17: movl %%eax, 24(%3)\n"
20924+ "18: movl %%edx, 28(%3)\n"
20925+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20926+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20927+ "21: movl %%eax, 32(%3)\n"
20928+ "22: movl %%edx, 36(%3)\n"
20929+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20930+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20931+ "25: movl %%eax, 40(%3)\n"
20932+ "26: movl %%edx, 44(%3)\n"
20933+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20934+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20935+ "29: movl %%eax, 48(%3)\n"
20936+ "30: movl %%edx, 52(%3)\n"
20937+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20938+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20939+ "33: movl %%eax, 56(%3)\n"
20940+ "34: movl %%edx, 60(%3)\n"
20941+ " addl $-64, %0\n"
20942+ " addl $64, %4\n"
20943+ " addl $64, %3\n"
20944+ " cmpl $63, %0\n"
20945+ " ja 1b\n"
20946+ "35: movl %0, %%eax\n"
20947+ " shrl $2, %0\n"
20948+ " andl $3, %%eax\n"
20949+ " cld\n"
20950+ "99: rep; "__copyuser_seg" movsl\n"
20951+ "36: movl %%eax, %0\n"
20952+ "37: rep; "__copyuser_seg" movsb\n"
20953+ "100:\n"
20954 ".section .fixup,\"ax\"\n"
20955 "101: lea 0(%%eax,%0,4),%0\n"
20956 " jmp 100b\n"
20957@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20958 int d0, d1;
20959 __asm__ __volatile__(
20960 " .align 2,0x90\n"
20961- "0: movl 32(%4), %%eax\n"
20962+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20963 " cmpl $67, %0\n"
20964 " jbe 2f\n"
20965- "1: movl 64(%4), %%eax\n"
20966+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20967 " .align 2,0x90\n"
20968- "2: movl 0(%4), %%eax\n"
20969- "21: movl 4(%4), %%edx\n"
20970+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20971+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20972 " movl %%eax, 0(%3)\n"
20973 " movl %%edx, 4(%3)\n"
20974- "3: movl 8(%4), %%eax\n"
20975- "31: movl 12(%4),%%edx\n"
20976+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20977+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20978 " movl %%eax, 8(%3)\n"
20979 " movl %%edx, 12(%3)\n"
20980- "4: movl 16(%4), %%eax\n"
20981- "41: movl 20(%4), %%edx\n"
20982+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20983+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20984 " movl %%eax, 16(%3)\n"
20985 " movl %%edx, 20(%3)\n"
20986- "10: movl 24(%4), %%eax\n"
20987- "51: movl 28(%4), %%edx\n"
20988+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20989+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20990 " movl %%eax, 24(%3)\n"
20991 " movl %%edx, 28(%3)\n"
20992- "11: movl 32(%4), %%eax\n"
20993- "61: movl 36(%4), %%edx\n"
20994+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20995+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20996 " movl %%eax, 32(%3)\n"
20997 " movl %%edx, 36(%3)\n"
20998- "12: movl 40(%4), %%eax\n"
20999- "71: movl 44(%4), %%edx\n"
21000+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21001+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21002 " movl %%eax, 40(%3)\n"
21003 " movl %%edx, 44(%3)\n"
21004- "13: movl 48(%4), %%eax\n"
21005- "81: movl 52(%4), %%edx\n"
21006+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21007+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21008 " movl %%eax, 48(%3)\n"
21009 " movl %%edx, 52(%3)\n"
21010- "14: movl 56(%4), %%eax\n"
21011- "91: movl 60(%4), %%edx\n"
21012+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21013+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21014 " movl %%eax, 56(%3)\n"
21015 " movl %%edx, 60(%3)\n"
21016 " addl $-64, %0\n"
21017@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21018 " shrl $2, %0\n"
21019 " andl $3, %%eax\n"
21020 " cld\n"
21021- "6: rep; movsl\n"
21022+ "6: rep; "__copyuser_seg" movsl\n"
21023 " movl %%eax,%0\n"
21024- "7: rep; movsb\n"
21025+ "7: rep; "__copyuser_seg" movsb\n"
21026 "8:\n"
21027 ".section .fixup,\"ax\"\n"
21028 "9: lea 0(%%eax,%0,4),%0\n"
21029@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21030
21031 __asm__ __volatile__(
21032 " .align 2,0x90\n"
21033- "0: movl 32(%4), %%eax\n"
21034+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21035 " cmpl $67, %0\n"
21036 " jbe 2f\n"
21037- "1: movl 64(%4), %%eax\n"
21038+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21039 " .align 2,0x90\n"
21040- "2: movl 0(%4), %%eax\n"
21041- "21: movl 4(%4), %%edx\n"
21042+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21043+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21044 " movnti %%eax, 0(%3)\n"
21045 " movnti %%edx, 4(%3)\n"
21046- "3: movl 8(%4), %%eax\n"
21047- "31: movl 12(%4),%%edx\n"
21048+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21049+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21050 " movnti %%eax, 8(%3)\n"
21051 " movnti %%edx, 12(%3)\n"
21052- "4: movl 16(%4), %%eax\n"
21053- "41: movl 20(%4), %%edx\n"
21054+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21055+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21056 " movnti %%eax, 16(%3)\n"
21057 " movnti %%edx, 20(%3)\n"
21058- "10: movl 24(%4), %%eax\n"
21059- "51: movl 28(%4), %%edx\n"
21060+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21061+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21062 " movnti %%eax, 24(%3)\n"
21063 " movnti %%edx, 28(%3)\n"
21064- "11: movl 32(%4), %%eax\n"
21065- "61: movl 36(%4), %%edx\n"
21066+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21067+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21068 " movnti %%eax, 32(%3)\n"
21069 " movnti %%edx, 36(%3)\n"
21070- "12: movl 40(%4), %%eax\n"
21071- "71: movl 44(%4), %%edx\n"
21072+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21073+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21074 " movnti %%eax, 40(%3)\n"
21075 " movnti %%edx, 44(%3)\n"
21076- "13: movl 48(%4), %%eax\n"
21077- "81: movl 52(%4), %%edx\n"
21078+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21079+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21080 " movnti %%eax, 48(%3)\n"
21081 " movnti %%edx, 52(%3)\n"
21082- "14: movl 56(%4), %%eax\n"
21083- "91: movl 60(%4), %%edx\n"
21084+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21085+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21086 " movnti %%eax, 56(%3)\n"
21087 " movnti %%edx, 60(%3)\n"
21088 " addl $-64, %0\n"
21089@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21090 " shrl $2, %0\n"
21091 " andl $3, %%eax\n"
21092 " cld\n"
21093- "6: rep; movsl\n"
21094+ "6: rep; "__copyuser_seg" movsl\n"
21095 " movl %%eax,%0\n"
21096- "7: rep; movsb\n"
21097+ "7: rep; "__copyuser_seg" movsb\n"
21098 "8:\n"
21099 ".section .fixup,\"ax\"\n"
21100 "9: lea 0(%%eax,%0,4),%0\n"
21101@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21102
21103 __asm__ __volatile__(
21104 " .align 2,0x90\n"
21105- "0: movl 32(%4), %%eax\n"
21106+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21107 " cmpl $67, %0\n"
21108 " jbe 2f\n"
21109- "1: movl 64(%4), %%eax\n"
21110+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21111 " .align 2,0x90\n"
21112- "2: movl 0(%4), %%eax\n"
21113- "21: movl 4(%4), %%edx\n"
21114+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21115+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21116 " movnti %%eax, 0(%3)\n"
21117 " movnti %%edx, 4(%3)\n"
21118- "3: movl 8(%4), %%eax\n"
21119- "31: movl 12(%4),%%edx\n"
21120+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21121+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21122 " movnti %%eax, 8(%3)\n"
21123 " movnti %%edx, 12(%3)\n"
21124- "4: movl 16(%4), %%eax\n"
21125- "41: movl 20(%4), %%edx\n"
21126+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21127+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21128 " movnti %%eax, 16(%3)\n"
21129 " movnti %%edx, 20(%3)\n"
21130- "10: movl 24(%4), %%eax\n"
21131- "51: movl 28(%4), %%edx\n"
21132+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21133+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21134 " movnti %%eax, 24(%3)\n"
21135 " movnti %%edx, 28(%3)\n"
21136- "11: movl 32(%4), %%eax\n"
21137- "61: movl 36(%4), %%edx\n"
21138+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21139+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21140 " movnti %%eax, 32(%3)\n"
21141 " movnti %%edx, 36(%3)\n"
21142- "12: movl 40(%4), %%eax\n"
21143- "71: movl 44(%4), %%edx\n"
21144+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21145+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21146 " movnti %%eax, 40(%3)\n"
21147 " movnti %%edx, 44(%3)\n"
21148- "13: movl 48(%4), %%eax\n"
21149- "81: movl 52(%4), %%edx\n"
21150+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21151+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21152 " movnti %%eax, 48(%3)\n"
21153 " movnti %%edx, 52(%3)\n"
21154- "14: movl 56(%4), %%eax\n"
21155- "91: movl 60(%4), %%edx\n"
21156+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21157+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21158 " movnti %%eax, 56(%3)\n"
21159 " movnti %%edx, 60(%3)\n"
21160 " addl $-64, %0\n"
21161@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21162 " shrl $2, %0\n"
21163 " andl $3, %%eax\n"
21164 " cld\n"
21165- "6: rep; movsl\n"
21166+ "6: rep; "__copyuser_seg" movsl\n"
21167 " movl %%eax,%0\n"
21168- "7: rep; movsb\n"
21169+ "7: rep; "__copyuser_seg" movsb\n"
21170 "8:\n"
21171 ".section .fixup,\"ax\"\n"
21172 "9: lea 0(%%eax,%0,4),%0\n"
21173@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21174 */
21175 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21176 unsigned long size);
21177-unsigned long __copy_user_intel(void __user *to, const void *from,
21178+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21179+ unsigned long size);
21180+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21181 unsigned long size);
21182 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21183 const void __user *from, unsigned long size);
21184 #endif /* CONFIG_X86_INTEL_USERCOPY */
21185
21186 /* Generic arbitrary sized copy. */
21187-#define __copy_user(to, from, size) \
21188+#define __copy_user(to, from, size, prefix, set, restore) \
21189 do { \
21190 int __d0, __d1, __d2; \
21191 __asm__ __volatile__( \
21192+ set \
21193 " cmp $7,%0\n" \
21194 " jbe 1f\n" \
21195 " movl %1,%0\n" \
21196 " negl %0\n" \
21197 " andl $7,%0\n" \
21198 " subl %0,%3\n" \
21199- "4: rep; movsb\n" \
21200+ "4: rep; "prefix"movsb\n" \
21201 " movl %3,%0\n" \
21202 " shrl $2,%0\n" \
21203 " andl $3,%3\n" \
21204 " .align 2,0x90\n" \
21205- "0: rep; movsl\n" \
21206+ "0: rep; "prefix"movsl\n" \
21207 " movl %3,%0\n" \
21208- "1: rep; movsb\n" \
21209+ "1: rep; "prefix"movsb\n" \
21210 "2:\n" \
21211+ restore \
21212 ".section .fixup,\"ax\"\n" \
21213 "5: addl %3,%0\n" \
21214 " jmp 2b\n" \
21215@@ -682,14 +799,14 @@ do { \
21216 " negl %0\n" \
21217 " andl $7,%0\n" \
21218 " subl %0,%3\n" \
21219- "4: rep; movsb\n" \
21220+ "4: rep; "__copyuser_seg"movsb\n" \
21221 " movl %3,%0\n" \
21222 " shrl $2,%0\n" \
21223 " andl $3,%3\n" \
21224 " .align 2,0x90\n" \
21225- "0: rep; movsl\n" \
21226+ "0: rep; "__copyuser_seg"movsl\n" \
21227 " movl %3,%0\n" \
21228- "1: rep; movsb\n" \
21229+ "1: rep; "__copyuser_seg"movsb\n" \
21230 "2:\n" \
21231 ".section .fixup,\"ax\"\n" \
21232 "5: addl %3,%0\n" \
21233@@ -775,9 +892,9 @@ survive:
21234 }
21235 #endif
21236 if (movsl_is_ok(to, from, n))
21237- __copy_user(to, from, n);
21238+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21239 else
21240- n = __copy_user_intel(to, from, n);
21241+ n = __generic_copy_to_user_intel(to, from, n);
21242 return n;
21243 }
21244 EXPORT_SYMBOL(__copy_to_user_ll);
21245@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21246 unsigned long n)
21247 {
21248 if (movsl_is_ok(to, from, n))
21249- __copy_user(to, from, n);
21250+ __copy_user(to, from, n, __copyuser_seg, "", "");
21251 else
21252- n = __copy_user_intel((void __user *)to,
21253- (const void *)from, n);
21254+ n = __generic_copy_from_user_intel(to, from, n);
21255 return n;
21256 }
21257 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21258@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21259 if (n > 64 && cpu_has_xmm2)
21260 n = __copy_user_intel_nocache(to, from, n);
21261 else
21262- __copy_user(to, from, n);
21263+ __copy_user(to, from, n, __copyuser_seg, "", "");
21264 #else
21265- __copy_user(to, from, n);
21266+ __copy_user(to, from, n, __copyuser_seg, "", "");
21267 #endif
21268 return n;
21269 }
21270 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21271
21272-/**
21273- * copy_to_user: - Copy a block of data into user space.
21274- * @to: Destination address, in user space.
21275- * @from: Source address, in kernel space.
21276- * @n: Number of bytes to copy.
21277- *
21278- * Context: User context only. This function may sleep.
21279- *
21280- * Copy data from kernel space to user space.
21281- *
21282- * Returns number of bytes that could not be copied.
21283- * On success, this will be zero.
21284- */
21285-unsigned long
21286-copy_to_user(void __user *to, const void *from, unsigned long n)
21287-{
21288- if (access_ok(VERIFY_WRITE, to, n))
21289- n = __copy_to_user(to, from, n);
21290- return n;
21291-}
21292-EXPORT_SYMBOL(copy_to_user);
21293-
21294-/**
21295- * copy_from_user: - Copy a block of data from user space.
21296- * @to: Destination address, in kernel space.
21297- * @from: Source address, in user space.
21298- * @n: Number of bytes to copy.
21299- *
21300- * Context: User context only. This function may sleep.
21301- *
21302- * Copy data from user space to kernel space.
21303- *
21304- * Returns number of bytes that could not be copied.
21305- * On success, this will be zero.
21306- *
21307- * If some data could not be copied, this function will pad the copied
21308- * data to the requested size using zero bytes.
21309- */
21310-unsigned long
21311-_copy_from_user(void *to, const void __user *from, unsigned long n)
21312-{
21313- if (access_ok(VERIFY_READ, from, n))
21314- n = __copy_from_user(to, from, n);
21315- else
21316- memset(to, 0, n);
21317- return n;
21318-}
21319-EXPORT_SYMBOL(_copy_from_user);
21320-
21321 void copy_from_user_overflow(void)
21322 {
21323 WARN(1, "Buffer overflow detected!\n");
21324 }
21325 EXPORT_SYMBOL(copy_from_user_overflow);
21326+
21327+void copy_to_user_overflow(void)
21328+{
21329+ WARN(1, "Buffer overflow detected!\n");
21330+}
21331+EXPORT_SYMBOL(copy_to_user_overflow);
21332+
21333+#ifdef CONFIG_PAX_MEMORY_UDEREF
21334+void __set_fs(mm_segment_t x)
21335+{
21336+ switch (x.seg) {
21337+ case 0:
21338+ loadsegment(gs, 0);
21339+ break;
21340+ case TASK_SIZE_MAX:
21341+ loadsegment(gs, __USER_DS);
21342+ break;
21343+ case -1UL:
21344+ loadsegment(gs, __KERNEL_DS);
21345+ break;
21346+ default:
21347+ BUG();
21348+ }
21349+ return;
21350+}
21351+EXPORT_SYMBOL(__set_fs);
21352+
21353+void set_fs(mm_segment_t x)
21354+{
21355+ current_thread_info()->addr_limit = x;
21356+ __set_fs(x);
21357+}
21358+EXPORT_SYMBOL(set_fs);
21359+#endif
21360diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21361index b7c2849..8633ad8 100644
21362--- a/arch/x86/lib/usercopy_64.c
21363+++ b/arch/x86/lib/usercopy_64.c
21364@@ -42,6 +42,12 @@ long
21365 __strncpy_from_user(char *dst, const char __user *src, long count)
21366 {
21367 long res;
21368+
21369+#ifdef CONFIG_PAX_MEMORY_UDEREF
21370+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21371+ src += PAX_USER_SHADOW_BASE;
21372+#endif
21373+
21374 __do_strncpy_from_user(dst, src, count, res);
21375 return res;
21376 }
21377@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21378 {
21379 long __d0;
21380 might_fault();
21381+
21382+#ifdef CONFIG_PAX_MEMORY_UDEREF
21383+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21384+ addr += PAX_USER_SHADOW_BASE;
21385+#endif
21386+
21387 /* no memory constraint because it doesn't change any memory gcc knows
21388 about */
21389 asm volatile(
21390@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21391 }
21392 EXPORT_SYMBOL(strlen_user);
21393
21394-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21395+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21396 {
21397- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21398- return copy_user_generic((__force void *)to, (__force void *)from, len);
21399- }
21400- return len;
21401+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21402+
21403+#ifdef CONFIG_PAX_MEMORY_UDEREF
21404+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21405+ to += PAX_USER_SHADOW_BASE;
21406+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21407+ from += PAX_USER_SHADOW_BASE;
21408+#endif
21409+
21410+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21411+ }
21412+ return len;
21413 }
21414 EXPORT_SYMBOL(copy_in_user);
21415
21416@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21417 * it is not necessary to optimize tail handling.
21418 */
21419 unsigned long
21420-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21421+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21422 {
21423 char c;
21424 unsigned zero_len;
21425diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21426index d0474ad..36e9257 100644
21427--- a/arch/x86/mm/extable.c
21428+++ b/arch/x86/mm/extable.c
21429@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21430 const struct exception_table_entry *fixup;
21431
21432 #ifdef CONFIG_PNPBIOS
21433- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21434+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21435 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21436 extern u32 pnp_bios_is_utter_crap;
21437 pnp_bios_is_utter_crap = 1;
21438diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21439index 5db0490..2ddce45 100644
21440--- a/arch/x86/mm/fault.c
21441+++ b/arch/x86/mm/fault.c
21442@@ -13,11 +13,18 @@
21443 #include <linux/perf_event.h> /* perf_sw_event */
21444 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21445 #include <linux/prefetch.h> /* prefetchw */
21446+#include <linux/unistd.h>
21447+#include <linux/compiler.h>
21448
21449 #include <asm/traps.h> /* dotraplinkage, ... */
21450 #include <asm/pgalloc.h> /* pgd_*(), ... */
21451 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21452 #include <asm/fixmap.h> /* VSYSCALL_START */
21453+#include <asm/tlbflush.h>
21454+
21455+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21456+#include <asm/stacktrace.h>
21457+#endif
21458
21459 /*
21460 * Page fault error code bits:
21461@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21462 int ret = 0;
21463
21464 /* kprobe_running() needs smp_processor_id() */
21465- if (kprobes_built_in() && !user_mode_vm(regs)) {
21466+ if (kprobes_built_in() && !user_mode(regs)) {
21467 preempt_disable();
21468 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21469 ret = 1;
21470@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21471 return !instr_lo || (instr_lo>>1) == 1;
21472 case 0x00:
21473 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21474- if (probe_kernel_address(instr, opcode))
21475+ if (user_mode(regs)) {
21476+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21477+ return 0;
21478+ } else if (probe_kernel_address(instr, opcode))
21479 return 0;
21480
21481 *prefetch = (instr_lo == 0xF) &&
21482@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21483 while (instr < max_instr) {
21484 unsigned char opcode;
21485
21486- if (probe_kernel_address(instr, opcode))
21487+ if (user_mode(regs)) {
21488+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21489+ break;
21490+ } else if (probe_kernel_address(instr, opcode))
21491 break;
21492
21493 instr++;
21494@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21495 force_sig_info(si_signo, &info, tsk);
21496 }
21497
21498+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21499+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21500+#endif
21501+
21502+#ifdef CONFIG_PAX_EMUTRAMP
21503+static int pax_handle_fetch_fault(struct pt_regs *regs);
21504+#endif
21505+
21506+#ifdef CONFIG_PAX_PAGEEXEC
21507+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21508+{
21509+ pgd_t *pgd;
21510+ pud_t *pud;
21511+ pmd_t *pmd;
21512+
21513+ pgd = pgd_offset(mm, address);
21514+ if (!pgd_present(*pgd))
21515+ return NULL;
21516+ pud = pud_offset(pgd, address);
21517+ if (!pud_present(*pud))
21518+ return NULL;
21519+ pmd = pmd_offset(pud, address);
21520+ if (!pmd_present(*pmd))
21521+ return NULL;
21522+ return pmd;
21523+}
21524+#endif
21525+
21526 DEFINE_SPINLOCK(pgd_lock);
21527 LIST_HEAD(pgd_list);
21528
21529@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21530 for (address = VMALLOC_START & PMD_MASK;
21531 address >= TASK_SIZE && address < FIXADDR_TOP;
21532 address += PMD_SIZE) {
21533+
21534+#ifdef CONFIG_PAX_PER_CPU_PGD
21535+ unsigned long cpu;
21536+#else
21537 struct page *page;
21538+#endif
21539
21540 spin_lock(&pgd_lock);
21541+
21542+#ifdef CONFIG_PAX_PER_CPU_PGD
21543+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21544+ pgd_t *pgd = get_cpu_pgd(cpu);
21545+ pmd_t *ret;
21546+#else
21547 list_for_each_entry(page, &pgd_list, lru) {
21548+ pgd_t *pgd = page_address(page);
21549 spinlock_t *pgt_lock;
21550 pmd_t *ret;
21551
21552@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21553 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21554
21555 spin_lock(pgt_lock);
21556- ret = vmalloc_sync_one(page_address(page), address);
21557+#endif
21558+
21559+ ret = vmalloc_sync_one(pgd, address);
21560+
21561+#ifndef CONFIG_PAX_PER_CPU_PGD
21562 spin_unlock(pgt_lock);
21563+#endif
21564
21565 if (!ret)
21566 break;
21567@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21568 * an interrupt in the middle of a task switch..
21569 */
21570 pgd_paddr = read_cr3();
21571+
21572+#ifdef CONFIG_PAX_PER_CPU_PGD
21573+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21574+#endif
21575+
21576 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21577 if (!pmd_k)
21578 return -1;
21579@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21580 * happen within a race in page table update. In the later
21581 * case just flush:
21582 */
21583+
21584+#ifdef CONFIG_PAX_PER_CPU_PGD
21585+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21586+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21587+#else
21588 pgd = pgd_offset(current->active_mm, address);
21589+#endif
21590+
21591 pgd_ref = pgd_offset_k(address);
21592 if (pgd_none(*pgd_ref))
21593 return -1;
21594@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21595 static int is_errata100(struct pt_regs *regs, unsigned long address)
21596 {
21597 #ifdef CONFIG_X86_64
21598- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21599+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21600 return 1;
21601 #endif
21602 return 0;
21603@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21604 }
21605
21606 static const char nx_warning[] = KERN_CRIT
21607-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21608+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21609
21610 static void
21611 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21612@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21613 if (!oops_may_print())
21614 return;
21615
21616- if (error_code & PF_INSTR) {
21617+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21618 unsigned int level;
21619
21620 pte_t *pte = lookup_address(address, &level);
21621
21622 if (pte && pte_present(*pte) && !pte_exec(*pte))
21623- printk(nx_warning, current_uid());
21624+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21625 }
21626
21627+#ifdef CONFIG_PAX_KERNEXEC
21628+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21629+ if (current->signal->curr_ip)
21630+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21631+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21632+ else
21633+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21634+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21635+ }
21636+#endif
21637+
21638 printk(KERN_ALERT "BUG: unable to handle kernel ");
21639 if (address < PAGE_SIZE)
21640 printk(KERN_CONT "NULL pointer dereference");
21641@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21642 }
21643 #endif
21644
21645+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21646+ if (pax_is_fetch_fault(regs, error_code, address)) {
21647+
21648+#ifdef CONFIG_PAX_EMUTRAMP
21649+ switch (pax_handle_fetch_fault(regs)) {
21650+ case 2:
21651+ return;
21652+ }
21653+#endif
21654+
21655+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21656+ do_group_exit(SIGKILL);
21657+ }
21658+#endif
21659+
21660 if (unlikely(show_unhandled_signals))
21661 show_signal_msg(regs, error_code, address, tsk);
21662
21663@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21664 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21665 printk(KERN_ERR
21666 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21667- tsk->comm, tsk->pid, address);
21668+ tsk->comm, task_pid_nr(tsk), address);
21669 code = BUS_MCEERR_AR;
21670 }
21671 #endif
21672@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21673 return 1;
21674 }
21675
21676+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21677+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21678+{
21679+ pte_t *pte;
21680+ pmd_t *pmd;
21681+ spinlock_t *ptl;
21682+ unsigned char pte_mask;
21683+
21684+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21685+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21686+ return 0;
21687+
21688+ /* PaX: it's our fault, let's handle it if we can */
21689+
21690+ /* PaX: take a look at read faults before acquiring any locks */
21691+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21692+ /* instruction fetch attempt from a protected page in user mode */
21693+ up_read(&mm->mmap_sem);
21694+
21695+#ifdef CONFIG_PAX_EMUTRAMP
21696+ switch (pax_handle_fetch_fault(regs)) {
21697+ case 2:
21698+ return 1;
21699+ }
21700+#endif
21701+
21702+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21703+ do_group_exit(SIGKILL);
21704+ }
21705+
21706+ pmd = pax_get_pmd(mm, address);
21707+ if (unlikely(!pmd))
21708+ return 0;
21709+
21710+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21711+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21712+ pte_unmap_unlock(pte, ptl);
21713+ return 0;
21714+ }
21715+
21716+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21717+ /* write attempt to a protected page in user mode */
21718+ pte_unmap_unlock(pte, ptl);
21719+ return 0;
21720+ }
21721+
21722+#ifdef CONFIG_SMP
21723+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21724+#else
21725+ if (likely(address > get_limit(regs->cs)))
21726+#endif
21727+ {
21728+ set_pte(pte, pte_mkread(*pte));
21729+ __flush_tlb_one(address);
21730+ pte_unmap_unlock(pte, ptl);
21731+ up_read(&mm->mmap_sem);
21732+ return 1;
21733+ }
21734+
21735+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21736+
21737+ /*
21738+ * PaX: fill DTLB with user rights and retry
21739+ */
21740+ __asm__ __volatile__ (
21741+ "orb %2,(%1)\n"
21742+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21743+/*
21744+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21745+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21746+ * page fault when examined during a TLB load attempt. this is true not only
21747+ * for PTEs holding a non-present entry but also present entries that will
21748+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21749+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21750+ * for our target pages since their PTEs are simply not in the TLBs at all.
21751+
21752+ * the best thing in omitting it is that we gain around 15-20% speed in the
21753+ * fast path of the page fault handler and can get rid of tracing since we
21754+ * can no longer flush unintended entries.
21755+ */
21756+ "invlpg (%0)\n"
21757+#endif
21758+ __copyuser_seg"testb $0,(%0)\n"
21759+ "xorb %3,(%1)\n"
21760+ :
21761+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21762+ : "memory", "cc");
21763+ pte_unmap_unlock(pte, ptl);
21764+ up_read(&mm->mmap_sem);
21765+ return 1;
21766+}
21767+#endif
21768+
21769 /*
21770 * Handle a spurious fault caused by a stale TLB entry.
21771 *
21772@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21773 static inline int
21774 access_error(unsigned long error_code, struct vm_area_struct *vma)
21775 {
21776+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21777+ return 1;
21778+
21779 if (error_code & PF_WRITE) {
21780 /* write, present and write, not present: */
21781 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21782@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21783 {
21784 struct vm_area_struct *vma;
21785 struct task_struct *tsk;
21786- unsigned long address;
21787 struct mm_struct *mm;
21788 int fault;
21789 int write = error_code & PF_WRITE;
21790 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21791 (write ? FAULT_FLAG_WRITE : 0);
21792
21793- tsk = current;
21794- mm = tsk->mm;
21795-
21796 /* Get the faulting address: */
21797- address = read_cr2();
21798+ unsigned long address = read_cr2();
21799+
21800+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21801+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21802+ if (!search_exception_tables(regs->ip)) {
21803+ bad_area_nosemaphore(regs, error_code, address);
21804+ return;
21805+ }
21806+ if (address < PAX_USER_SHADOW_BASE) {
21807+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21808+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21809+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21810+ } else
21811+ address -= PAX_USER_SHADOW_BASE;
21812+ }
21813+#endif
21814+
21815+ tsk = current;
21816+ mm = tsk->mm;
21817
21818 /*
21819 * Detect and handle instructions that would cause a page fault for
21820@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21821 * User-mode registers count as a user access even for any
21822 * potential system fault or CPU buglet:
21823 */
21824- if (user_mode_vm(regs)) {
21825+ if (user_mode(regs)) {
21826 local_irq_enable();
21827 error_code |= PF_USER;
21828 } else {
21829@@ -1122,6 +1328,11 @@ retry:
21830 might_sleep();
21831 }
21832
21833+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21834+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21835+ return;
21836+#endif
21837+
21838 vma = find_vma(mm, address);
21839 if (unlikely(!vma)) {
21840 bad_area(regs, error_code, address);
21841@@ -1133,18 +1344,24 @@ retry:
21842 bad_area(regs, error_code, address);
21843 return;
21844 }
21845- if (error_code & PF_USER) {
21846- /*
21847- * Accessing the stack below %sp is always a bug.
21848- * The large cushion allows instructions like enter
21849- * and pusha to work. ("enter $65535, $31" pushes
21850- * 32 pointers and then decrements %sp by 65535.)
21851- */
21852- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21853- bad_area(regs, error_code, address);
21854- return;
21855- }
21856+ /*
21857+ * Accessing the stack below %sp is always a bug.
21858+ * The large cushion allows instructions like enter
21859+ * and pusha to work. ("enter $65535, $31" pushes
21860+ * 32 pointers and then decrements %sp by 65535.)
21861+ */
21862+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21863+ bad_area(regs, error_code, address);
21864+ return;
21865 }
21866+
21867+#ifdef CONFIG_PAX_SEGMEXEC
21868+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21869+ bad_area(regs, error_code, address);
21870+ return;
21871+ }
21872+#endif
21873+
21874 if (unlikely(expand_stack(vma, address))) {
21875 bad_area(regs, error_code, address);
21876 return;
21877@@ -1199,3 +1416,292 @@ good_area:
21878
21879 up_read(&mm->mmap_sem);
21880 }
21881+
21882+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21883+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21884+{
21885+ struct mm_struct *mm = current->mm;
21886+ unsigned long ip = regs->ip;
21887+
21888+ if (v8086_mode(regs))
21889+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21890+
21891+#ifdef CONFIG_PAX_PAGEEXEC
21892+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21893+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21894+ return true;
21895+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21896+ return true;
21897+ return false;
21898+ }
21899+#endif
21900+
21901+#ifdef CONFIG_PAX_SEGMEXEC
21902+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21903+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21904+ return true;
21905+ return false;
21906+ }
21907+#endif
21908+
21909+ return false;
21910+}
21911+#endif
21912+
21913+#ifdef CONFIG_PAX_EMUTRAMP
21914+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21915+{
21916+ int err;
21917+
21918+ do { /* PaX: libffi trampoline emulation */
21919+ unsigned char mov, jmp;
21920+ unsigned int addr1, addr2;
21921+
21922+#ifdef CONFIG_X86_64
21923+ if ((regs->ip + 9) >> 32)
21924+ break;
21925+#endif
21926+
21927+ err = get_user(mov, (unsigned char __user *)regs->ip);
21928+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21929+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21930+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21931+
21932+ if (err)
21933+ break;
21934+
21935+ if (mov == 0xB8 && jmp == 0xE9) {
21936+ regs->ax = addr1;
21937+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21938+ return 2;
21939+ }
21940+ } while (0);
21941+
21942+ do { /* PaX: gcc trampoline emulation #1 */
21943+ unsigned char mov1, mov2;
21944+ unsigned short jmp;
21945+ unsigned int addr1, addr2;
21946+
21947+#ifdef CONFIG_X86_64
21948+ if ((regs->ip + 11) >> 32)
21949+ break;
21950+#endif
21951+
21952+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21953+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21954+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21955+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21956+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21957+
21958+ if (err)
21959+ break;
21960+
21961+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21962+ regs->cx = addr1;
21963+ regs->ax = addr2;
21964+ regs->ip = addr2;
21965+ return 2;
21966+ }
21967+ } while (0);
21968+
21969+ do { /* PaX: gcc trampoline emulation #2 */
21970+ unsigned char mov, jmp;
21971+ unsigned int addr1, addr2;
21972+
21973+#ifdef CONFIG_X86_64
21974+ if ((regs->ip + 9) >> 32)
21975+ break;
21976+#endif
21977+
21978+ err = get_user(mov, (unsigned char __user *)regs->ip);
21979+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21980+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21981+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21982+
21983+ if (err)
21984+ break;
21985+
21986+ if (mov == 0xB9 && jmp == 0xE9) {
21987+ regs->cx = addr1;
21988+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21989+ return 2;
21990+ }
21991+ } while (0);
21992+
21993+ return 1; /* PaX in action */
21994+}
21995+
21996+#ifdef CONFIG_X86_64
21997+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21998+{
21999+ int err;
22000+
22001+ do { /* PaX: libffi trampoline emulation */
22002+ unsigned short mov1, mov2, jmp1;
22003+ unsigned char stcclc, jmp2;
22004+ unsigned long addr1, addr2;
22005+
22006+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22007+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22008+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22009+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22010+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22011+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22012+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22013+
22014+ if (err)
22015+ break;
22016+
22017+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22018+ regs->r11 = addr1;
22019+ regs->r10 = addr2;
22020+ if (stcclc == 0xF8)
22021+ regs->flags &= ~X86_EFLAGS_CF;
22022+ else
22023+ regs->flags |= X86_EFLAGS_CF;
22024+ regs->ip = addr1;
22025+ return 2;
22026+ }
22027+ } while (0);
22028+
22029+ do { /* PaX: gcc trampoline emulation #1 */
22030+ unsigned short mov1, mov2, jmp1;
22031+ unsigned char jmp2;
22032+ unsigned int addr1;
22033+ unsigned long addr2;
22034+
22035+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22036+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22037+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22038+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22039+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22040+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22041+
22042+ if (err)
22043+ break;
22044+
22045+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22046+ regs->r11 = addr1;
22047+ regs->r10 = addr2;
22048+ regs->ip = addr1;
22049+ return 2;
22050+ }
22051+ } while (0);
22052+
22053+ do { /* PaX: gcc trampoline emulation #2 */
22054+ unsigned short mov1, mov2, jmp1;
22055+ unsigned char jmp2;
22056+ unsigned long addr1, addr2;
22057+
22058+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22059+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22060+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22061+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22062+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22063+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22064+
22065+ if (err)
22066+ break;
22067+
22068+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22069+ regs->r11 = addr1;
22070+ regs->r10 = addr2;
22071+ regs->ip = addr1;
22072+ return 2;
22073+ }
22074+ } while (0);
22075+
22076+ return 1; /* PaX in action */
22077+}
22078+#endif
22079+
22080+/*
22081+ * PaX: decide what to do with offenders (regs->ip = fault address)
22082+ *
22083+ * returns 1 when task should be killed
22084+ * 2 when gcc trampoline was detected
22085+ */
22086+static int pax_handle_fetch_fault(struct pt_regs *regs)
22087+{
22088+ if (v8086_mode(regs))
22089+ return 1;
22090+
22091+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22092+ return 1;
22093+
22094+#ifdef CONFIG_X86_32
22095+ return pax_handle_fetch_fault_32(regs);
22096+#else
22097+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22098+ return pax_handle_fetch_fault_32(regs);
22099+ else
22100+ return pax_handle_fetch_fault_64(regs);
22101+#endif
22102+}
22103+#endif
22104+
22105+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22106+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22107+{
22108+ long i;
22109+
22110+ printk(KERN_ERR "PAX: bytes at PC: ");
22111+ for (i = 0; i < 20; i++) {
22112+ unsigned char c;
22113+ if (get_user(c, (unsigned char __force_user *)pc+i))
22114+ printk(KERN_CONT "?? ");
22115+ else
22116+ printk(KERN_CONT "%02x ", c);
22117+ }
22118+ printk("\n");
22119+
22120+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22121+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22122+ unsigned long c;
22123+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22124+#ifdef CONFIG_X86_32
22125+ printk(KERN_CONT "???????? ");
22126+#else
22127+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22128+ printk(KERN_CONT "???????? ???????? ");
22129+ else
22130+ printk(KERN_CONT "???????????????? ");
22131+#endif
22132+ } else {
22133+#ifdef CONFIG_X86_64
22134+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22135+ printk(KERN_CONT "%08x ", (unsigned int)c);
22136+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22137+ } else
22138+#endif
22139+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22140+ }
22141+ }
22142+ printk("\n");
22143+}
22144+#endif
22145+
22146+/**
22147+ * probe_kernel_write(): safely attempt to write to a location
22148+ * @dst: address to write to
22149+ * @src: pointer to the data that shall be written
22150+ * @size: size of the data chunk
22151+ *
22152+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22153+ * happens, handle that and return -EFAULT.
22154+ */
22155+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22156+{
22157+ long ret;
22158+ mm_segment_t old_fs = get_fs();
22159+
22160+ set_fs(KERNEL_DS);
22161+ pagefault_disable();
22162+ pax_open_kernel();
22163+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22164+ pax_close_kernel();
22165+ pagefault_enable();
22166+ set_fs(old_fs);
22167+
22168+ return ret ? -EFAULT : 0;
22169+}
22170diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22171index dd74e46..7d26398 100644
22172--- a/arch/x86/mm/gup.c
22173+++ b/arch/x86/mm/gup.c
22174@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22175 addr = start;
22176 len = (unsigned long) nr_pages << PAGE_SHIFT;
22177 end = start + len;
22178- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22179+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22180 (void __user *)start, len)))
22181 return 0;
22182
22183diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22184index f4f29b1..5cac4fb 100644
22185--- a/arch/x86/mm/highmem_32.c
22186+++ b/arch/x86/mm/highmem_32.c
22187@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22188 idx = type + KM_TYPE_NR*smp_processor_id();
22189 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22190 BUG_ON(!pte_none(*(kmap_pte-idx)));
22191+
22192+ pax_open_kernel();
22193 set_pte(kmap_pte-idx, mk_pte(page, prot));
22194+ pax_close_kernel();
22195+
22196 arch_flush_lazy_mmu_mode();
22197
22198 return (void *)vaddr;
22199diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22200index f581a18..29efd37 100644
22201--- a/arch/x86/mm/hugetlbpage.c
22202+++ b/arch/x86/mm/hugetlbpage.c
22203@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22204 struct hstate *h = hstate_file(file);
22205 struct mm_struct *mm = current->mm;
22206 struct vm_area_struct *vma;
22207- unsigned long start_addr;
22208+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22209+
22210+#ifdef CONFIG_PAX_SEGMEXEC
22211+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22212+ pax_task_size = SEGMEXEC_TASK_SIZE;
22213+#endif
22214+
22215+ pax_task_size -= PAGE_SIZE;
22216
22217 if (len > mm->cached_hole_size) {
22218- start_addr = mm->free_area_cache;
22219+ start_addr = mm->free_area_cache;
22220 } else {
22221- start_addr = TASK_UNMAPPED_BASE;
22222- mm->cached_hole_size = 0;
22223+ start_addr = mm->mmap_base;
22224+ mm->cached_hole_size = 0;
22225 }
22226
22227 full_search:
22228@@ -280,26 +287,27 @@ full_search:
22229
22230 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22231 /* At this point: (!vma || addr < vma->vm_end). */
22232- if (TASK_SIZE - len < addr) {
22233+ if (pax_task_size - len < addr) {
22234 /*
22235 * Start a new search - just in case we missed
22236 * some holes.
22237 */
22238- if (start_addr != TASK_UNMAPPED_BASE) {
22239- start_addr = TASK_UNMAPPED_BASE;
22240+ if (start_addr != mm->mmap_base) {
22241+ start_addr = mm->mmap_base;
22242 mm->cached_hole_size = 0;
22243 goto full_search;
22244 }
22245 return -ENOMEM;
22246 }
22247- if (!vma || addr + len <= vma->vm_start) {
22248- mm->free_area_cache = addr + len;
22249- return addr;
22250- }
22251+ if (check_heap_stack_gap(vma, addr, len))
22252+ break;
22253 if (addr + mm->cached_hole_size < vma->vm_start)
22254 mm->cached_hole_size = vma->vm_start - addr;
22255 addr = ALIGN(vma->vm_end, huge_page_size(h));
22256 }
22257+
22258+ mm->free_area_cache = addr + len;
22259+ return addr;
22260 }
22261
22262 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22263@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22264 {
22265 struct hstate *h = hstate_file(file);
22266 struct mm_struct *mm = current->mm;
22267- struct vm_area_struct *vma, *prev_vma;
22268- unsigned long base = mm->mmap_base, addr = addr0;
22269+ struct vm_area_struct *vma;
22270+ unsigned long base = mm->mmap_base, addr;
22271 unsigned long largest_hole = mm->cached_hole_size;
22272- int first_time = 1;
22273
22274 /* don't allow allocations above current base */
22275 if (mm->free_area_cache > base)
22276@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22277 largest_hole = 0;
22278 mm->free_area_cache = base;
22279 }
22280-try_again:
22281+
22282 /* make sure it can fit in the remaining address space */
22283 if (mm->free_area_cache < len)
22284 goto fail;
22285
22286 /* either no address requested or can't fit in requested address hole */
22287- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22288+ addr = (mm->free_area_cache - len);
22289 do {
22290+ addr &= huge_page_mask(h);
22291+ vma = find_vma(mm, addr);
22292 /*
22293 * Lookup failure means no vma is above this address,
22294 * i.e. return with success:
22295- */
22296- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22297- return addr;
22298-
22299- /*
22300 * new region fits between prev_vma->vm_end and
22301 * vma->vm_start, use it:
22302 */
22303- if (addr + len <= vma->vm_start &&
22304- (!prev_vma || (addr >= prev_vma->vm_end))) {
22305+ if (check_heap_stack_gap(vma, addr, len)) {
22306 /* remember the address as a hint for next time */
22307- mm->cached_hole_size = largest_hole;
22308- return (mm->free_area_cache = addr);
22309- } else {
22310- /* pull free_area_cache down to the first hole */
22311- if (mm->free_area_cache == vma->vm_end) {
22312- mm->free_area_cache = vma->vm_start;
22313- mm->cached_hole_size = largest_hole;
22314- }
22315+ mm->cached_hole_size = largest_hole;
22316+ return (mm->free_area_cache = addr);
22317+ }
22318+ /* pull free_area_cache down to the first hole */
22319+ if (mm->free_area_cache == vma->vm_end) {
22320+ mm->free_area_cache = vma->vm_start;
22321+ mm->cached_hole_size = largest_hole;
22322 }
22323
22324 /* remember the largest hole we saw so far */
22325 if (addr + largest_hole < vma->vm_start)
22326- largest_hole = vma->vm_start - addr;
22327+ largest_hole = vma->vm_start - addr;
22328
22329 /* try just below the current vma->vm_start */
22330- addr = (vma->vm_start - len) & huge_page_mask(h);
22331- } while (len <= vma->vm_start);
22332+ addr = skip_heap_stack_gap(vma, len);
22333+ } while (!IS_ERR_VALUE(addr));
22334
22335 fail:
22336 /*
22337- * if hint left us with no space for the requested
22338- * mapping then try again:
22339- */
22340- if (first_time) {
22341- mm->free_area_cache = base;
22342- largest_hole = 0;
22343- first_time = 0;
22344- goto try_again;
22345- }
22346- /*
22347 * A failed mmap() very likely causes application failure,
22348 * so fall back to the bottom-up function here. This scenario
22349 * can happen with large stack limits and large mmap()
22350 * allocations.
22351 */
22352- mm->free_area_cache = TASK_UNMAPPED_BASE;
22353+
22354+#ifdef CONFIG_PAX_SEGMEXEC
22355+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22356+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22357+ else
22358+#endif
22359+
22360+ mm->mmap_base = TASK_UNMAPPED_BASE;
22361+
22362+#ifdef CONFIG_PAX_RANDMMAP
22363+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22364+ mm->mmap_base += mm->delta_mmap;
22365+#endif
22366+
22367+ mm->free_area_cache = mm->mmap_base;
22368 mm->cached_hole_size = ~0UL;
22369 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22370 len, pgoff, flags);
22371@@ -386,6 +392,7 @@ fail:
22372 /*
22373 * Restore the topdown base:
22374 */
22375+ mm->mmap_base = base;
22376 mm->free_area_cache = base;
22377 mm->cached_hole_size = ~0UL;
22378
22379@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22380 struct hstate *h = hstate_file(file);
22381 struct mm_struct *mm = current->mm;
22382 struct vm_area_struct *vma;
22383+ unsigned long pax_task_size = TASK_SIZE;
22384
22385 if (len & ~huge_page_mask(h))
22386 return -EINVAL;
22387- if (len > TASK_SIZE)
22388+
22389+#ifdef CONFIG_PAX_SEGMEXEC
22390+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22391+ pax_task_size = SEGMEXEC_TASK_SIZE;
22392+#endif
22393+
22394+ pax_task_size -= PAGE_SIZE;
22395+
22396+ if (len > pax_task_size)
22397 return -ENOMEM;
22398
22399 if (flags & MAP_FIXED) {
22400@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22401 if (addr) {
22402 addr = ALIGN(addr, huge_page_size(h));
22403 vma = find_vma(mm, addr);
22404- if (TASK_SIZE - len >= addr &&
22405- (!vma || addr + len <= vma->vm_start))
22406+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22407 return addr;
22408 }
22409 if (mm->get_unmapped_area == arch_get_unmapped_area)
22410diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22411index 87488b9..399f416 100644
22412--- a/arch/x86/mm/init.c
22413+++ b/arch/x86/mm/init.c
22414@@ -15,6 +15,7 @@
22415 #include <asm/tlbflush.h>
22416 #include <asm/tlb.h>
22417 #include <asm/proto.h>
22418+#include <asm/desc.h>
22419
22420 unsigned long __initdata pgt_buf_start;
22421 unsigned long __meminitdata pgt_buf_end;
22422@@ -31,7 +32,7 @@ int direct_gbpages
22423 static void __init find_early_table_space(unsigned long end, int use_pse,
22424 int use_gbpages)
22425 {
22426- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22427+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22428 phys_addr_t base;
22429
22430 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22431@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22432 */
22433 int devmem_is_allowed(unsigned long pagenr)
22434 {
22435+#ifdef CONFIG_GRKERNSEC_KMEM
22436+ /* allow BDA */
22437+ if (!pagenr)
22438+ return 1;
22439+ /* allow EBDA */
22440+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22441+ return 1;
22442+#else
22443+ if (!pagenr)
22444+ return 1;
22445+#ifdef CONFIG_VM86
22446+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22447+ return 1;
22448+#endif
22449+#endif
22450+
22451+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22452+ return 1;
22453+#ifdef CONFIG_GRKERNSEC_KMEM
22454+ /* throw out everything else below 1MB */
22455 if (pagenr <= 256)
22456- return 1;
22457+ return 0;
22458+#endif
22459 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22460 return 0;
22461 if (!page_is_ram(pagenr))
22462@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22463
22464 void free_initmem(void)
22465 {
22466+
22467+#ifdef CONFIG_PAX_KERNEXEC
22468+#ifdef CONFIG_X86_32
22469+ /* PaX: limit KERNEL_CS to actual size */
22470+ unsigned long addr, limit;
22471+ struct desc_struct d;
22472+ int cpu;
22473+
22474+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22475+ limit = (limit - 1UL) >> PAGE_SHIFT;
22476+
22477+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22478+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22479+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22480+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22481+ }
22482+
22483+ /* PaX: make KERNEL_CS read-only */
22484+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22485+ if (!paravirt_enabled())
22486+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22487+/*
22488+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22489+ pgd = pgd_offset_k(addr);
22490+ pud = pud_offset(pgd, addr);
22491+ pmd = pmd_offset(pud, addr);
22492+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22493+ }
22494+*/
22495+#ifdef CONFIG_X86_PAE
22496+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22497+/*
22498+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22499+ pgd = pgd_offset_k(addr);
22500+ pud = pud_offset(pgd, addr);
22501+ pmd = pmd_offset(pud, addr);
22502+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22503+ }
22504+*/
22505+#endif
22506+
22507+#ifdef CONFIG_MODULES
22508+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22509+#endif
22510+
22511+#else
22512+ pgd_t *pgd;
22513+ pud_t *pud;
22514+ pmd_t *pmd;
22515+ unsigned long addr, end;
22516+
22517+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22518+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22519+ pgd = pgd_offset_k(addr);
22520+ pud = pud_offset(pgd, addr);
22521+ pmd = pmd_offset(pud, addr);
22522+ if (!pmd_present(*pmd))
22523+ continue;
22524+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22525+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22526+ else
22527+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22528+ }
22529+
22530+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22531+ end = addr + KERNEL_IMAGE_SIZE;
22532+ for (; addr < end; addr += PMD_SIZE) {
22533+ pgd = pgd_offset_k(addr);
22534+ pud = pud_offset(pgd, addr);
22535+ pmd = pmd_offset(pud, addr);
22536+ if (!pmd_present(*pmd))
22537+ continue;
22538+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22539+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22540+ }
22541+#endif
22542+
22543+ flush_tlb_all();
22544+#endif
22545+
22546 free_init_pages("unused kernel memory",
22547 (unsigned long)(&__init_begin),
22548 (unsigned long)(&__init_end));
22549diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22550index 29f7c6d..b46b35b 100644
22551--- a/arch/x86/mm/init_32.c
22552+++ b/arch/x86/mm/init_32.c
22553@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22554 }
22555
22556 /*
22557- * Creates a middle page table and puts a pointer to it in the
22558- * given global directory entry. This only returns the gd entry
22559- * in non-PAE compilation mode, since the middle layer is folded.
22560- */
22561-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22562-{
22563- pud_t *pud;
22564- pmd_t *pmd_table;
22565-
22566-#ifdef CONFIG_X86_PAE
22567- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22568- if (after_bootmem)
22569- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22570- else
22571- pmd_table = (pmd_t *)alloc_low_page();
22572- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22573- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22574- pud = pud_offset(pgd, 0);
22575- BUG_ON(pmd_table != pmd_offset(pud, 0));
22576-
22577- return pmd_table;
22578- }
22579-#endif
22580- pud = pud_offset(pgd, 0);
22581- pmd_table = pmd_offset(pud, 0);
22582-
22583- return pmd_table;
22584-}
22585-
22586-/*
22587 * Create a page table and place a pointer to it in a middle page
22588 * directory entry:
22589 */
22590@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22591 page_table = (pte_t *)alloc_low_page();
22592
22593 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22594+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22595+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22596+#else
22597 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22598+#endif
22599 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22600 }
22601
22602 return pte_offset_kernel(pmd, 0);
22603 }
22604
22605+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22606+{
22607+ pud_t *pud;
22608+ pmd_t *pmd_table;
22609+
22610+ pud = pud_offset(pgd, 0);
22611+ pmd_table = pmd_offset(pud, 0);
22612+
22613+ return pmd_table;
22614+}
22615+
22616 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22617 {
22618 int pgd_idx = pgd_index(vaddr);
22619@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22620 int pgd_idx, pmd_idx;
22621 unsigned long vaddr;
22622 pgd_t *pgd;
22623+ pud_t *pud;
22624 pmd_t *pmd;
22625 pte_t *pte = NULL;
22626
22627@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22628 pgd = pgd_base + pgd_idx;
22629
22630 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22631- pmd = one_md_table_init(pgd);
22632- pmd = pmd + pmd_index(vaddr);
22633+ pud = pud_offset(pgd, vaddr);
22634+ pmd = pmd_offset(pud, vaddr);
22635+
22636+#ifdef CONFIG_X86_PAE
22637+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22638+#endif
22639+
22640 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22641 pmd++, pmd_idx++) {
22642 pte = page_table_kmap_check(one_page_table_init(pmd),
22643@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22644 }
22645 }
22646
22647-static inline int is_kernel_text(unsigned long addr)
22648+static inline int is_kernel_text(unsigned long start, unsigned long end)
22649 {
22650- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22651- return 1;
22652- return 0;
22653+ if ((start > ktla_ktva((unsigned long)_etext) ||
22654+ end <= ktla_ktva((unsigned long)_stext)) &&
22655+ (start > ktla_ktva((unsigned long)_einittext) ||
22656+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22657+
22658+#ifdef CONFIG_ACPI_SLEEP
22659+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22660+#endif
22661+
22662+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22663+ return 0;
22664+ return 1;
22665 }
22666
22667 /*
22668@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22669 unsigned long last_map_addr = end;
22670 unsigned long start_pfn, end_pfn;
22671 pgd_t *pgd_base = swapper_pg_dir;
22672- int pgd_idx, pmd_idx, pte_ofs;
22673+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22674 unsigned long pfn;
22675 pgd_t *pgd;
22676+ pud_t *pud;
22677 pmd_t *pmd;
22678 pte_t *pte;
22679 unsigned pages_2m, pages_4k;
22680@@ -281,8 +282,13 @@ repeat:
22681 pfn = start_pfn;
22682 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22683 pgd = pgd_base + pgd_idx;
22684- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22685- pmd = one_md_table_init(pgd);
22686+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22687+ pud = pud_offset(pgd, 0);
22688+ pmd = pmd_offset(pud, 0);
22689+
22690+#ifdef CONFIG_X86_PAE
22691+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22692+#endif
22693
22694 if (pfn >= end_pfn)
22695 continue;
22696@@ -294,14 +300,13 @@ repeat:
22697 #endif
22698 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22699 pmd++, pmd_idx++) {
22700- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22701+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22702
22703 /*
22704 * Map with big pages if possible, otherwise
22705 * create normal page tables:
22706 */
22707 if (use_pse) {
22708- unsigned int addr2;
22709 pgprot_t prot = PAGE_KERNEL_LARGE;
22710 /*
22711 * first pass will use the same initial
22712@@ -311,11 +316,7 @@ repeat:
22713 __pgprot(PTE_IDENT_ATTR |
22714 _PAGE_PSE);
22715
22716- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22717- PAGE_OFFSET + PAGE_SIZE-1;
22718-
22719- if (is_kernel_text(addr) ||
22720- is_kernel_text(addr2))
22721+ if (is_kernel_text(address, address + PMD_SIZE))
22722 prot = PAGE_KERNEL_LARGE_EXEC;
22723
22724 pages_2m++;
22725@@ -332,7 +333,7 @@ repeat:
22726 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22727 pte += pte_ofs;
22728 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22729- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22730+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22731 pgprot_t prot = PAGE_KERNEL;
22732 /*
22733 * first pass will use the same initial
22734@@ -340,7 +341,7 @@ repeat:
22735 */
22736 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22737
22738- if (is_kernel_text(addr))
22739+ if (is_kernel_text(address, address + PAGE_SIZE))
22740 prot = PAGE_KERNEL_EXEC;
22741
22742 pages_4k++;
22743@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22744
22745 pud = pud_offset(pgd, va);
22746 pmd = pmd_offset(pud, va);
22747- if (!pmd_present(*pmd))
22748+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22749 break;
22750
22751 pte = pte_offset_kernel(pmd, va);
22752@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22753
22754 static void __init pagetable_init(void)
22755 {
22756- pgd_t *pgd_base = swapper_pg_dir;
22757-
22758- permanent_kmaps_init(pgd_base);
22759+ permanent_kmaps_init(swapper_pg_dir);
22760 }
22761
22762-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22763+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22764 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22765
22766 /* user-defined highmem size */
22767@@ -757,6 +756,12 @@ void __init mem_init(void)
22768
22769 pci_iommu_alloc();
22770
22771+#ifdef CONFIG_PAX_PER_CPU_PGD
22772+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22773+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22774+ KERNEL_PGD_PTRS);
22775+#endif
22776+
22777 #ifdef CONFIG_FLATMEM
22778 BUG_ON(!mem_map);
22779 #endif
22780@@ -774,7 +779,7 @@ void __init mem_init(void)
22781 set_highmem_pages_init();
22782
22783 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22784- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22785+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22786 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22787
22788 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22789@@ -815,10 +820,10 @@ void __init mem_init(void)
22790 ((unsigned long)&__init_end -
22791 (unsigned long)&__init_begin) >> 10,
22792
22793- (unsigned long)&_etext, (unsigned long)&_edata,
22794- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22795+ (unsigned long)&_sdata, (unsigned long)&_edata,
22796+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22797
22798- (unsigned long)&_text, (unsigned long)&_etext,
22799+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22800 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22801
22802 /*
22803@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22804 if (!kernel_set_to_readonly)
22805 return;
22806
22807+ start = ktla_ktva(start);
22808 pr_debug("Set kernel text: %lx - %lx for read write\n",
22809 start, start+size);
22810
22811@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22812 if (!kernel_set_to_readonly)
22813 return;
22814
22815+ start = ktla_ktva(start);
22816 pr_debug("Set kernel text: %lx - %lx for read only\n",
22817 start, start+size);
22818
22819@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22820 unsigned long start = PFN_ALIGN(_text);
22821 unsigned long size = PFN_ALIGN(_etext) - start;
22822
22823+ start = ktla_ktva(start);
22824 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22825 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22826 size >> 10);
22827diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22828index bbaaa00..796fa65 100644
22829--- a/arch/x86/mm/init_64.c
22830+++ b/arch/x86/mm/init_64.c
22831@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22832 * around without checking the pgd every time.
22833 */
22834
22835-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22836+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22837 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22838
22839 int force_personality32;
22840@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22841
22842 for (address = start; address <= end; address += PGDIR_SIZE) {
22843 const pgd_t *pgd_ref = pgd_offset_k(address);
22844+
22845+#ifdef CONFIG_PAX_PER_CPU_PGD
22846+ unsigned long cpu;
22847+#else
22848 struct page *page;
22849+#endif
22850
22851 if (pgd_none(*pgd_ref))
22852 continue;
22853
22854 spin_lock(&pgd_lock);
22855+
22856+#ifdef CONFIG_PAX_PER_CPU_PGD
22857+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
22858+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22859+#else
22860 list_for_each_entry(page, &pgd_list, lru) {
22861 pgd_t *pgd;
22862 spinlock_t *pgt_lock;
22863@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22864 /* the pgt_lock only for Xen */
22865 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22866 spin_lock(pgt_lock);
22867+#endif
22868
22869 if (pgd_none(*pgd))
22870 set_pgd(pgd, *pgd_ref);
22871@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22872 BUG_ON(pgd_page_vaddr(*pgd)
22873 != pgd_page_vaddr(*pgd_ref));
22874
22875+#ifndef CONFIG_PAX_PER_CPU_PGD
22876 spin_unlock(pgt_lock);
22877+#endif
22878+
22879 }
22880 spin_unlock(&pgd_lock);
22881 }
22882@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22883 pmd = fill_pmd(pud, vaddr);
22884 pte = fill_pte(pmd, vaddr);
22885
22886+ pax_open_kernel();
22887 set_pte(pte, new_pte);
22888+ pax_close_kernel();
22889
22890 /*
22891 * It's enough to flush this one mapping.
22892@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22893 pgd = pgd_offset_k((unsigned long)__va(phys));
22894 if (pgd_none(*pgd)) {
22895 pud = (pud_t *) spp_getpage();
22896- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22897- _PAGE_USER));
22898+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22899 }
22900 pud = pud_offset(pgd, (unsigned long)__va(phys));
22901 if (pud_none(*pud)) {
22902 pmd = (pmd_t *) spp_getpage();
22903- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22904- _PAGE_USER));
22905+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22906 }
22907 pmd = pmd_offset(pud, phys);
22908 BUG_ON(!pmd_none(*pmd));
22909@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22910 if (pfn >= pgt_buf_top)
22911 panic("alloc_low_page: ran out of memory");
22912
22913- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22914+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22915 clear_page(adr);
22916 *phys = pfn * PAGE_SIZE;
22917 return adr;
22918@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22919
22920 phys = __pa(virt);
22921 left = phys & (PAGE_SIZE - 1);
22922- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22923+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22924 adr = (void *)(((unsigned long)adr) | left);
22925
22926 return adr;
22927@@ -693,6 +707,12 @@ void __init mem_init(void)
22928
22929 pci_iommu_alloc();
22930
22931+#ifdef CONFIG_PAX_PER_CPU_PGD
22932+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22933+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22934+ KERNEL_PGD_PTRS);
22935+#endif
22936+
22937 /* clear_bss() already clear the empty_zero_page */
22938
22939 reservedpages = 0;
22940@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22941 static struct vm_area_struct gate_vma = {
22942 .vm_start = VSYSCALL_START,
22943 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22944- .vm_page_prot = PAGE_READONLY_EXEC,
22945- .vm_flags = VM_READ | VM_EXEC
22946+ .vm_page_prot = PAGE_READONLY,
22947+ .vm_flags = VM_READ
22948 };
22949
22950 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22951@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22952
22953 const char *arch_vma_name(struct vm_area_struct *vma)
22954 {
22955- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22956+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22957 return "[vdso]";
22958 if (vma == &gate_vma)
22959 return "[vsyscall]";
22960diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22961index 7b179b4..6bd1777 100644
22962--- a/arch/x86/mm/iomap_32.c
22963+++ b/arch/x86/mm/iomap_32.c
22964@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22965 type = kmap_atomic_idx_push();
22966 idx = type + KM_TYPE_NR * smp_processor_id();
22967 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22968+
22969+ pax_open_kernel();
22970 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22971+ pax_close_kernel();
22972+
22973 arch_flush_lazy_mmu_mode();
22974
22975 return (void *)vaddr;
22976diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22977index be1ef57..55f0160 100644
22978--- a/arch/x86/mm/ioremap.c
22979+++ b/arch/x86/mm/ioremap.c
22980@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22981 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22982 int is_ram = page_is_ram(pfn);
22983
22984- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22985+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22986 return NULL;
22987 WARN_ON_ONCE(is_ram);
22988 }
22989@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
22990
22991 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
22992 if (page_is_ram(start >> PAGE_SHIFT))
22993+#ifdef CONFIG_HIGHMEM
22994+ if ((start >> PAGE_SHIFT) < max_low_pfn)
22995+#endif
22996 return __va(phys);
22997
22998 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
22999@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23000 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23001
23002 static __initdata int after_paging_init;
23003-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23004+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23005
23006 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23007 {
23008@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23009 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23010
23011 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23012- memset(bm_pte, 0, sizeof(bm_pte));
23013- pmd_populate_kernel(&init_mm, pmd, bm_pte);
23014+ pmd_populate_user(&init_mm, pmd, bm_pte);
23015
23016 /*
23017 * The boot-ioremap range spans multiple pmds, for which
23018diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23019index d87dd6d..bf3fa66 100644
23020--- a/arch/x86/mm/kmemcheck/kmemcheck.c
23021+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23022@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23023 * memory (e.g. tracked pages)? For now, we need this to avoid
23024 * invoking kmemcheck for PnP BIOS calls.
23025 */
23026- if (regs->flags & X86_VM_MASK)
23027+ if (v8086_mode(regs))
23028 return false;
23029- if (regs->cs != __KERNEL_CS)
23030+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23031 return false;
23032
23033 pte = kmemcheck_pte_lookup(address);
23034diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23035index 845df68..1d8d29f 100644
23036--- a/arch/x86/mm/mmap.c
23037+++ b/arch/x86/mm/mmap.c
23038@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23039 * Leave an at least ~128 MB hole with possible stack randomization.
23040 */
23041 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23042-#define MAX_GAP (TASK_SIZE/6*5)
23043+#define MAX_GAP (pax_task_size/6*5)
23044
23045 static int mmap_is_legacy(void)
23046 {
23047@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23048 return rnd << PAGE_SHIFT;
23049 }
23050
23051-static unsigned long mmap_base(void)
23052+static unsigned long mmap_base(struct mm_struct *mm)
23053 {
23054 unsigned long gap = rlimit(RLIMIT_STACK);
23055+ unsigned long pax_task_size = TASK_SIZE;
23056+
23057+#ifdef CONFIG_PAX_SEGMEXEC
23058+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23059+ pax_task_size = SEGMEXEC_TASK_SIZE;
23060+#endif
23061
23062 if (gap < MIN_GAP)
23063 gap = MIN_GAP;
23064 else if (gap > MAX_GAP)
23065 gap = MAX_GAP;
23066
23067- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23068+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23069 }
23070
23071 /*
23072 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23073 * does, but not when emulating X86_32
23074 */
23075-static unsigned long mmap_legacy_base(void)
23076+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23077 {
23078- if (mmap_is_ia32())
23079+ if (mmap_is_ia32()) {
23080+
23081+#ifdef CONFIG_PAX_SEGMEXEC
23082+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23083+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23084+ else
23085+#endif
23086+
23087 return TASK_UNMAPPED_BASE;
23088- else
23089+ } else
23090 return TASK_UNMAPPED_BASE + mmap_rnd();
23091 }
23092
23093@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23094 void arch_pick_mmap_layout(struct mm_struct *mm)
23095 {
23096 if (mmap_is_legacy()) {
23097- mm->mmap_base = mmap_legacy_base();
23098+ mm->mmap_base = mmap_legacy_base(mm);
23099+
23100+#ifdef CONFIG_PAX_RANDMMAP
23101+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23102+ mm->mmap_base += mm->delta_mmap;
23103+#endif
23104+
23105 mm->get_unmapped_area = arch_get_unmapped_area;
23106 mm->unmap_area = arch_unmap_area;
23107 } else {
23108- mm->mmap_base = mmap_base();
23109+ mm->mmap_base = mmap_base(mm);
23110+
23111+#ifdef CONFIG_PAX_RANDMMAP
23112+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23113+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23114+#endif
23115+
23116 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23117 mm->unmap_area = arch_unmap_area_topdown;
23118 }
23119diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23120index de54b9b..799051e 100644
23121--- a/arch/x86/mm/mmio-mod.c
23122+++ b/arch/x86/mm/mmio-mod.c
23123@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23124 break;
23125 default:
23126 {
23127- unsigned char *ip = (unsigned char *)instptr;
23128+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23129 my_trace->opcode = MMIO_UNKNOWN_OP;
23130 my_trace->width = 0;
23131 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23132@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23133 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23134 void __iomem *addr)
23135 {
23136- static atomic_t next_id;
23137+ static atomic_unchecked_t next_id;
23138 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23139 /* These are page-unaligned. */
23140 struct mmiotrace_map map = {
23141@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23142 .private = trace
23143 },
23144 .phys = offset,
23145- .id = atomic_inc_return(&next_id)
23146+ .id = atomic_inc_return_unchecked(&next_id)
23147 };
23148 map.map_id = trace->id;
23149
23150diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23151index b008656..773eac2 100644
23152--- a/arch/x86/mm/pageattr-test.c
23153+++ b/arch/x86/mm/pageattr-test.c
23154@@ -36,7 +36,7 @@ enum {
23155
23156 static int pte_testbit(pte_t pte)
23157 {
23158- return pte_flags(pte) & _PAGE_UNUSED1;
23159+ return pte_flags(pte) & _PAGE_CPA_TEST;
23160 }
23161
23162 struct split_state {
23163diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23164index f9e5267..77b1a40 100644
23165--- a/arch/x86/mm/pageattr.c
23166+++ b/arch/x86/mm/pageattr.c
23167@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23168 */
23169 #ifdef CONFIG_PCI_BIOS
23170 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23171- pgprot_val(forbidden) |= _PAGE_NX;
23172+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23173 #endif
23174
23175 /*
23176@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23177 * Does not cover __inittext since that is gone later on. On
23178 * 64bit we do not enforce !NX on the low mapping
23179 */
23180- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23181- pgprot_val(forbidden) |= _PAGE_NX;
23182+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23183+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23184
23185+#ifdef CONFIG_DEBUG_RODATA
23186 /*
23187 * The .rodata section needs to be read-only. Using the pfn
23188 * catches all aliases.
23189@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23190 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23191 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23192 pgprot_val(forbidden) |= _PAGE_RW;
23193+#endif
23194
23195 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23196 /*
23197@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23198 }
23199 #endif
23200
23201+#ifdef CONFIG_PAX_KERNEXEC
23202+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23203+ pgprot_val(forbidden) |= _PAGE_RW;
23204+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23205+ }
23206+#endif
23207+
23208 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23209
23210 return prot;
23211@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23212 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23213 {
23214 /* change init_mm */
23215+ pax_open_kernel();
23216 set_pte_atomic(kpte, pte);
23217+
23218 #ifdef CONFIG_X86_32
23219 if (!SHARED_KERNEL_PMD) {
23220+
23221+#ifdef CONFIG_PAX_PER_CPU_PGD
23222+ unsigned long cpu;
23223+#else
23224 struct page *page;
23225+#endif
23226
23227+#ifdef CONFIG_PAX_PER_CPU_PGD
23228+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23229+ pgd_t *pgd = get_cpu_pgd(cpu);
23230+#else
23231 list_for_each_entry(page, &pgd_list, lru) {
23232- pgd_t *pgd;
23233+ pgd_t *pgd = (pgd_t *)page_address(page);
23234+#endif
23235+
23236 pud_t *pud;
23237 pmd_t *pmd;
23238
23239- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23240+ pgd += pgd_index(address);
23241 pud = pud_offset(pgd, address);
23242 pmd = pmd_offset(pud, address);
23243 set_pte_atomic((pte_t *)pmd, pte);
23244 }
23245 }
23246 #endif
23247+ pax_close_kernel();
23248 }
23249
23250 static int
23251diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23252index f6ff57b..481690f 100644
23253--- a/arch/x86/mm/pat.c
23254+++ b/arch/x86/mm/pat.c
23255@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23256
23257 if (!entry) {
23258 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23259- current->comm, current->pid, start, end);
23260+ current->comm, task_pid_nr(current), start, end);
23261 return -EINVAL;
23262 }
23263
23264@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23265 while (cursor < to) {
23266 if (!devmem_is_allowed(pfn)) {
23267 printk(KERN_INFO
23268- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23269- current->comm, from, to);
23270+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23271+ current->comm, from, to, cursor);
23272 return 0;
23273 }
23274 cursor += PAGE_SIZE;
23275@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23276 printk(KERN_INFO
23277 "%s:%d ioremap_change_attr failed %s "
23278 "for %Lx-%Lx\n",
23279- current->comm, current->pid,
23280+ current->comm, task_pid_nr(current),
23281 cattr_name(flags),
23282 base, (unsigned long long)(base + size));
23283 return -EINVAL;
23284@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23285 if (want_flags != flags) {
23286 printk(KERN_WARNING
23287 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23288- current->comm, current->pid,
23289+ current->comm, task_pid_nr(current),
23290 cattr_name(want_flags),
23291 (unsigned long long)paddr,
23292 (unsigned long long)(paddr + size),
23293@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23294 free_memtype(paddr, paddr + size);
23295 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23296 " for %Lx-%Lx, got %s\n",
23297- current->comm, current->pid,
23298+ current->comm, task_pid_nr(current),
23299 cattr_name(want_flags),
23300 (unsigned long long)paddr,
23301 (unsigned long long)(paddr + size),
23302diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23303index 9f0614d..92ae64a 100644
23304--- a/arch/x86/mm/pf_in.c
23305+++ b/arch/x86/mm/pf_in.c
23306@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23307 int i;
23308 enum reason_type rv = OTHERS;
23309
23310- p = (unsigned char *)ins_addr;
23311+ p = (unsigned char *)ktla_ktva(ins_addr);
23312 p += skip_prefix(p, &prf);
23313 p += get_opcode(p, &opcode);
23314
23315@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23316 struct prefix_bits prf;
23317 int i;
23318
23319- p = (unsigned char *)ins_addr;
23320+ p = (unsigned char *)ktla_ktva(ins_addr);
23321 p += skip_prefix(p, &prf);
23322 p += get_opcode(p, &opcode);
23323
23324@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23325 struct prefix_bits prf;
23326 int i;
23327
23328- p = (unsigned char *)ins_addr;
23329+ p = (unsigned char *)ktla_ktva(ins_addr);
23330 p += skip_prefix(p, &prf);
23331 p += get_opcode(p, &opcode);
23332
23333@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23334 struct prefix_bits prf;
23335 int i;
23336
23337- p = (unsigned char *)ins_addr;
23338+ p = (unsigned char *)ktla_ktva(ins_addr);
23339 p += skip_prefix(p, &prf);
23340 p += get_opcode(p, &opcode);
23341 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23342@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23343 struct prefix_bits prf;
23344 int i;
23345
23346- p = (unsigned char *)ins_addr;
23347+ p = (unsigned char *)ktla_ktva(ins_addr);
23348 p += skip_prefix(p, &prf);
23349 p += get_opcode(p, &opcode);
23350 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23351diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23352index 8573b83..c3b1a30 100644
23353--- a/arch/x86/mm/pgtable.c
23354+++ b/arch/x86/mm/pgtable.c
23355@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23356 list_del(&page->lru);
23357 }
23358
23359-#define UNSHARED_PTRS_PER_PGD \
23360- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23361+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23362+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23363
23364+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23365+{
23366+ while (count--)
23367+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23368+}
23369+#endif
23370
23371+#ifdef CONFIG_PAX_PER_CPU_PGD
23372+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23373+{
23374+ while (count--)
23375+
23376+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23377+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23378+#else
23379+ *dst++ = *src++;
23380+#endif
23381+
23382+}
23383+#endif
23384+
23385+#ifdef CONFIG_X86_64
23386+#define pxd_t pud_t
23387+#define pyd_t pgd_t
23388+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23389+#define pxd_free(mm, pud) pud_free((mm), (pud))
23390+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23391+#define pyd_offset(mm, address) pgd_offset((mm), (address))
23392+#define PYD_SIZE PGDIR_SIZE
23393+#else
23394+#define pxd_t pmd_t
23395+#define pyd_t pud_t
23396+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23397+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23398+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23399+#define pyd_offset(mm, address) pud_offset((mm), (address))
23400+#define PYD_SIZE PUD_SIZE
23401+#endif
23402+
23403+#ifdef CONFIG_PAX_PER_CPU_PGD
23404+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23405+static inline void pgd_dtor(pgd_t *pgd) {}
23406+#else
23407 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23408 {
23409 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23410@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23411 pgd_list_del(pgd);
23412 spin_unlock(&pgd_lock);
23413 }
23414+#endif
23415
23416 /*
23417 * List of all pgd's needed for non-PAE so it can invalidate entries
23418@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23419 * -- wli
23420 */
23421
23422-#ifdef CONFIG_X86_PAE
23423+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23424 /*
23425 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23426 * updating the top-level pagetable entries to guarantee the
23427@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23428 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23429 * and initialize the kernel pmds here.
23430 */
23431-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23432+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23433
23434 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23435 {
23436@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23437 */
23438 flush_tlb_mm(mm);
23439 }
23440+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23441+#define PREALLOCATED_PXDS USER_PGD_PTRS
23442 #else /* !CONFIG_X86_PAE */
23443
23444 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23445-#define PREALLOCATED_PMDS 0
23446+#define PREALLOCATED_PXDS 0
23447
23448 #endif /* CONFIG_X86_PAE */
23449
23450-static void free_pmds(pmd_t *pmds[])
23451+static void free_pxds(pxd_t *pxds[])
23452 {
23453 int i;
23454
23455- for(i = 0; i < PREALLOCATED_PMDS; i++)
23456- if (pmds[i])
23457- free_page((unsigned long)pmds[i]);
23458+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23459+ if (pxds[i])
23460+ free_page((unsigned long)pxds[i]);
23461 }
23462
23463-static int preallocate_pmds(pmd_t *pmds[])
23464+static int preallocate_pxds(pxd_t *pxds[])
23465 {
23466 int i;
23467 bool failed = false;
23468
23469- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23470- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23471- if (pmd == NULL)
23472+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23473+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23474+ if (pxd == NULL)
23475 failed = true;
23476- pmds[i] = pmd;
23477+ pxds[i] = pxd;
23478 }
23479
23480 if (failed) {
23481- free_pmds(pmds);
23482+ free_pxds(pxds);
23483 return -ENOMEM;
23484 }
23485
23486@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23487 * preallocate which never got a corresponding vma will need to be
23488 * freed manually.
23489 */
23490-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23491+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23492 {
23493 int i;
23494
23495- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23496+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23497 pgd_t pgd = pgdp[i];
23498
23499 if (pgd_val(pgd) != 0) {
23500- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23501+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23502
23503- pgdp[i] = native_make_pgd(0);
23504+ set_pgd(pgdp + i, native_make_pgd(0));
23505
23506- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23507- pmd_free(mm, pmd);
23508+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23509+ pxd_free(mm, pxd);
23510 }
23511 }
23512 }
23513
23514-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23515+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23516 {
23517- pud_t *pud;
23518+ pyd_t *pyd;
23519 unsigned long addr;
23520 int i;
23521
23522- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23523+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23524 return;
23525
23526- pud = pud_offset(pgd, 0);
23527+#ifdef CONFIG_X86_64
23528+ pyd = pyd_offset(mm, 0L);
23529+#else
23530+ pyd = pyd_offset(pgd, 0L);
23531+#endif
23532
23533- for (addr = i = 0; i < PREALLOCATED_PMDS;
23534- i++, pud++, addr += PUD_SIZE) {
23535- pmd_t *pmd = pmds[i];
23536+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23537+ i++, pyd++, addr += PYD_SIZE) {
23538+ pxd_t *pxd = pxds[i];
23539
23540 if (i >= KERNEL_PGD_BOUNDARY)
23541- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23542- sizeof(pmd_t) * PTRS_PER_PMD);
23543+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23544+ sizeof(pxd_t) * PTRS_PER_PMD);
23545
23546- pud_populate(mm, pud, pmd);
23547+ pyd_populate(mm, pyd, pxd);
23548 }
23549 }
23550
23551 pgd_t *pgd_alloc(struct mm_struct *mm)
23552 {
23553 pgd_t *pgd;
23554- pmd_t *pmds[PREALLOCATED_PMDS];
23555+ pxd_t *pxds[PREALLOCATED_PXDS];
23556
23557 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23558
23559@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23560
23561 mm->pgd = pgd;
23562
23563- if (preallocate_pmds(pmds) != 0)
23564+ if (preallocate_pxds(pxds) != 0)
23565 goto out_free_pgd;
23566
23567 if (paravirt_pgd_alloc(mm) != 0)
23568- goto out_free_pmds;
23569+ goto out_free_pxds;
23570
23571 /*
23572 * Make sure that pre-populating the pmds is atomic with
23573@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23574 spin_lock(&pgd_lock);
23575
23576 pgd_ctor(mm, pgd);
23577- pgd_prepopulate_pmd(mm, pgd, pmds);
23578+ pgd_prepopulate_pxd(mm, pgd, pxds);
23579
23580 spin_unlock(&pgd_lock);
23581
23582 return pgd;
23583
23584-out_free_pmds:
23585- free_pmds(pmds);
23586+out_free_pxds:
23587+ free_pxds(pxds);
23588 out_free_pgd:
23589 free_page((unsigned long)pgd);
23590 out:
23591@@ -295,7 +344,7 @@ out:
23592
23593 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23594 {
23595- pgd_mop_up_pmds(mm, pgd);
23596+ pgd_mop_up_pxds(mm, pgd);
23597 pgd_dtor(pgd);
23598 paravirt_pgd_free(mm, pgd);
23599 free_page((unsigned long)pgd);
23600diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23601index cac7184..09a39fa 100644
23602--- a/arch/x86/mm/pgtable_32.c
23603+++ b/arch/x86/mm/pgtable_32.c
23604@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23605 return;
23606 }
23607 pte = pte_offset_kernel(pmd, vaddr);
23608+
23609+ pax_open_kernel();
23610 if (pte_val(pteval))
23611 set_pte_at(&init_mm, vaddr, pte, pteval);
23612 else
23613 pte_clear(&init_mm, vaddr, pte);
23614+ pax_close_kernel();
23615
23616 /*
23617 * It's enough to flush this one mapping.
23618diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23619index 410531d..0f16030 100644
23620--- a/arch/x86/mm/setup_nx.c
23621+++ b/arch/x86/mm/setup_nx.c
23622@@ -5,8 +5,10 @@
23623 #include <asm/pgtable.h>
23624 #include <asm/proto.h>
23625
23626+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23627 static int disable_nx __cpuinitdata;
23628
23629+#ifndef CONFIG_PAX_PAGEEXEC
23630 /*
23631 * noexec = on|off
23632 *
23633@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23634 return 0;
23635 }
23636 early_param("noexec", noexec_setup);
23637+#endif
23638+
23639+#endif
23640
23641 void __cpuinit x86_configure_nx(void)
23642 {
23643+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23644 if (cpu_has_nx && !disable_nx)
23645 __supported_pte_mask |= _PAGE_NX;
23646 else
23647+#endif
23648 __supported_pte_mask &= ~_PAGE_NX;
23649 }
23650
23651diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23652index d6c0418..06a0ad5 100644
23653--- a/arch/x86/mm/tlb.c
23654+++ b/arch/x86/mm/tlb.c
23655@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23656 BUG();
23657 cpumask_clear_cpu(cpu,
23658 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23659+
23660+#ifndef CONFIG_PAX_PER_CPU_PGD
23661 load_cr3(swapper_pg_dir);
23662+#endif
23663+
23664 }
23665 EXPORT_SYMBOL_GPL(leave_mm);
23666
23667diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23668index 6687022..ceabcfa 100644
23669--- a/arch/x86/net/bpf_jit.S
23670+++ b/arch/x86/net/bpf_jit.S
23671@@ -9,6 +9,7 @@
23672 */
23673 #include <linux/linkage.h>
23674 #include <asm/dwarf2.h>
23675+#include <asm/alternative-asm.h>
23676
23677 /*
23678 * Calling convention :
23679@@ -35,6 +36,7 @@ sk_load_word:
23680 jle bpf_slow_path_word
23681 mov (SKBDATA,%rsi),%eax
23682 bswap %eax /* ntohl() */
23683+ pax_force_retaddr
23684 ret
23685
23686
23687@@ -53,6 +55,7 @@ sk_load_half:
23688 jle bpf_slow_path_half
23689 movzwl (SKBDATA,%rsi),%eax
23690 rol $8,%ax # ntohs()
23691+ pax_force_retaddr
23692 ret
23693
23694 sk_load_byte_ind:
23695@@ -66,6 +69,7 @@ sk_load_byte:
23696 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23697 jle bpf_slow_path_byte
23698 movzbl (SKBDATA,%rsi),%eax
23699+ pax_force_retaddr
23700 ret
23701
23702 /**
23703@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23704 movzbl (SKBDATA,%rsi),%ebx
23705 and $15,%bl
23706 shl $2,%bl
23707+ pax_force_retaddr
23708 ret
23709 CFI_ENDPROC
23710 ENDPROC(sk_load_byte_msh)
23711@@ -91,6 +96,7 @@ bpf_error:
23712 xor %eax,%eax
23713 mov -8(%rbp),%rbx
23714 leaveq
23715+ pax_force_retaddr
23716 ret
23717
23718 /* rsi contains offset and can be scratched */
23719@@ -113,6 +119,7 @@ bpf_slow_path_word:
23720 js bpf_error
23721 mov -12(%rbp),%eax
23722 bswap %eax
23723+ pax_force_retaddr
23724 ret
23725
23726 bpf_slow_path_half:
23727@@ -121,12 +128,14 @@ bpf_slow_path_half:
23728 mov -12(%rbp),%ax
23729 rol $8,%ax
23730 movzwl %ax,%eax
23731+ pax_force_retaddr
23732 ret
23733
23734 bpf_slow_path_byte:
23735 bpf_slow_path_common(1)
23736 js bpf_error
23737 movzbl -12(%rbp),%eax
23738+ pax_force_retaddr
23739 ret
23740
23741 bpf_slow_path_byte_msh:
23742@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23743 and $15,%al
23744 shl $2,%al
23745 xchg %eax,%ebx
23746+ pax_force_retaddr
23747 ret
23748diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23749index 7c1b765..8c072c6 100644
23750--- a/arch/x86/net/bpf_jit_comp.c
23751+++ b/arch/x86/net/bpf_jit_comp.c
23752@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23753 set_fs(old_fs);
23754 }
23755
23756+struct bpf_jit_work {
23757+ struct work_struct work;
23758+ void *image;
23759+};
23760
23761 void bpf_jit_compile(struct sk_filter *fp)
23762 {
23763@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23764 if (addrs == NULL)
23765 return;
23766
23767+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23768+ if (!fp->work)
23769+ goto out;
23770+
23771 /* Before first pass, make a rough estimation of addrs[]
23772 * each bpf instruction is translated to less than 64 bytes
23773 */
23774@@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
23775 func = sk_load_word;
23776 common_load: seen |= SEEN_DATAREF;
23777 if ((int)K < 0)
23778- goto out;
23779+ goto error;
23780 t_offset = func - (image + addrs[i]);
23781 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
23782 EMIT1_off32(0xe8, t_offset); /* call */
23783@@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23784 break;
23785 default:
23786 /* hmm, too complex filter, give up with jit compiler */
23787- goto out;
23788+ goto error;
23789 }
23790 ilen = prog - temp;
23791 if (image) {
23792 if (unlikely(proglen + ilen > oldproglen)) {
23793 pr_err("bpb_jit_compile fatal error\n");
23794- kfree(addrs);
23795- module_free(NULL, image);
23796- return;
23797+ module_free_exec(NULL, image);
23798+ goto error;
23799 }
23800+ pax_open_kernel();
23801 memcpy(image + proglen, temp, ilen);
23802+ pax_close_kernel();
23803 }
23804 proglen += ilen;
23805 addrs[i] = proglen;
23806@@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23807 break;
23808 }
23809 if (proglen == oldproglen) {
23810- image = module_alloc(max_t(unsigned int,
23811- proglen,
23812- sizeof(struct work_struct)));
23813+ image = module_alloc_exec(proglen);
23814 if (!image)
23815- goto out;
23816+ goto error;
23817 }
23818 oldproglen = proglen;
23819 }
23820@@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23821 bpf_flush_icache(image, image + proglen);
23822
23823 fp->bpf_func = (void *)image;
23824- }
23825+ } else
23826+error:
23827+ kfree(fp->work);
23828+
23829 out:
23830 kfree(addrs);
23831 return;
23832@@ -645,18 +655,20 @@ out:
23833
23834 static void jit_free_defer(struct work_struct *arg)
23835 {
23836- module_free(NULL, arg);
23837+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
23838+ kfree(arg);
23839 }
23840
23841 /* run from softirq, we must use a work_struct to call
23842- * module_free() from process context
23843+ * module_free_exec() from process context
23844 */
23845 void bpf_jit_free(struct sk_filter *fp)
23846 {
23847 if (fp->bpf_func != sk_run_filter) {
23848- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23849+ struct work_struct *work = &fp->work->work;
23850
23851 INIT_WORK(work, jit_free_defer);
23852+ fp->work->image = fp->bpf_func;
23853 schedule_work(work);
23854 }
23855 }
23856diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23857index bff89df..377758a 100644
23858--- a/arch/x86/oprofile/backtrace.c
23859+++ b/arch/x86/oprofile/backtrace.c
23860@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23861 struct stack_frame_ia32 *fp;
23862 unsigned long bytes;
23863
23864- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23865+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23866 if (bytes != sizeof(bufhead))
23867 return NULL;
23868
23869- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23870+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23871
23872 oprofile_add_trace(bufhead[0].return_address);
23873
23874@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23875 struct stack_frame bufhead[2];
23876 unsigned long bytes;
23877
23878- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23879+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23880 if (bytes != sizeof(bufhead))
23881 return NULL;
23882
23883@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23884 {
23885 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23886
23887- if (!user_mode_vm(regs)) {
23888+ if (!user_mode(regs)) {
23889 unsigned long stack = kernel_stack_pointer(regs);
23890 if (depth)
23891 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23892diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23893index cb29191..036766d 100644
23894--- a/arch/x86/pci/mrst.c
23895+++ b/arch/x86/pci/mrst.c
23896@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23897 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23898 pci_mmcfg_late_init();
23899 pcibios_enable_irq = mrst_pci_irq_enable;
23900- pci_root_ops = pci_mrst_ops;
23901+ pax_open_kernel();
23902+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23903+ pax_close_kernel();
23904 /* Continue with standard init */
23905 return 1;
23906 }
23907diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23908index db0e9a5..0372c14 100644
23909--- a/arch/x86/pci/pcbios.c
23910+++ b/arch/x86/pci/pcbios.c
23911@@ -79,50 +79,93 @@ union bios32 {
23912 static struct {
23913 unsigned long address;
23914 unsigned short segment;
23915-} bios32_indirect = { 0, __KERNEL_CS };
23916+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23917
23918 /*
23919 * Returns the entry point for the given service, NULL on error
23920 */
23921
23922-static unsigned long bios32_service(unsigned long service)
23923+static unsigned long __devinit bios32_service(unsigned long service)
23924 {
23925 unsigned char return_code; /* %al */
23926 unsigned long address; /* %ebx */
23927 unsigned long length; /* %ecx */
23928 unsigned long entry; /* %edx */
23929 unsigned long flags;
23930+ struct desc_struct d, *gdt;
23931
23932 local_irq_save(flags);
23933- __asm__("lcall *(%%edi); cld"
23934+
23935+ gdt = get_cpu_gdt_table(smp_processor_id());
23936+
23937+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23938+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23939+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23940+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23941+
23942+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23943 : "=a" (return_code),
23944 "=b" (address),
23945 "=c" (length),
23946 "=d" (entry)
23947 : "0" (service),
23948 "1" (0),
23949- "D" (&bios32_indirect));
23950+ "D" (&bios32_indirect),
23951+ "r"(__PCIBIOS_DS)
23952+ : "memory");
23953+
23954+ pax_open_kernel();
23955+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23956+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23957+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23958+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23959+ pax_close_kernel();
23960+
23961 local_irq_restore(flags);
23962
23963 switch (return_code) {
23964- case 0:
23965- return address + entry;
23966- case 0x80: /* Not present */
23967- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23968- return 0;
23969- default: /* Shouldn't happen */
23970- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23971- service, return_code);
23972+ case 0: {
23973+ int cpu;
23974+ unsigned char flags;
23975+
23976+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23977+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23978+ printk(KERN_WARNING "bios32_service: not valid\n");
23979 return 0;
23980+ }
23981+ address = address + PAGE_OFFSET;
23982+ length += 16UL; /* some BIOSs underreport this... */
23983+ flags = 4;
23984+ if (length >= 64*1024*1024) {
23985+ length >>= PAGE_SHIFT;
23986+ flags |= 8;
23987+ }
23988+
23989+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23990+ gdt = get_cpu_gdt_table(cpu);
23991+ pack_descriptor(&d, address, length, 0x9b, flags);
23992+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23993+ pack_descriptor(&d, address, length, 0x93, flags);
23994+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23995+ }
23996+ return entry;
23997+ }
23998+ case 0x80: /* Not present */
23999+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24000+ return 0;
24001+ default: /* Shouldn't happen */
24002+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24003+ service, return_code);
24004+ return 0;
24005 }
24006 }
24007
24008 static struct {
24009 unsigned long address;
24010 unsigned short segment;
24011-} pci_indirect = { 0, __KERNEL_CS };
24012+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24013
24014-static int pci_bios_present;
24015+static int pci_bios_present __read_only;
24016
24017 static int __devinit check_pcibios(void)
24018 {
24019@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24020 unsigned long flags, pcibios_entry;
24021
24022 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24023- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24024+ pci_indirect.address = pcibios_entry;
24025
24026 local_irq_save(flags);
24027- __asm__(
24028- "lcall *(%%edi); cld\n\t"
24029+ __asm__("movw %w6, %%ds\n\t"
24030+ "lcall *%%ss:(%%edi); cld\n\t"
24031+ "push %%ss\n\t"
24032+ "pop %%ds\n\t"
24033 "jc 1f\n\t"
24034 "xor %%ah, %%ah\n"
24035 "1:"
24036@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24037 "=b" (ebx),
24038 "=c" (ecx)
24039 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24040- "D" (&pci_indirect)
24041+ "D" (&pci_indirect),
24042+ "r" (__PCIBIOS_DS)
24043 : "memory");
24044 local_irq_restore(flags);
24045
24046@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24047
24048 switch (len) {
24049 case 1:
24050- __asm__("lcall *(%%esi); cld\n\t"
24051+ __asm__("movw %w6, %%ds\n\t"
24052+ "lcall *%%ss:(%%esi); cld\n\t"
24053+ "push %%ss\n\t"
24054+ "pop %%ds\n\t"
24055 "jc 1f\n\t"
24056 "xor %%ah, %%ah\n"
24057 "1:"
24058@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24059 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24060 "b" (bx),
24061 "D" ((long)reg),
24062- "S" (&pci_indirect));
24063+ "S" (&pci_indirect),
24064+ "r" (__PCIBIOS_DS));
24065 /*
24066 * Zero-extend the result beyond 8 bits, do not trust the
24067 * BIOS having done it:
24068@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24069 *value &= 0xff;
24070 break;
24071 case 2:
24072- __asm__("lcall *(%%esi); cld\n\t"
24073+ __asm__("movw %w6, %%ds\n\t"
24074+ "lcall *%%ss:(%%esi); cld\n\t"
24075+ "push %%ss\n\t"
24076+ "pop %%ds\n\t"
24077 "jc 1f\n\t"
24078 "xor %%ah, %%ah\n"
24079 "1:"
24080@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24081 : "1" (PCIBIOS_READ_CONFIG_WORD),
24082 "b" (bx),
24083 "D" ((long)reg),
24084- "S" (&pci_indirect));
24085+ "S" (&pci_indirect),
24086+ "r" (__PCIBIOS_DS));
24087 /*
24088 * Zero-extend the result beyond 16 bits, do not trust the
24089 * BIOS having done it:
24090@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24091 *value &= 0xffff;
24092 break;
24093 case 4:
24094- __asm__("lcall *(%%esi); cld\n\t"
24095+ __asm__("movw %w6, %%ds\n\t"
24096+ "lcall *%%ss:(%%esi); cld\n\t"
24097+ "push %%ss\n\t"
24098+ "pop %%ds\n\t"
24099 "jc 1f\n\t"
24100 "xor %%ah, %%ah\n"
24101 "1:"
24102@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24103 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24104 "b" (bx),
24105 "D" ((long)reg),
24106- "S" (&pci_indirect));
24107+ "S" (&pci_indirect),
24108+ "r" (__PCIBIOS_DS));
24109 break;
24110 }
24111
24112@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24113
24114 switch (len) {
24115 case 1:
24116- __asm__("lcall *(%%esi); cld\n\t"
24117+ __asm__("movw %w6, %%ds\n\t"
24118+ "lcall *%%ss:(%%esi); cld\n\t"
24119+ "push %%ss\n\t"
24120+ "pop %%ds\n\t"
24121 "jc 1f\n\t"
24122 "xor %%ah, %%ah\n"
24123 "1:"
24124@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24125 "c" (value),
24126 "b" (bx),
24127 "D" ((long)reg),
24128- "S" (&pci_indirect));
24129+ "S" (&pci_indirect),
24130+ "r" (__PCIBIOS_DS));
24131 break;
24132 case 2:
24133- __asm__("lcall *(%%esi); cld\n\t"
24134+ __asm__("movw %w6, %%ds\n\t"
24135+ "lcall *%%ss:(%%esi); cld\n\t"
24136+ "push %%ss\n\t"
24137+ "pop %%ds\n\t"
24138 "jc 1f\n\t"
24139 "xor %%ah, %%ah\n"
24140 "1:"
24141@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24142 "c" (value),
24143 "b" (bx),
24144 "D" ((long)reg),
24145- "S" (&pci_indirect));
24146+ "S" (&pci_indirect),
24147+ "r" (__PCIBIOS_DS));
24148 break;
24149 case 4:
24150- __asm__("lcall *(%%esi); cld\n\t"
24151+ __asm__("movw %w6, %%ds\n\t"
24152+ "lcall *%%ss:(%%esi); cld\n\t"
24153+ "push %%ss\n\t"
24154+ "pop %%ds\n\t"
24155 "jc 1f\n\t"
24156 "xor %%ah, %%ah\n"
24157 "1:"
24158@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24159 "c" (value),
24160 "b" (bx),
24161 "D" ((long)reg),
24162- "S" (&pci_indirect));
24163+ "S" (&pci_indirect),
24164+ "r" (__PCIBIOS_DS));
24165 break;
24166 }
24167
24168@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24169
24170 DBG("PCI: Fetching IRQ routing table... ");
24171 __asm__("push %%es\n\t"
24172+ "movw %w8, %%ds\n\t"
24173 "push %%ds\n\t"
24174 "pop %%es\n\t"
24175- "lcall *(%%esi); cld\n\t"
24176+ "lcall *%%ss:(%%esi); cld\n\t"
24177 "pop %%es\n\t"
24178+ "push %%ss\n\t"
24179+ "pop %%ds\n"
24180 "jc 1f\n\t"
24181 "xor %%ah, %%ah\n"
24182 "1:"
24183@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24184 "1" (0),
24185 "D" ((long) &opt),
24186 "S" (&pci_indirect),
24187- "m" (opt)
24188+ "m" (opt),
24189+ "r" (__PCIBIOS_DS)
24190 : "memory");
24191 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24192 if (ret & 0xff00)
24193@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24194 {
24195 int ret;
24196
24197- __asm__("lcall *(%%esi); cld\n\t"
24198+ __asm__("movw %w5, %%ds\n\t"
24199+ "lcall *%%ss:(%%esi); cld\n\t"
24200+ "push %%ss\n\t"
24201+ "pop %%ds\n"
24202 "jc 1f\n\t"
24203 "xor %%ah, %%ah\n"
24204 "1:"
24205@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24206 : "0" (PCIBIOS_SET_PCI_HW_INT),
24207 "b" ((dev->bus->number << 8) | dev->devfn),
24208 "c" ((irq << 8) | (pin + 10)),
24209- "S" (&pci_indirect));
24210+ "S" (&pci_indirect),
24211+ "r" (__PCIBIOS_DS));
24212 return !(ret & 0xff00);
24213 }
24214 EXPORT_SYMBOL(pcibios_set_irq_routing);
24215diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24216index 40e4469..1ab536e 100644
24217--- a/arch/x86/platform/efi/efi_32.c
24218+++ b/arch/x86/platform/efi/efi_32.c
24219@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24220 {
24221 struct desc_ptr gdt_descr;
24222
24223+#ifdef CONFIG_PAX_KERNEXEC
24224+ struct desc_struct d;
24225+#endif
24226+
24227 local_irq_save(efi_rt_eflags);
24228
24229 load_cr3(initial_page_table);
24230 __flush_tlb_all();
24231
24232+#ifdef CONFIG_PAX_KERNEXEC
24233+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24234+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24235+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24236+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24237+#endif
24238+
24239 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24240 gdt_descr.size = GDT_SIZE - 1;
24241 load_gdt(&gdt_descr);
24242@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24243 {
24244 struct desc_ptr gdt_descr;
24245
24246+#ifdef CONFIG_PAX_KERNEXEC
24247+ struct desc_struct d;
24248+
24249+ memset(&d, 0, sizeof d);
24250+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24251+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24252+#endif
24253+
24254 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24255 gdt_descr.size = GDT_SIZE - 1;
24256 load_gdt(&gdt_descr);
24257diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24258index fbe66e6..c5c0dd2 100644
24259--- a/arch/x86/platform/efi/efi_stub_32.S
24260+++ b/arch/x86/platform/efi/efi_stub_32.S
24261@@ -6,7 +6,9 @@
24262 */
24263
24264 #include <linux/linkage.h>
24265+#include <linux/init.h>
24266 #include <asm/page_types.h>
24267+#include <asm/segment.h>
24268
24269 /*
24270 * efi_call_phys(void *, ...) is a function with variable parameters.
24271@@ -20,7 +22,7 @@
24272 * service functions will comply with gcc calling convention, too.
24273 */
24274
24275-.text
24276+__INIT
24277 ENTRY(efi_call_phys)
24278 /*
24279 * 0. The function can only be called in Linux kernel. So CS has been
24280@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24281 * The mapping of lower virtual memory has been created in prelog and
24282 * epilog.
24283 */
24284- movl $1f, %edx
24285- subl $__PAGE_OFFSET, %edx
24286- jmp *%edx
24287+ movl $(__KERNEXEC_EFI_DS), %edx
24288+ mov %edx, %ds
24289+ mov %edx, %es
24290+ mov %edx, %ss
24291+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24292 1:
24293
24294 /*
24295@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24296 * parameter 2, ..., param n. To make things easy, we save the return
24297 * address of efi_call_phys in a global variable.
24298 */
24299- popl %edx
24300- movl %edx, saved_return_addr
24301- /* get the function pointer into ECX*/
24302- popl %ecx
24303- movl %ecx, efi_rt_function_ptr
24304- movl $2f, %edx
24305- subl $__PAGE_OFFSET, %edx
24306- pushl %edx
24307+ popl (saved_return_addr)
24308+ popl (efi_rt_function_ptr)
24309
24310 /*
24311 * 3. Clear PG bit in %CR0.
24312@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24313 /*
24314 * 5. Call the physical function.
24315 */
24316- jmp *%ecx
24317+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24318
24319-2:
24320 /*
24321 * 6. After EFI runtime service returns, control will return to
24322 * following instruction. We'd better readjust stack pointer first.
24323@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24324 movl %cr0, %edx
24325 orl $0x80000000, %edx
24326 movl %edx, %cr0
24327- jmp 1f
24328-1:
24329+
24330 /*
24331 * 8. Now restore the virtual mode from flat mode by
24332 * adding EIP with PAGE_OFFSET.
24333 */
24334- movl $1f, %edx
24335- jmp *%edx
24336+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24337 1:
24338+ movl $(__KERNEL_DS), %edx
24339+ mov %edx, %ds
24340+ mov %edx, %es
24341+ mov %edx, %ss
24342
24343 /*
24344 * 9. Balance the stack. And because EAX contain the return value,
24345 * we'd better not clobber it.
24346 */
24347- leal efi_rt_function_ptr, %edx
24348- movl (%edx), %ecx
24349- pushl %ecx
24350+ pushl (efi_rt_function_ptr)
24351
24352 /*
24353- * 10. Push the saved return address onto the stack and return.
24354+ * 10. Return to the saved return address.
24355 */
24356- leal saved_return_addr, %edx
24357- movl (%edx), %ecx
24358- pushl %ecx
24359- ret
24360+ jmpl *(saved_return_addr)
24361 ENDPROC(efi_call_phys)
24362 .previous
24363
24364-.data
24365+__INITDATA
24366 saved_return_addr:
24367 .long 0
24368 efi_rt_function_ptr:
24369diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24370index 4c07cca..2c8427d 100644
24371--- a/arch/x86/platform/efi/efi_stub_64.S
24372+++ b/arch/x86/platform/efi/efi_stub_64.S
24373@@ -7,6 +7,7 @@
24374 */
24375
24376 #include <linux/linkage.h>
24377+#include <asm/alternative-asm.h>
24378
24379 #define SAVE_XMM \
24380 mov %rsp, %rax; \
24381@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24382 call *%rdi
24383 addq $32, %rsp
24384 RESTORE_XMM
24385+ pax_force_retaddr 0, 1
24386 ret
24387 ENDPROC(efi_call0)
24388
24389@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24390 call *%rdi
24391 addq $32, %rsp
24392 RESTORE_XMM
24393+ pax_force_retaddr 0, 1
24394 ret
24395 ENDPROC(efi_call1)
24396
24397@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24398 call *%rdi
24399 addq $32, %rsp
24400 RESTORE_XMM
24401+ pax_force_retaddr 0, 1
24402 ret
24403 ENDPROC(efi_call2)
24404
24405@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24406 call *%rdi
24407 addq $32, %rsp
24408 RESTORE_XMM
24409+ pax_force_retaddr 0, 1
24410 ret
24411 ENDPROC(efi_call3)
24412
24413@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24414 call *%rdi
24415 addq $32, %rsp
24416 RESTORE_XMM
24417+ pax_force_retaddr 0, 1
24418 ret
24419 ENDPROC(efi_call4)
24420
24421@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24422 call *%rdi
24423 addq $48, %rsp
24424 RESTORE_XMM
24425+ pax_force_retaddr 0, 1
24426 ret
24427 ENDPROC(efi_call5)
24428
24429@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24430 call *%rdi
24431 addq $48, %rsp
24432 RESTORE_XMM
24433+ pax_force_retaddr 0, 1
24434 ret
24435 ENDPROC(efi_call6)
24436diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24437index ad4ec1c..686479e 100644
24438--- a/arch/x86/platform/mrst/mrst.c
24439+++ b/arch/x86/platform/mrst/mrst.c
24440@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24441 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24442 int sfi_mrtc_num;
24443
24444-static void mrst_power_off(void)
24445+static __noreturn void mrst_power_off(void)
24446 {
24447 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24448 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24449+ BUG();
24450 }
24451
24452-static void mrst_reboot(void)
24453+static __noreturn void mrst_reboot(void)
24454 {
24455 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24456 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24457 else
24458 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24459+ BUG();
24460 }
24461
24462 /* parse all the mtimer info to a static mtimer array */
24463diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24464index f10c0af..3ec1f95 100644
24465--- a/arch/x86/power/cpu.c
24466+++ b/arch/x86/power/cpu.c
24467@@ -131,7 +131,7 @@ static void do_fpu_end(void)
24468 static void fix_processor_context(void)
24469 {
24470 int cpu = smp_processor_id();
24471- struct tss_struct *t = &per_cpu(init_tss, cpu);
24472+ struct tss_struct *t = init_tss + cpu;
24473
24474 set_tss_desc(cpu, t); /*
24475 * This just modifies memory; should not be
24476@@ -141,7 +141,9 @@ static void fix_processor_context(void)
24477 */
24478
24479 #ifdef CONFIG_X86_64
24480+ pax_open_kernel();
24481 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24482+ pax_close_kernel();
24483
24484 syscall_init(); /* This sets MSR_*STAR and related */
24485 #endif
24486diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24487index 5d17950..2253fc9 100644
24488--- a/arch/x86/vdso/Makefile
24489+++ b/arch/x86/vdso/Makefile
24490@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24491 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24492 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24493
24494-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24495+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24496 GCOV_PROFILE := n
24497
24498 #
24499diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24500index 468d591..8e80a0a 100644
24501--- a/arch/x86/vdso/vdso32-setup.c
24502+++ b/arch/x86/vdso/vdso32-setup.c
24503@@ -25,6 +25,7 @@
24504 #include <asm/tlbflush.h>
24505 #include <asm/vdso.h>
24506 #include <asm/proto.h>
24507+#include <asm/mman.h>
24508
24509 enum {
24510 VDSO_DISABLED = 0,
24511@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24512 void enable_sep_cpu(void)
24513 {
24514 int cpu = get_cpu();
24515- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24516+ struct tss_struct *tss = init_tss + cpu;
24517
24518 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24519 put_cpu();
24520@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24521 gate_vma.vm_start = FIXADDR_USER_START;
24522 gate_vma.vm_end = FIXADDR_USER_END;
24523 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24524- gate_vma.vm_page_prot = __P101;
24525+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24526 /*
24527 * Make sure the vDSO gets into every core dump.
24528 * Dumping its contents makes post-mortem fully interpretable later
24529@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24530 if (compat)
24531 addr = VDSO_HIGH_BASE;
24532 else {
24533- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24534+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24535 if (IS_ERR_VALUE(addr)) {
24536 ret = addr;
24537 goto up_fail;
24538 }
24539 }
24540
24541- current->mm->context.vdso = (void *)addr;
24542+ current->mm->context.vdso = addr;
24543
24544 if (compat_uses_vma || !compat) {
24545 /*
24546@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24547 }
24548
24549 current_thread_info()->sysenter_return =
24550- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24551+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24552
24553 up_fail:
24554 if (ret)
24555- current->mm->context.vdso = NULL;
24556+ current->mm->context.vdso = 0;
24557
24558 up_write(&mm->mmap_sem);
24559
24560@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24561
24562 const char *arch_vma_name(struct vm_area_struct *vma)
24563 {
24564- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24565+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24566 return "[vdso]";
24567+
24568+#ifdef CONFIG_PAX_SEGMEXEC
24569+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24570+ return "[vdso]";
24571+#endif
24572+
24573 return NULL;
24574 }
24575
24576@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24577 * Check to see if the corresponding task was created in compat vdso
24578 * mode.
24579 */
24580- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24581+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24582 return &gate_vma;
24583 return NULL;
24584 }
24585diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24586index 153407c..611cba9 100644
24587--- a/arch/x86/vdso/vma.c
24588+++ b/arch/x86/vdso/vma.c
24589@@ -16,8 +16,6 @@
24590 #include <asm/vdso.h>
24591 #include <asm/page.h>
24592
24593-unsigned int __read_mostly vdso_enabled = 1;
24594-
24595 extern char vdso_start[], vdso_end[];
24596 extern unsigned short vdso_sync_cpuid;
24597
24598@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24599 * unaligned here as a result of stack start randomization.
24600 */
24601 addr = PAGE_ALIGN(addr);
24602- addr = align_addr(addr, NULL, ALIGN_VDSO);
24603
24604 return addr;
24605 }
24606@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24607 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24608 {
24609 struct mm_struct *mm = current->mm;
24610- unsigned long addr;
24611+ unsigned long addr = 0;
24612 int ret;
24613
24614- if (!vdso_enabled)
24615- return 0;
24616-
24617 down_write(&mm->mmap_sem);
24618+
24619+#ifdef CONFIG_PAX_RANDMMAP
24620+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24621+#endif
24622+
24623 addr = vdso_addr(mm->start_stack, vdso_size);
24624+ addr = align_addr(addr, NULL, ALIGN_VDSO);
24625 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24626 if (IS_ERR_VALUE(addr)) {
24627 ret = addr;
24628 goto up_fail;
24629 }
24630
24631- current->mm->context.vdso = (void *)addr;
24632+ mm->context.vdso = addr;
24633
24634 ret = install_special_mapping(mm, addr, vdso_size,
24635 VM_READ|VM_EXEC|
24636 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24637 VM_ALWAYSDUMP,
24638 vdso_pages);
24639- if (ret) {
24640- current->mm->context.vdso = NULL;
24641- goto up_fail;
24642- }
24643+
24644+ if (ret)
24645+ mm->context.vdso = 0;
24646
24647 up_fail:
24648 up_write(&mm->mmap_sem);
24649 return ret;
24650 }
24651-
24652-static __init int vdso_setup(char *s)
24653-{
24654- vdso_enabled = simple_strtoul(s, NULL, 0);
24655- return 0;
24656-}
24657-__setup("vdso=", vdso_setup);
24658diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24659index 1f92865..c843b20 100644
24660--- a/arch/x86/xen/enlighten.c
24661+++ b/arch/x86/xen/enlighten.c
24662@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24663
24664 struct shared_info xen_dummy_shared_info;
24665
24666-void *xen_initial_gdt;
24667-
24668 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24669 __read_mostly int xen_have_vector_callback;
24670 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24671@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24672 #endif
24673 };
24674
24675-static void xen_reboot(int reason)
24676+static __noreturn void xen_reboot(int reason)
24677 {
24678 struct sched_shutdown r = { .reason = reason };
24679
24680@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24681 BUG();
24682 }
24683
24684-static void xen_restart(char *msg)
24685+static __noreturn void xen_restart(char *msg)
24686 {
24687 xen_reboot(SHUTDOWN_reboot);
24688 }
24689
24690-static void xen_emergency_restart(void)
24691+static __noreturn void xen_emergency_restart(void)
24692 {
24693 xen_reboot(SHUTDOWN_reboot);
24694 }
24695
24696-static void xen_machine_halt(void)
24697+static __noreturn void xen_machine_halt(void)
24698 {
24699 xen_reboot(SHUTDOWN_poweroff);
24700 }
24701@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24702 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24703
24704 /* Work out if we support NX */
24705- x86_configure_nx();
24706+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24707+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24708+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24709+ unsigned l, h;
24710+
24711+ __supported_pte_mask |= _PAGE_NX;
24712+ rdmsr(MSR_EFER, l, h);
24713+ l |= EFER_NX;
24714+ wrmsr(MSR_EFER, l, h);
24715+ }
24716+#endif
24717
24718 xen_setup_features();
24719
24720@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24721
24722 machine_ops = xen_machine_ops;
24723
24724- /*
24725- * The only reliable way to retain the initial address of the
24726- * percpu gdt_page is to remember it here, so we can go and
24727- * mark it RW later, when the initial percpu area is freed.
24728- */
24729- xen_initial_gdt = &per_cpu(gdt_page, 0);
24730-
24731 xen_smp_init();
24732
24733 #ifdef CONFIG_ACPI_NUMA
24734diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24735index 87f6673..e2555a6 100644
24736--- a/arch/x86/xen/mmu.c
24737+++ b/arch/x86/xen/mmu.c
24738@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24739 convert_pfn_mfn(init_level4_pgt);
24740 convert_pfn_mfn(level3_ident_pgt);
24741 convert_pfn_mfn(level3_kernel_pgt);
24742+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24743+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24744+ convert_pfn_mfn(level3_vmemmap_pgt);
24745
24746 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24747 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24748@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24749 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24750 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24751 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24752+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24753+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24754+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24755 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24756+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24757 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24758 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24759
24760@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24761 pv_mmu_ops.set_pud = xen_set_pud;
24762 #if PAGETABLE_LEVELS == 4
24763 pv_mmu_ops.set_pgd = xen_set_pgd;
24764+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24765 #endif
24766
24767 /* This will work as long as patching hasn't happened yet
24768@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24769 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24770 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24771 .set_pgd = xen_set_pgd_hyper,
24772+ .set_pgd_batched = xen_set_pgd_hyper,
24773
24774 .alloc_pud = xen_alloc_pmd_init,
24775 .release_pud = xen_release_pmd_init,
24776diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24777index 041d4fe..7666b7e 100644
24778--- a/arch/x86/xen/smp.c
24779+++ b/arch/x86/xen/smp.c
24780@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24781 {
24782 BUG_ON(smp_processor_id() != 0);
24783 native_smp_prepare_boot_cpu();
24784-
24785- /* We've switched to the "real" per-cpu gdt, so make sure the
24786- old memory can be recycled */
24787- make_lowmem_page_readwrite(xen_initial_gdt);
24788-
24789 xen_filter_cpu_maps();
24790 xen_setup_vcpu_info_placement();
24791 }
24792@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24793 gdt = get_cpu_gdt_table(cpu);
24794
24795 ctxt->flags = VGCF_IN_KERNEL;
24796- ctxt->user_regs.ds = __USER_DS;
24797- ctxt->user_regs.es = __USER_DS;
24798+ ctxt->user_regs.ds = __KERNEL_DS;
24799+ ctxt->user_regs.es = __KERNEL_DS;
24800 ctxt->user_regs.ss = __KERNEL_DS;
24801 #ifdef CONFIG_X86_32
24802 ctxt->user_regs.fs = __KERNEL_PERCPU;
24803- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24804+ savesegment(gs, ctxt->user_regs.gs);
24805 #else
24806 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24807 #endif
24808@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24809 int rc;
24810
24811 per_cpu(current_task, cpu) = idle;
24812+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24813 #ifdef CONFIG_X86_32
24814 irq_ctx_init(cpu);
24815 #else
24816 clear_tsk_thread_flag(idle, TIF_FORK);
24817- per_cpu(kernel_stack, cpu) =
24818- (unsigned long)task_stack_page(idle) -
24819- KERNEL_STACK_OFFSET + THREAD_SIZE;
24820+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24821 #endif
24822 xen_setup_runstate_info(cpu);
24823 xen_setup_timer(cpu);
24824diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24825index b040b0e..8cc4fe0 100644
24826--- a/arch/x86/xen/xen-asm_32.S
24827+++ b/arch/x86/xen/xen-asm_32.S
24828@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24829 ESP_OFFSET=4 # bytes pushed onto stack
24830
24831 /*
24832- * Store vcpu_info pointer for easy access. Do it this way to
24833- * avoid having to reload %fs
24834+ * Store vcpu_info pointer for easy access.
24835 */
24836 #ifdef CONFIG_SMP
24837- GET_THREAD_INFO(%eax)
24838- movl TI_cpu(%eax), %eax
24839- movl __per_cpu_offset(,%eax,4), %eax
24840- mov xen_vcpu(%eax), %eax
24841+ push %fs
24842+ mov $(__KERNEL_PERCPU), %eax
24843+ mov %eax, %fs
24844+ mov PER_CPU_VAR(xen_vcpu), %eax
24845+ pop %fs
24846 #else
24847 movl xen_vcpu, %eax
24848 #endif
24849diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24850index aaa7291..3f77960 100644
24851--- a/arch/x86/xen/xen-head.S
24852+++ b/arch/x86/xen/xen-head.S
24853@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24854 #ifdef CONFIG_X86_32
24855 mov %esi,xen_start_info
24856 mov $init_thread_union+THREAD_SIZE,%esp
24857+#ifdef CONFIG_SMP
24858+ movl $cpu_gdt_table,%edi
24859+ movl $__per_cpu_load,%eax
24860+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24861+ rorl $16,%eax
24862+ movb %al,__KERNEL_PERCPU + 4(%edi)
24863+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24864+ movl $__per_cpu_end - 1,%eax
24865+ subl $__per_cpu_start,%eax
24866+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24867+#endif
24868 #else
24869 mov %rsi,xen_start_info
24870 mov $init_thread_union+THREAD_SIZE,%rsp
24871diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24872index b095739..8c17bcd 100644
24873--- a/arch/x86/xen/xen-ops.h
24874+++ b/arch/x86/xen/xen-ops.h
24875@@ -10,8 +10,6 @@
24876 extern const char xen_hypervisor_callback[];
24877 extern const char xen_failsafe_callback[];
24878
24879-extern void *xen_initial_gdt;
24880-
24881 struct trap_info;
24882 void xen_copy_trap_info(struct trap_info *traps);
24883
24884diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24885index 58916af..9cb880b 100644
24886--- a/block/blk-iopoll.c
24887+++ b/block/blk-iopoll.c
24888@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24889 }
24890 EXPORT_SYMBOL(blk_iopoll_complete);
24891
24892-static void blk_iopoll_softirq(struct softirq_action *h)
24893+static void blk_iopoll_softirq(void)
24894 {
24895 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24896 int rearm = 0, budget = blk_iopoll_budget;
24897diff --git a/block/blk-map.c b/block/blk-map.c
24898index 623e1cd..ca1e109 100644
24899--- a/block/blk-map.c
24900+++ b/block/blk-map.c
24901@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24902 if (!len || !kbuf)
24903 return -EINVAL;
24904
24905- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24906+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24907 if (do_copy)
24908 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24909 else
24910diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24911index 1366a89..e17f54b 100644
24912--- a/block/blk-softirq.c
24913+++ b/block/blk-softirq.c
24914@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24915 * Softirq action handler - move entries to local list and loop over them
24916 * while passing them to the queue registered handler.
24917 */
24918-static void blk_done_softirq(struct softirq_action *h)
24919+static void blk_done_softirq(void)
24920 {
24921 struct list_head *cpu_list, local_list;
24922
24923diff --git a/block/bsg.c b/block/bsg.c
24924index 702f131..37808bf 100644
24925--- a/block/bsg.c
24926+++ b/block/bsg.c
24927@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24928 struct sg_io_v4 *hdr, struct bsg_device *bd,
24929 fmode_t has_write_perm)
24930 {
24931+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24932+ unsigned char *cmdptr;
24933+
24934 if (hdr->request_len > BLK_MAX_CDB) {
24935 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24936 if (!rq->cmd)
24937 return -ENOMEM;
24938- }
24939+ cmdptr = rq->cmd;
24940+ } else
24941+ cmdptr = tmpcmd;
24942
24943- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24944+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24945 hdr->request_len))
24946 return -EFAULT;
24947
24948+ if (cmdptr != rq->cmd)
24949+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24950+
24951 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24952 if (blk_verify_command(rq->cmd, has_write_perm))
24953 return -EPERM;
24954diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24955index 7b72502..646105c 100644
24956--- a/block/compat_ioctl.c
24957+++ b/block/compat_ioctl.c
24958@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24959 err |= __get_user(f->spec1, &uf->spec1);
24960 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24961 err |= __get_user(name, &uf->name);
24962- f->name = compat_ptr(name);
24963+ f->name = (void __force_kernel *)compat_ptr(name);
24964 if (err) {
24965 err = -EFAULT;
24966 goto out;
24967diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24968index 688be8a..8a37d98 100644
24969--- a/block/scsi_ioctl.c
24970+++ b/block/scsi_ioctl.c
24971@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24972 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24973 struct sg_io_hdr *hdr, fmode_t mode)
24974 {
24975- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24976+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24977+ unsigned char *cmdptr;
24978+
24979+ if (rq->cmd != rq->__cmd)
24980+ cmdptr = rq->cmd;
24981+ else
24982+ cmdptr = tmpcmd;
24983+
24984+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24985 return -EFAULT;
24986+
24987+ if (cmdptr != rq->cmd)
24988+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24989+
24990 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24991 return -EPERM;
24992
24993@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24994 int err;
24995 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24996 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24997+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24998+ unsigned char *cmdptr;
24999
25000 if (!sic)
25001 return -EINVAL;
25002@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25003 */
25004 err = -EFAULT;
25005 rq->cmd_len = cmdlen;
25006- if (copy_from_user(rq->cmd, sic->data, cmdlen))
25007+
25008+ if (rq->cmd != rq->__cmd)
25009+ cmdptr = rq->cmd;
25010+ else
25011+ cmdptr = tmpcmd;
25012+
25013+ if (copy_from_user(cmdptr, sic->data, cmdlen))
25014 goto error;
25015
25016+ if (rq->cmd != cmdptr)
25017+ memcpy(rq->cmd, cmdptr, cmdlen);
25018+
25019 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25020 goto error;
25021
25022diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25023index 671d4d6..5f24030 100644
25024--- a/crypto/cryptd.c
25025+++ b/crypto/cryptd.c
25026@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25027
25028 struct cryptd_blkcipher_request_ctx {
25029 crypto_completion_t complete;
25030-};
25031+} __no_const;
25032
25033 struct cryptd_hash_ctx {
25034 struct crypto_shash *child;
25035@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25036
25037 struct cryptd_aead_request_ctx {
25038 crypto_completion_t complete;
25039-};
25040+} __no_const;
25041
25042 static void cryptd_queue_worker(struct work_struct *work);
25043
25044diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25045index 5d41894..22021e4 100644
25046--- a/drivers/acpi/apei/cper.c
25047+++ b/drivers/acpi/apei/cper.c
25048@@ -38,12 +38,12 @@
25049 */
25050 u64 cper_next_record_id(void)
25051 {
25052- static atomic64_t seq;
25053+ static atomic64_unchecked_t seq;
25054
25055- if (!atomic64_read(&seq))
25056- atomic64_set(&seq, ((u64)get_seconds()) << 32);
25057+ if (!atomic64_read_unchecked(&seq))
25058+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25059
25060- return atomic64_inc_return(&seq);
25061+ return atomic64_inc_return_unchecked(&seq);
25062 }
25063 EXPORT_SYMBOL_GPL(cper_next_record_id);
25064
25065diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25066index 6c47ae9..abfdd63 100644
25067--- a/drivers/acpi/ec_sys.c
25068+++ b/drivers/acpi/ec_sys.c
25069@@ -12,6 +12,7 @@
25070 #include <linux/acpi.h>
25071 #include <linux/debugfs.h>
25072 #include <linux/module.h>
25073+#include <linux/uaccess.h>
25074 #include "internal.h"
25075
25076 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25077@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25078 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25079 */
25080 unsigned int size = EC_SPACE_SIZE;
25081- u8 *data = (u8 *) buf;
25082+ u8 data;
25083 loff_t init_off = *off;
25084 int err = 0;
25085
25086@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25087 size = count;
25088
25089 while (size) {
25090- err = ec_read(*off, &data[*off - init_off]);
25091+ err = ec_read(*off, &data);
25092 if (err)
25093 return err;
25094+ if (put_user(data, &buf[*off - init_off]))
25095+ return -EFAULT;
25096 *off += 1;
25097 size--;
25098 }
25099@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25100
25101 unsigned int size = count;
25102 loff_t init_off = *off;
25103- u8 *data = (u8 *) buf;
25104 int err = 0;
25105
25106 if (*off >= EC_SPACE_SIZE)
25107@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25108 }
25109
25110 while (size) {
25111- u8 byte_write = data[*off - init_off];
25112+ u8 byte_write;
25113+ if (get_user(byte_write, &buf[*off - init_off]))
25114+ return -EFAULT;
25115 err = ec_write(*off, byte_write);
25116 if (err)
25117 return err;
25118diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25119index 251c7b62..000462d 100644
25120--- a/drivers/acpi/proc.c
25121+++ b/drivers/acpi/proc.c
25122@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25123 size_t count, loff_t * ppos)
25124 {
25125 struct list_head *node, *next;
25126- char strbuf[5];
25127- char str[5] = "";
25128- unsigned int len = count;
25129+ char strbuf[5] = {0};
25130
25131- if (len > 4)
25132- len = 4;
25133- if (len < 0)
25134+ if (count > 4)
25135+ count = 4;
25136+ if (copy_from_user(strbuf, buffer, count))
25137 return -EFAULT;
25138-
25139- if (copy_from_user(strbuf, buffer, len))
25140- return -EFAULT;
25141- strbuf[len] = '\0';
25142- sscanf(strbuf, "%s", str);
25143+ strbuf[count] = '\0';
25144
25145 mutex_lock(&acpi_device_lock);
25146 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25147@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25148 if (!dev->wakeup.flags.valid)
25149 continue;
25150
25151- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25152+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25153 if (device_can_wakeup(&dev->dev)) {
25154 bool enable = !device_may_wakeup(&dev->dev);
25155 device_set_wakeup_enable(&dev->dev, enable);
25156diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25157index 9d7bc9f..a6fc091 100644
25158--- a/drivers/acpi/processor_driver.c
25159+++ b/drivers/acpi/processor_driver.c
25160@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25161 return 0;
25162 #endif
25163
25164- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25165+ BUG_ON(pr->id >= nr_cpu_ids);
25166
25167 /*
25168 * Buggy BIOS check
25169diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25170index c04ad68..0b99473 100644
25171--- a/drivers/ata/libata-core.c
25172+++ b/drivers/ata/libata-core.c
25173@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25174 struct ata_port *ap;
25175 unsigned int tag;
25176
25177- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25178+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25179 ap = qc->ap;
25180
25181 qc->flags = 0;
25182@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25183 struct ata_port *ap;
25184 struct ata_link *link;
25185
25186- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25187+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25188 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25189 ap = qc->ap;
25190 link = qc->dev->link;
25191@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25192 return;
25193
25194 spin_lock(&lock);
25195+ pax_open_kernel();
25196
25197 for (cur = ops->inherits; cur; cur = cur->inherits) {
25198 void **inherit = (void **)cur;
25199@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25200 if (IS_ERR(*pp))
25201 *pp = NULL;
25202
25203- ops->inherits = NULL;
25204+ *(struct ata_port_operations **)&ops->inherits = NULL;
25205
25206+ pax_close_kernel();
25207 spin_unlock(&lock);
25208 }
25209
25210diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25211index e8574bb..f9f6a72 100644
25212--- a/drivers/ata/pata_arasan_cf.c
25213+++ b/drivers/ata/pata_arasan_cf.c
25214@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25215 /* Handle platform specific quirks */
25216 if (pdata->quirk) {
25217 if (pdata->quirk & CF_BROKEN_PIO) {
25218- ap->ops->set_piomode = NULL;
25219+ pax_open_kernel();
25220+ *(void **)&ap->ops->set_piomode = NULL;
25221+ pax_close_kernel();
25222 ap->pio_mask = 0;
25223 }
25224 if (pdata->quirk & CF_BROKEN_MWDMA)
25225diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25226index f9b983a..887b9d8 100644
25227--- a/drivers/atm/adummy.c
25228+++ b/drivers/atm/adummy.c
25229@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25230 vcc->pop(vcc, skb);
25231 else
25232 dev_kfree_skb_any(skb);
25233- atomic_inc(&vcc->stats->tx);
25234+ atomic_inc_unchecked(&vcc->stats->tx);
25235
25236 return 0;
25237 }
25238diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25239index f8f41e0..1f987dd 100644
25240--- a/drivers/atm/ambassador.c
25241+++ b/drivers/atm/ambassador.c
25242@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25243 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25244
25245 // VC layer stats
25246- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25247+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25248
25249 // free the descriptor
25250 kfree (tx_descr);
25251@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25252 dump_skb ("<<<", vc, skb);
25253
25254 // VC layer stats
25255- atomic_inc(&atm_vcc->stats->rx);
25256+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25257 __net_timestamp(skb);
25258 // end of our responsibility
25259 atm_vcc->push (atm_vcc, skb);
25260@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25261 } else {
25262 PRINTK (KERN_INFO, "dropped over-size frame");
25263 // should we count this?
25264- atomic_inc(&atm_vcc->stats->rx_drop);
25265+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25266 }
25267
25268 } else {
25269@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25270 }
25271
25272 if (check_area (skb->data, skb->len)) {
25273- atomic_inc(&atm_vcc->stats->tx_err);
25274+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25275 return -ENOMEM; // ?
25276 }
25277
25278diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25279index b22d71c..d6e1049 100644
25280--- a/drivers/atm/atmtcp.c
25281+++ b/drivers/atm/atmtcp.c
25282@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25283 if (vcc->pop) vcc->pop(vcc,skb);
25284 else dev_kfree_skb(skb);
25285 if (dev_data) return 0;
25286- atomic_inc(&vcc->stats->tx_err);
25287+ atomic_inc_unchecked(&vcc->stats->tx_err);
25288 return -ENOLINK;
25289 }
25290 size = skb->len+sizeof(struct atmtcp_hdr);
25291@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25292 if (!new_skb) {
25293 if (vcc->pop) vcc->pop(vcc,skb);
25294 else dev_kfree_skb(skb);
25295- atomic_inc(&vcc->stats->tx_err);
25296+ atomic_inc_unchecked(&vcc->stats->tx_err);
25297 return -ENOBUFS;
25298 }
25299 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25300@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25301 if (vcc->pop) vcc->pop(vcc,skb);
25302 else dev_kfree_skb(skb);
25303 out_vcc->push(out_vcc,new_skb);
25304- atomic_inc(&vcc->stats->tx);
25305- atomic_inc(&out_vcc->stats->rx);
25306+ atomic_inc_unchecked(&vcc->stats->tx);
25307+ atomic_inc_unchecked(&out_vcc->stats->rx);
25308 return 0;
25309 }
25310
25311@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25312 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25313 read_unlock(&vcc_sklist_lock);
25314 if (!out_vcc) {
25315- atomic_inc(&vcc->stats->tx_err);
25316+ atomic_inc_unchecked(&vcc->stats->tx_err);
25317 goto done;
25318 }
25319 skb_pull(skb,sizeof(struct atmtcp_hdr));
25320@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25321 __net_timestamp(new_skb);
25322 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25323 out_vcc->push(out_vcc,new_skb);
25324- atomic_inc(&vcc->stats->tx);
25325- atomic_inc(&out_vcc->stats->rx);
25326+ atomic_inc_unchecked(&vcc->stats->tx);
25327+ atomic_inc_unchecked(&out_vcc->stats->rx);
25328 done:
25329 if (vcc->pop) vcc->pop(vcc,skb);
25330 else dev_kfree_skb(skb);
25331diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25332index 956e9ac..133516d 100644
25333--- a/drivers/atm/eni.c
25334+++ b/drivers/atm/eni.c
25335@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25336 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25337 vcc->dev->number);
25338 length = 0;
25339- atomic_inc(&vcc->stats->rx_err);
25340+ atomic_inc_unchecked(&vcc->stats->rx_err);
25341 }
25342 else {
25343 length = ATM_CELL_SIZE-1; /* no HEC */
25344@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25345 size);
25346 }
25347 eff = length = 0;
25348- atomic_inc(&vcc->stats->rx_err);
25349+ atomic_inc_unchecked(&vcc->stats->rx_err);
25350 }
25351 else {
25352 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25353@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25354 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25355 vcc->dev->number,vcc->vci,length,size << 2,descr);
25356 length = eff = 0;
25357- atomic_inc(&vcc->stats->rx_err);
25358+ atomic_inc_unchecked(&vcc->stats->rx_err);
25359 }
25360 }
25361 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25362@@ -771,7 +771,7 @@ rx_dequeued++;
25363 vcc->push(vcc,skb);
25364 pushed++;
25365 }
25366- atomic_inc(&vcc->stats->rx);
25367+ atomic_inc_unchecked(&vcc->stats->rx);
25368 }
25369 wake_up(&eni_dev->rx_wait);
25370 }
25371@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25372 PCI_DMA_TODEVICE);
25373 if (vcc->pop) vcc->pop(vcc,skb);
25374 else dev_kfree_skb_irq(skb);
25375- atomic_inc(&vcc->stats->tx);
25376+ atomic_inc_unchecked(&vcc->stats->tx);
25377 wake_up(&eni_dev->tx_wait);
25378 dma_complete++;
25379 }
25380@@ -1569,7 +1569,7 @@ tx_complete++;
25381 /*--------------------------------- entries ---------------------------------*/
25382
25383
25384-static const char *media_name[] __devinitdata = {
25385+static const char *media_name[] __devinitconst = {
25386 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25387 "UTP", "05?", "06?", "07?", /* 4- 7 */
25388 "TAXI","09?", "10?", "11?", /* 8-11 */
25389diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25390index 5072f8a..fa52520d 100644
25391--- a/drivers/atm/firestream.c
25392+++ b/drivers/atm/firestream.c
25393@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25394 }
25395 }
25396
25397- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25398+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25399
25400 fs_dprintk (FS_DEBUG_TXMEM, "i");
25401 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25402@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25403 #endif
25404 skb_put (skb, qe->p1 & 0xffff);
25405 ATM_SKB(skb)->vcc = atm_vcc;
25406- atomic_inc(&atm_vcc->stats->rx);
25407+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25408 __net_timestamp(skb);
25409 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25410 atm_vcc->push (atm_vcc, skb);
25411@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25412 kfree (pe);
25413 }
25414 if (atm_vcc)
25415- atomic_inc(&atm_vcc->stats->rx_drop);
25416+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25417 break;
25418 case 0x1f: /* Reassembly abort: no buffers. */
25419 /* Silently increment error counter. */
25420 if (atm_vcc)
25421- atomic_inc(&atm_vcc->stats->rx_drop);
25422+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25423 break;
25424 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25425 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25426diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25427index 361f5ae..7fc552d 100644
25428--- a/drivers/atm/fore200e.c
25429+++ b/drivers/atm/fore200e.c
25430@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25431 #endif
25432 /* check error condition */
25433 if (*entry->status & STATUS_ERROR)
25434- atomic_inc(&vcc->stats->tx_err);
25435+ atomic_inc_unchecked(&vcc->stats->tx_err);
25436 else
25437- atomic_inc(&vcc->stats->tx);
25438+ atomic_inc_unchecked(&vcc->stats->tx);
25439 }
25440 }
25441
25442@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25443 if (skb == NULL) {
25444 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25445
25446- atomic_inc(&vcc->stats->rx_drop);
25447+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25448 return -ENOMEM;
25449 }
25450
25451@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25452
25453 dev_kfree_skb_any(skb);
25454
25455- atomic_inc(&vcc->stats->rx_drop);
25456+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25457 return -ENOMEM;
25458 }
25459
25460 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25461
25462 vcc->push(vcc, skb);
25463- atomic_inc(&vcc->stats->rx);
25464+ atomic_inc_unchecked(&vcc->stats->rx);
25465
25466 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25467
25468@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25469 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25470 fore200e->atm_dev->number,
25471 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25472- atomic_inc(&vcc->stats->rx_err);
25473+ atomic_inc_unchecked(&vcc->stats->rx_err);
25474 }
25475 }
25476
25477@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25478 goto retry_here;
25479 }
25480
25481- atomic_inc(&vcc->stats->tx_err);
25482+ atomic_inc_unchecked(&vcc->stats->tx_err);
25483
25484 fore200e->tx_sat++;
25485 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25486diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25487index 9a51df4..f3bb5f8 100644
25488--- a/drivers/atm/he.c
25489+++ b/drivers/atm/he.c
25490@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25491
25492 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25493 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25494- atomic_inc(&vcc->stats->rx_drop);
25495+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25496 goto return_host_buffers;
25497 }
25498
25499@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25500 RBRQ_LEN_ERR(he_dev->rbrq_head)
25501 ? "LEN_ERR" : "",
25502 vcc->vpi, vcc->vci);
25503- atomic_inc(&vcc->stats->rx_err);
25504+ atomic_inc_unchecked(&vcc->stats->rx_err);
25505 goto return_host_buffers;
25506 }
25507
25508@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25509 vcc->push(vcc, skb);
25510 spin_lock(&he_dev->global_lock);
25511
25512- atomic_inc(&vcc->stats->rx);
25513+ atomic_inc_unchecked(&vcc->stats->rx);
25514
25515 return_host_buffers:
25516 ++pdus_assembled;
25517@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25518 tpd->vcc->pop(tpd->vcc, tpd->skb);
25519 else
25520 dev_kfree_skb_any(tpd->skb);
25521- atomic_inc(&tpd->vcc->stats->tx_err);
25522+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25523 }
25524 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25525 return;
25526@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25527 vcc->pop(vcc, skb);
25528 else
25529 dev_kfree_skb_any(skb);
25530- atomic_inc(&vcc->stats->tx_err);
25531+ atomic_inc_unchecked(&vcc->stats->tx_err);
25532 return -EINVAL;
25533 }
25534
25535@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25536 vcc->pop(vcc, skb);
25537 else
25538 dev_kfree_skb_any(skb);
25539- atomic_inc(&vcc->stats->tx_err);
25540+ atomic_inc_unchecked(&vcc->stats->tx_err);
25541 return -EINVAL;
25542 }
25543 #endif
25544@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25545 vcc->pop(vcc, skb);
25546 else
25547 dev_kfree_skb_any(skb);
25548- atomic_inc(&vcc->stats->tx_err);
25549+ atomic_inc_unchecked(&vcc->stats->tx_err);
25550 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25551 return -ENOMEM;
25552 }
25553@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25554 vcc->pop(vcc, skb);
25555 else
25556 dev_kfree_skb_any(skb);
25557- atomic_inc(&vcc->stats->tx_err);
25558+ atomic_inc_unchecked(&vcc->stats->tx_err);
25559 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25560 return -ENOMEM;
25561 }
25562@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25563 __enqueue_tpd(he_dev, tpd, cid);
25564 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25565
25566- atomic_inc(&vcc->stats->tx);
25567+ atomic_inc_unchecked(&vcc->stats->tx);
25568
25569 return 0;
25570 }
25571diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25572index b812103..e391a49 100644
25573--- a/drivers/atm/horizon.c
25574+++ b/drivers/atm/horizon.c
25575@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25576 {
25577 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25578 // VC layer stats
25579- atomic_inc(&vcc->stats->rx);
25580+ atomic_inc_unchecked(&vcc->stats->rx);
25581 __net_timestamp(skb);
25582 // end of our responsibility
25583 vcc->push (vcc, skb);
25584@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25585 dev->tx_iovec = NULL;
25586
25587 // VC layer stats
25588- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25589+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25590
25591 // free the skb
25592 hrz_kfree_skb (skb);
25593diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25594index 1c05212..c28e200 100644
25595--- a/drivers/atm/idt77252.c
25596+++ b/drivers/atm/idt77252.c
25597@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25598 else
25599 dev_kfree_skb(skb);
25600
25601- atomic_inc(&vcc->stats->tx);
25602+ atomic_inc_unchecked(&vcc->stats->tx);
25603 }
25604
25605 atomic_dec(&scq->used);
25606@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25607 if ((sb = dev_alloc_skb(64)) == NULL) {
25608 printk("%s: Can't allocate buffers for aal0.\n",
25609 card->name);
25610- atomic_add(i, &vcc->stats->rx_drop);
25611+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25612 break;
25613 }
25614 if (!atm_charge(vcc, sb->truesize)) {
25615 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25616 card->name);
25617- atomic_add(i - 1, &vcc->stats->rx_drop);
25618+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25619 dev_kfree_skb(sb);
25620 break;
25621 }
25622@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25623 ATM_SKB(sb)->vcc = vcc;
25624 __net_timestamp(sb);
25625 vcc->push(vcc, sb);
25626- atomic_inc(&vcc->stats->rx);
25627+ atomic_inc_unchecked(&vcc->stats->rx);
25628
25629 cell += ATM_CELL_PAYLOAD;
25630 }
25631@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25632 "(CDC: %08x)\n",
25633 card->name, len, rpp->len, readl(SAR_REG_CDC));
25634 recycle_rx_pool_skb(card, rpp);
25635- atomic_inc(&vcc->stats->rx_err);
25636+ atomic_inc_unchecked(&vcc->stats->rx_err);
25637 return;
25638 }
25639 if (stat & SAR_RSQE_CRC) {
25640 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25641 recycle_rx_pool_skb(card, rpp);
25642- atomic_inc(&vcc->stats->rx_err);
25643+ atomic_inc_unchecked(&vcc->stats->rx_err);
25644 return;
25645 }
25646 if (skb_queue_len(&rpp->queue) > 1) {
25647@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25648 RXPRINTK("%s: Can't alloc RX skb.\n",
25649 card->name);
25650 recycle_rx_pool_skb(card, rpp);
25651- atomic_inc(&vcc->stats->rx_err);
25652+ atomic_inc_unchecked(&vcc->stats->rx_err);
25653 return;
25654 }
25655 if (!atm_charge(vcc, skb->truesize)) {
25656@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25657 __net_timestamp(skb);
25658
25659 vcc->push(vcc, skb);
25660- atomic_inc(&vcc->stats->rx);
25661+ atomic_inc_unchecked(&vcc->stats->rx);
25662
25663 return;
25664 }
25665@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25666 __net_timestamp(skb);
25667
25668 vcc->push(vcc, skb);
25669- atomic_inc(&vcc->stats->rx);
25670+ atomic_inc_unchecked(&vcc->stats->rx);
25671
25672 if (skb->truesize > SAR_FB_SIZE_3)
25673 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25674@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25675 if (vcc->qos.aal != ATM_AAL0) {
25676 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25677 card->name, vpi, vci);
25678- atomic_inc(&vcc->stats->rx_drop);
25679+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25680 goto drop;
25681 }
25682
25683 if ((sb = dev_alloc_skb(64)) == NULL) {
25684 printk("%s: Can't allocate buffers for AAL0.\n",
25685 card->name);
25686- atomic_inc(&vcc->stats->rx_err);
25687+ atomic_inc_unchecked(&vcc->stats->rx_err);
25688 goto drop;
25689 }
25690
25691@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25692 ATM_SKB(sb)->vcc = vcc;
25693 __net_timestamp(sb);
25694 vcc->push(vcc, sb);
25695- atomic_inc(&vcc->stats->rx);
25696+ atomic_inc_unchecked(&vcc->stats->rx);
25697
25698 drop:
25699 skb_pull(queue, 64);
25700@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25701
25702 if (vc == NULL) {
25703 printk("%s: NULL connection in send().\n", card->name);
25704- atomic_inc(&vcc->stats->tx_err);
25705+ atomic_inc_unchecked(&vcc->stats->tx_err);
25706 dev_kfree_skb(skb);
25707 return -EINVAL;
25708 }
25709 if (!test_bit(VCF_TX, &vc->flags)) {
25710 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25711- atomic_inc(&vcc->stats->tx_err);
25712+ atomic_inc_unchecked(&vcc->stats->tx_err);
25713 dev_kfree_skb(skb);
25714 return -EINVAL;
25715 }
25716@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25717 break;
25718 default:
25719 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25720- atomic_inc(&vcc->stats->tx_err);
25721+ atomic_inc_unchecked(&vcc->stats->tx_err);
25722 dev_kfree_skb(skb);
25723 return -EINVAL;
25724 }
25725
25726 if (skb_shinfo(skb)->nr_frags != 0) {
25727 printk("%s: No scatter-gather yet.\n", card->name);
25728- atomic_inc(&vcc->stats->tx_err);
25729+ atomic_inc_unchecked(&vcc->stats->tx_err);
25730 dev_kfree_skb(skb);
25731 return -EINVAL;
25732 }
25733@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25734
25735 err = queue_skb(card, vc, skb, oam);
25736 if (err) {
25737- atomic_inc(&vcc->stats->tx_err);
25738+ atomic_inc_unchecked(&vcc->stats->tx_err);
25739 dev_kfree_skb(skb);
25740 return err;
25741 }
25742@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25743 skb = dev_alloc_skb(64);
25744 if (!skb) {
25745 printk("%s: Out of memory in send_oam().\n", card->name);
25746- atomic_inc(&vcc->stats->tx_err);
25747+ atomic_inc_unchecked(&vcc->stats->tx_err);
25748 return -ENOMEM;
25749 }
25750 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25751diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25752index 3d0c2b0..45441fa 100644
25753--- a/drivers/atm/iphase.c
25754+++ b/drivers/atm/iphase.c
25755@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25756 status = (u_short) (buf_desc_ptr->desc_mode);
25757 if (status & (RX_CER | RX_PTE | RX_OFL))
25758 {
25759- atomic_inc(&vcc->stats->rx_err);
25760+ atomic_inc_unchecked(&vcc->stats->rx_err);
25761 IF_ERR(printk("IA: bad packet, dropping it");)
25762 if (status & RX_CER) {
25763 IF_ERR(printk(" cause: packet CRC error\n");)
25764@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25765 len = dma_addr - buf_addr;
25766 if (len > iadev->rx_buf_sz) {
25767 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25768- atomic_inc(&vcc->stats->rx_err);
25769+ atomic_inc_unchecked(&vcc->stats->rx_err);
25770 goto out_free_desc;
25771 }
25772
25773@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25774 ia_vcc = INPH_IA_VCC(vcc);
25775 if (ia_vcc == NULL)
25776 {
25777- atomic_inc(&vcc->stats->rx_err);
25778+ atomic_inc_unchecked(&vcc->stats->rx_err);
25779 dev_kfree_skb_any(skb);
25780 atm_return(vcc, atm_guess_pdu2truesize(len));
25781 goto INCR_DLE;
25782@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25783 if ((length > iadev->rx_buf_sz) || (length >
25784 (skb->len - sizeof(struct cpcs_trailer))))
25785 {
25786- atomic_inc(&vcc->stats->rx_err);
25787+ atomic_inc_unchecked(&vcc->stats->rx_err);
25788 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25789 length, skb->len);)
25790 dev_kfree_skb_any(skb);
25791@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25792
25793 IF_RX(printk("rx_dle_intr: skb push");)
25794 vcc->push(vcc,skb);
25795- atomic_inc(&vcc->stats->rx);
25796+ atomic_inc_unchecked(&vcc->stats->rx);
25797 iadev->rx_pkt_cnt++;
25798 }
25799 INCR_DLE:
25800@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25801 {
25802 struct k_sonet_stats *stats;
25803 stats = &PRIV(_ia_dev[board])->sonet_stats;
25804- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25805- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25806- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25807- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25808- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25809- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25810- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25811- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25812- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25813+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25814+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25815+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25816+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25817+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25818+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25819+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25820+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25821+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25822 }
25823 ia_cmds.status = 0;
25824 break;
25825@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25826 if ((desc == 0) || (desc > iadev->num_tx_desc))
25827 {
25828 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25829- atomic_inc(&vcc->stats->tx);
25830+ atomic_inc_unchecked(&vcc->stats->tx);
25831 if (vcc->pop)
25832 vcc->pop(vcc, skb);
25833 else
25834@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25835 ATM_DESC(skb) = vcc->vci;
25836 skb_queue_tail(&iadev->tx_dma_q, skb);
25837
25838- atomic_inc(&vcc->stats->tx);
25839+ atomic_inc_unchecked(&vcc->stats->tx);
25840 iadev->tx_pkt_cnt++;
25841 /* Increment transaction counter */
25842 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25843
25844 #if 0
25845 /* add flow control logic */
25846- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25847+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25848 if (iavcc->vc_desc_cnt > 10) {
25849 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25850 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25851diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25852index f556969..0da15eb 100644
25853--- a/drivers/atm/lanai.c
25854+++ b/drivers/atm/lanai.c
25855@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25856 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25857 lanai_endtx(lanai, lvcc);
25858 lanai_free_skb(lvcc->tx.atmvcc, skb);
25859- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25860+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25861 }
25862
25863 /* Try to fill the buffer - don't call unless there is backlog */
25864@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25865 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25866 __net_timestamp(skb);
25867 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25868- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25869+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25870 out:
25871 lvcc->rx.buf.ptr = end;
25872 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25873@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25874 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25875 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25876 lanai->stats.service_rxnotaal5++;
25877- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25878+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25879 return 0;
25880 }
25881 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25882@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25883 int bytes;
25884 read_unlock(&vcc_sklist_lock);
25885 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25886- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25887+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25888 lvcc->stats.x.aal5.service_trash++;
25889 bytes = (SERVICE_GET_END(s) * 16) -
25890 (((unsigned long) lvcc->rx.buf.ptr) -
25891@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25892 }
25893 if (s & SERVICE_STREAM) {
25894 read_unlock(&vcc_sklist_lock);
25895- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25896+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25897 lvcc->stats.x.aal5.service_stream++;
25898 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25899 "PDU on VCI %d!\n", lanai->number, vci);
25900@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25901 return 0;
25902 }
25903 DPRINTK("got rx crc error on vci %d\n", vci);
25904- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25905+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25906 lvcc->stats.x.aal5.service_rxcrc++;
25907 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25908 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25909diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25910index 1c70c45..300718d 100644
25911--- a/drivers/atm/nicstar.c
25912+++ b/drivers/atm/nicstar.c
25913@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25914 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25915 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25916 card->index);
25917- atomic_inc(&vcc->stats->tx_err);
25918+ atomic_inc_unchecked(&vcc->stats->tx_err);
25919 dev_kfree_skb_any(skb);
25920 return -EINVAL;
25921 }
25922@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25923 if (!vc->tx) {
25924 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25925 card->index);
25926- atomic_inc(&vcc->stats->tx_err);
25927+ atomic_inc_unchecked(&vcc->stats->tx_err);
25928 dev_kfree_skb_any(skb);
25929 return -EINVAL;
25930 }
25931@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25932 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25933 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25934 card->index);
25935- atomic_inc(&vcc->stats->tx_err);
25936+ atomic_inc_unchecked(&vcc->stats->tx_err);
25937 dev_kfree_skb_any(skb);
25938 return -EINVAL;
25939 }
25940
25941 if (skb_shinfo(skb)->nr_frags != 0) {
25942 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25943- atomic_inc(&vcc->stats->tx_err);
25944+ atomic_inc_unchecked(&vcc->stats->tx_err);
25945 dev_kfree_skb_any(skb);
25946 return -EINVAL;
25947 }
25948@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25949 }
25950
25951 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25952- atomic_inc(&vcc->stats->tx_err);
25953+ atomic_inc_unchecked(&vcc->stats->tx_err);
25954 dev_kfree_skb_any(skb);
25955 return -EIO;
25956 }
25957- atomic_inc(&vcc->stats->tx);
25958+ atomic_inc_unchecked(&vcc->stats->tx);
25959
25960 return 0;
25961 }
25962@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25963 printk
25964 ("nicstar%d: Can't allocate buffers for aal0.\n",
25965 card->index);
25966- atomic_add(i, &vcc->stats->rx_drop);
25967+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25968 break;
25969 }
25970 if (!atm_charge(vcc, sb->truesize)) {
25971 RXPRINTK
25972 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25973 card->index);
25974- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25975+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25976 dev_kfree_skb_any(sb);
25977 break;
25978 }
25979@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25980 ATM_SKB(sb)->vcc = vcc;
25981 __net_timestamp(sb);
25982 vcc->push(vcc, sb);
25983- atomic_inc(&vcc->stats->rx);
25984+ atomic_inc_unchecked(&vcc->stats->rx);
25985 cell += ATM_CELL_PAYLOAD;
25986 }
25987
25988@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25989 if (iovb == NULL) {
25990 printk("nicstar%d: Out of iovec buffers.\n",
25991 card->index);
25992- atomic_inc(&vcc->stats->rx_drop);
25993+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25994 recycle_rx_buf(card, skb);
25995 return;
25996 }
25997@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25998 small or large buffer itself. */
25999 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26000 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26001- atomic_inc(&vcc->stats->rx_err);
26002+ atomic_inc_unchecked(&vcc->stats->rx_err);
26003 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26004 NS_MAX_IOVECS);
26005 NS_PRV_IOVCNT(iovb) = 0;
26006@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26007 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26008 card->index);
26009 which_list(card, skb);
26010- atomic_inc(&vcc->stats->rx_err);
26011+ atomic_inc_unchecked(&vcc->stats->rx_err);
26012 recycle_rx_buf(card, skb);
26013 vc->rx_iov = NULL;
26014 recycle_iov_buf(card, iovb);
26015@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26016 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26017 card->index);
26018 which_list(card, skb);
26019- atomic_inc(&vcc->stats->rx_err);
26020+ atomic_inc_unchecked(&vcc->stats->rx_err);
26021 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26022 NS_PRV_IOVCNT(iovb));
26023 vc->rx_iov = NULL;
26024@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26025 printk(" - PDU size mismatch.\n");
26026 else
26027 printk(".\n");
26028- atomic_inc(&vcc->stats->rx_err);
26029+ atomic_inc_unchecked(&vcc->stats->rx_err);
26030 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26031 NS_PRV_IOVCNT(iovb));
26032 vc->rx_iov = NULL;
26033@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26034 /* skb points to a small buffer */
26035 if (!atm_charge(vcc, skb->truesize)) {
26036 push_rxbufs(card, skb);
26037- atomic_inc(&vcc->stats->rx_drop);
26038+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26039 } else {
26040 skb_put(skb, len);
26041 dequeue_sm_buf(card, skb);
26042@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26043 ATM_SKB(skb)->vcc = vcc;
26044 __net_timestamp(skb);
26045 vcc->push(vcc, skb);
26046- atomic_inc(&vcc->stats->rx);
26047+ atomic_inc_unchecked(&vcc->stats->rx);
26048 }
26049 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26050 struct sk_buff *sb;
26051@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26052 if (len <= NS_SMBUFSIZE) {
26053 if (!atm_charge(vcc, sb->truesize)) {
26054 push_rxbufs(card, sb);
26055- atomic_inc(&vcc->stats->rx_drop);
26056+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26057 } else {
26058 skb_put(sb, len);
26059 dequeue_sm_buf(card, sb);
26060@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26061 ATM_SKB(sb)->vcc = vcc;
26062 __net_timestamp(sb);
26063 vcc->push(vcc, sb);
26064- atomic_inc(&vcc->stats->rx);
26065+ atomic_inc_unchecked(&vcc->stats->rx);
26066 }
26067
26068 push_rxbufs(card, skb);
26069@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26070
26071 if (!atm_charge(vcc, skb->truesize)) {
26072 push_rxbufs(card, skb);
26073- atomic_inc(&vcc->stats->rx_drop);
26074+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26075 } else {
26076 dequeue_lg_buf(card, skb);
26077 #ifdef NS_USE_DESTRUCTORS
26078@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26079 ATM_SKB(skb)->vcc = vcc;
26080 __net_timestamp(skb);
26081 vcc->push(vcc, skb);
26082- atomic_inc(&vcc->stats->rx);
26083+ atomic_inc_unchecked(&vcc->stats->rx);
26084 }
26085
26086 push_rxbufs(card, sb);
26087@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26088 printk
26089 ("nicstar%d: Out of huge buffers.\n",
26090 card->index);
26091- atomic_inc(&vcc->stats->rx_drop);
26092+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26093 recycle_iovec_rx_bufs(card,
26094 (struct iovec *)
26095 iovb->data,
26096@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26097 card->hbpool.count++;
26098 } else
26099 dev_kfree_skb_any(hb);
26100- atomic_inc(&vcc->stats->rx_drop);
26101+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26102 } else {
26103 /* Copy the small buffer to the huge buffer */
26104 sb = (struct sk_buff *)iov->iov_base;
26105@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26106 #endif /* NS_USE_DESTRUCTORS */
26107 __net_timestamp(hb);
26108 vcc->push(vcc, hb);
26109- atomic_inc(&vcc->stats->rx);
26110+ atomic_inc_unchecked(&vcc->stats->rx);
26111 }
26112 }
26113
26114diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26115index 5d1d076..12fbca4 100644
26116--- a/drivers/atm/solos-pci.c
26117+++ b/drivers/atm/solos-pci.c
26118@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26119 }
26120 atm_charge(vcc, skb->truesize);
26121 vcc->push(vcc, skb);
26122- atomic_inc(&vcc->stats->rx);
26123+ atomic_inc_unchecked(&vcc->stats->rx);
26124 break;
26125
26126 case PKT_STATUS:
26127@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26128 vcc = SKB_CB(oldskb)->vcc;
26129
26130 if (vcc) {
26131- atomic_inc(&vcc->stats->tx);
26132+ atomic_inc_unchecked(&vcc->stats->tx);
26133 solos_pop(vcc, oldskb);
26134 } else
26135 dev_kfree_skb_irq(oldskb);
26136diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26137index 90f1ccc..04c4a1e 100644
26138--- a/drivers/atm/suni.c
26139+++ b/drivers/atm/suni.c
26140@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26141
26142
26143 #define ADD_LIMITED(s,v) \
26144- atomic_add((v),&stats->s); \
26145- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26146+ atomic_add_unchecked((v),&stats->s); \
26147+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26148
26149
26150 static void suni_hz(unsigned long from_timer)
26151diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26152index 5120a96..e2572bd 100644
26153--- a/drivers/atm/uPD98402.c
26154+++ b/drivers/atm/uPD98402.c
26155@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26156 struct sonet_stats tmp;
26157 int error = 0;
26158
26159- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26160+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26161 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26162 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26163 if (zero && !error) {
26164@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26165
26166
26167 #define ADD_LIMITED(s,v) \
26168- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26169- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26170- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26171+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26172+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26173+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26174
26175
26176 static void stat_event(struct atm_dev *dev)
26177@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26178 if (reason & uPD98402_INT_PFM) stat_event(dev);
26179 if (reason & uPD98402_INT_PCO) {
26180 (void) GET(PCOCR); /* clear interrupt cause */
26181- atomic_add(GET(HECCT),
26182+ atomic_add_unchecked(GET(HECCT),
26183 &PRIV(dev)->sonet_stats.uncorr_hcs);
26184 }
26185 if ((reason & uPD98402_INT_RFO) &&
26186@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26187 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26188 uPD98402_INT_LOS),PIMR); /* enable them */
26189 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26190- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26191- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26192- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26193+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26194+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26195+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26196 return 0;
26197 }
26198
26199diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26200index d889f56..17eb71e 100644
26201--- a/drivers/atm/zatm.c
26202+++ b/drivers/atm/zatm.c
26203@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26204 }
26205 if (!size) {
26206 dev_kfree_skb_irq(skb);
26207- if (vcc) atomic_inc(&vcc->stats->rx_err);
26208+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26209 continue;
26210 }
26211 if (!atm_charge(vcc,skb->truesize)) {
26212@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26213 skb->len = size;
26214 ATM_SKB(skb)->vcc = vcc;
26215 vcc->push(vcc,skb);
26216- atomic_inc(&vcc->stats->rx);
26217+ atomic_inc_unchecked(&vcc->stats->rx);
26218 }
26219 zout(pos & 0xffff,MTA(mbx));
26220 #if 0 /* probably a stupid idea */
26221@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26222 skb_queue_head(&zatm_vcc->backlog,skb);
26223 break;
26224 }
26225- atomic_inc(&vcc->stats->tx);
26226+ atomic_inc_unchecked(&vcc->stats->tx);
26227 wake_up(&zatm_vcc->tx_wait);
26228 }
26229
26230diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26231index a4760e0..51283cf 100644
26232--- a/drivers/base/devtmpfs.c
26233+++ b/drivers/base/devtmpfs.c
26234@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26235 if (!thread)
26236 return 0;
26237
26238- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26239+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26240 if (err)
26241 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26242 else
26243diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26244index caf995f..6f76697 100644
26245--- a/drivers/base/power/wakeup.c
26246+++ b/drivers/base/power/wakeup.c
26247@@ -30,14 +30,14 @@ bool events_check_enabled;
26248 * They need to be modified together atomically, so it's better to use one
26249 * atomic variable to hold them both.
26250 */
26251-static atomic_t combined_event_count = ATOMIC_INIT(0);
26252+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26253
26254 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26255 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26256
26257 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26258 {
26259- unsigned int comb = atomic_read(&combined_event_count);
26260+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26261
26262 *cnt = (comb >> IN_PROGRESS_BITS);
26263 *inpr = comb & MAX_IN_PROGRESS;
26264@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26265 ws->last_time = ktime_get();
26266
26267 /* Increment the counter of events in progress. */
26268- atomic_inc(&combined_event_count);
26269+ atomic_inc_unchecked(&combined_event_count);
26270 }
26271
26272 /**
26273@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26274 * Increment the counter of registered wakeup events and decrement the
26275 * couter of wakeup events in progress simultaneously.
26276 */
26277- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26278+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26279 }
26280
26281 /**
26282diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26283index b0f553b..77b928b 100644
26284--- a/drivers/block/cciss.c
26285+++ b/drivers/block/cciss.c
26286@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26287 int err;
26288 u32 cp;
26289
26290+ memset(&arg64, 0, sizeof(arg64));
26291+
26292 err = 0;
26293 err |=
26294 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26295@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26296 while (!list_empty(&h->reqQ)) {
26297 c = list_entry(h->reqQ.next, CommandList_struct, list);
26298 /* can't do anything if fifo is full */
26299- if ((h->access.fifo_full(h))) {
26300+ if ((h->access->fifo_full(h))) {
26301 dev_warn(&h->pdev->dev, "fifo full\n");
26302 break;
26303 }
26304@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26305 h->Qdepth--;
26306
26307 /* Tell the controller execute command */
26308- h->access.submit_command(h, c);
26309+ h->access->submit_command(h, c);
26310
26311 /* Put job onto the completed Q */
26312 addQ(&h->cmpQ, c);
26313@@ -3443,17 +3445,17 @@ startio:
26314
26315 static inline unsigned long get_next_completion(ctlr_info_t *h)
26316 {
26317- return h->access.command_completed(h);
26318+ return h->access->command_completed(h);
26319 }
26320
26321 static inline int interrupt_pending(ctlr_info_t *h)
26322 {
26323- return h->access.intr_pending(h);
26324+ return h->access->intr_pending(h);
26325 }
26326
26327 static inline long interrupt_not_for_us(ctlr_info_t *h)
26328 {
26329- return ((h->access.intr_pending(h) == 0) ||
26330+ return ((h->access->intr_pending(h) == 0) ||
26331 (h->interrupts_enabled == 0));
26332 }
26333
26334@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26335 u32 a;
26336
26337 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26338- return h->access.command_completed(h);
26339+ return h->access->command_completed(h);
26340
26341 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26342 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26343@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26344 trans_support & CFGTBL_Trans_use_short_tags);
26345
26346 /* Change the access methods to the performant access methods */
26347- h->access = SA5_performant_access;
26348+ h->access = &SA5_performant_access;
26349 h->transMethod = CFGTBL_Trans_Performant;
26350
26351 return;
26352@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26353 if (prod_index < 0)
26354 return -ENODEV;
26355 h->product_name = products[prod_index].product_name;
26356- h->access = *(products[prod_index].access);
26357+ h->access = products[prod_index].access;
26358
26359 if (cciss_board_disabled(h)) {
26360 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26361@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26362 }
26363
26364 /* make sure the board interrupts are off */
26365- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26366+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26367 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26368 if (rc)
26369 goto clean2;
26370@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26371 * fake ones to scoop up any residual completions.
26372 */
26373 spin_lock_irqsave(&h->lock, flags);
26374- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26375+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26376 spin_unlock_irqrestore(&h->lock, flags);
26377 free_irq(h->intr[h->intr_mode], h);
26378 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26379@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26380 dev_info(&h->pdev->dev, "Board READY.\n");
26381 dev_info(&h->pdev->dev,
26382 "Waiting for stale completions to drain.\n");
26383- h->access.set_intr_mask(h, CCISS_INTR_ON);
26384+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26385 msleep(10000);
26386- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26387+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26388
26389 rc = controller_reset_failed(h->cfgtable);
26390 if (rc)
26391@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26392 cciss_scsi_setup(h);
26393
26394 /* Turn the interrupts on so we can service requests */
26395- h->access.set_intr_mask(h, CCISS_INTR_ON);
26396+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26397
26398 /* Get the firmware version */
26399 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26400@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26401 kfree(flush_buf);
26402 if (return_code != IO_OK)
26403 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26404- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26405+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26406 free_irq(h->intr[h->intr_mode], h);
26407 }
26408
26409diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26410index 7fda30e..eb5dfe0 100644
26411--- a/drivers/block/cciss.h
26412+++ b/drivers/block/cciss.h
26413@@ -101,7 +101,7 @@ struct ctlr_info
26414 /* information about each logical volume */
26415 drive_info_struct *drv[CISS_MAX_LUN];
26416
26417- struct access_method access;
26418+ struct access_method *access;
26419
26420 /* queue and queue Info */
26421 struct list_head reqQ;
26422diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26423index 9125bbe..eede5c8 100644
26424--- a/drivers/block/cpqarray.c
26425+++ b/drivers/block/cpqarray.c
26426@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26427 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26428 goto Enomem4;
26429 }
26430- hba[i]->access.set_intr_mask(hba[i], 0);
26431+ hba[i]->access->set_intr_mask(hba[i], 0);
26432 if (request_irq(hba[i]->intr, do_ida_intr,
26433 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26434 {
26435@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26436 add_timer(&hba[i]->timer);
26437
26438 /* Enable IRQ now that spinlock and rate limit timer are set up */
26439- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26440+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26441
26442 for(j=0; j<NWD; j++) {
26443 struct gendisk *disk = ida_gendisk[i][j];
26444@@ -694,7 +694,7 @@ DBGINFO(
26445 for(i=0; i<NR_PRODUCTS; i++) {
26446 if (board_id == products[i].board_id) {
26447 c->product_name = products[i].product_name;
26448- c->access = *(products[i].access);
26449+ c->access = products[i].access;
26450 break;
26451 }
26452 }
26453@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26454 hba[ctlr]->intr = intr;
26455 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26456 hba[ctlr]->product_name = products[j].product_name;
26457- hba[ctlr]->access = *(products[j].access);
26458+ hba[ctlr]->access = products[j].access;
26459 hba[ctlr]->ctlr = ctlr;
26460 hba[ctlr]->board_id = board_id;
26461 hba[ctlr]->pci_dev = NULL; /* not PCI */
26462@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26463
26464 while((c = h->reqQ) != NULL) {
26465 /* Can't do anything if we're busy */
26466- if (h->access.fifo_full(h) == 0)
26467+ if (h->access->fifo_full(h) == 0)
26468 return;
26469
26470 /* Get the first entry from the request Q */
26471@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26472 h->Qdepth--;
26473
26474 /* Tell the controller to do our bidding */
26475- h->access.submit_command(h, c);
26476+ h->access->submit_command(h, c);
26477
26478 /* Get onto the completion Q */
26479 addQ(&h->cmpQ, c);
26480@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26481 unsigned long flags;
26482 __u32 a,a1;
26483
26484- istat = h->access.intr_pending(h);
26485+ istat = h->access->intr_pending(h);
26486 /* Is this interrupt for us? */
26487 if (istat == 0)
26488 return IRQ_NONE;
26489@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26490 */
26491 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26492 if (istat & FIFO_NOT_EMPTY) {
26493- while((a = h->access.command_completed(h))) {
26494+ while((a = h->access->command_completed(h))) {
26495 a1 = a; a &= ~3;
26496 if ((c = h->cmpQ) == NULL)
26497 {
26498@@ -1449,11 +1449,11 @@ static int sendcmd(
26499 /*
26500 * Disable interrupt
26501 */
26502- info_p->access.set_intr_mask(info_p, 0);
26503+ info_p->access->set_intr_mask(info_p, 0);
26504 /* Make sure there is room in the command FIFO */
26505 /* Actually it should be completely empty at this time. */
26506 for (i = 200000; i > 0; i--) {
26507- temp = info_p->access.fifo_full(info_p);
26508+ temp = info_p->access->fifo_full(info_p);
26509 if (temp != 0) {
26510 break;
26511 }
26512@@ -1466,7 +1466,7 @@ DBG(
26513 /*
26514 * Send the cmd
26515 */
26516- info_p->access.submit_command(info_p, c);
26517+ info_p->access->submit_command(info_p, c);
26518 complete = pollcomplete(ctlr);
26519
26520 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26521@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26522 * we check the new geometry. Then turn interrupts back on when
26523 * we're done.
26524 */
26525- host->access.set_intr_mask(host, 0);
26526+ host->access->set_intr_mask(host, 0);
26527 getgeometry(ctlr);
26528- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26529+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26530
26531 for(i=0; i<NWD; i++) {
26532 struct gendisk *disk = ida_gendisk[ctlr][i];
26533@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26534 /* Wait (up to 2 seconds) for a command to complete */
26535
26536 for (i = 200000; i > 0; i--) {
26537- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26538+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26539 if (done == 0) {
26540 udelay(10); /* a short fixed delay */
26541 } else
26542diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26543index be73e9d..7fbf140 100644
26544--- a/drivers/block/cpqarray.h
26545+++ b/drivers/block/cpqarray.h
26546@@ -99,7 +99,7 @@ struct ctlr_info {
26547 drv_info_t drv[NWD];
26548 struct proc_dir_entry *proc;
26549
26550- struct access_method access;
26551+ struct access_method *access;
26552
26553 cmdlist_t *reqQ;
26554 cmdlist_t *cmpQ;
26555diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26556index 9cf2035..bffca95 100644
26557--- a/drivers/block/drbd/drbd_int.h
26558+++ b/drivers/block/drbd/drbd_int.h
26559@@ -736,7 +736,7 @@ struct drbd_request;
26560 struct drbd_epoch {
26561 struct list_head list;
26562 unsigned int barrier_nr;
26563- atomic_t epoch_size; /* increased on every request added. */
26564+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26565 atomic_t active; /* increased on every req. added, and dec on every finished. */
26566 unsigned long flags;
26567 };
26568@@ -1108,7 +1108,7 @@ struct drbd_conf {
26569 void *int_dig_in;
26570 void *int_dig_vv;
26571 wait_queue_head_t seq_wait;
26572- atomic_t packet_seq;
26573+ atomic_unchecked_t packet_seq;
26574 unsigned int peer_seq;
26575 spinlock_t peer_seq_lock;
26576 unsigned int minor;
26577@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26578
26579 static inline void drbd_tcp_cork(struct socket *sock)
26580 {
26581- int __user val = 1;
26582+ int val = 1;
26583 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26584- (char __user *)&val, sizeof(val));
26585+ (char __force_user *)&val, sizeof(val));
26586 }
26587
26588 static inline void drbd_tcp_uncork(struct socket *sock)
26589 {
26590- int __user val = 0;
26591+ int val = 0;
26592 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26593- (char __user *)&val, sizeof(val));
26594+ (char __force_user *)&val, sizeof(val));
26595 }
26596
26597 static inline void drbd_tcp_nodelay(struct socket *sock)
26598 {
26599- int __user val = 1;
26600+ int val = 1;
26601 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26602- (char __user *)&val, sizeof(val));
26603+ (char __force_user *)&val, sizeof(val));
26604 }
26605
26606 static inline void drbd_tcp_quickack(struct socket *sock)
26607 {
26608- int __user val = 2;
26609+ int val = 2;
26610 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26611- (char __user *)&val, sizeof(val));
26612+ (char __force_user *)&val, sizeof(val));
26613 }
26614
26615 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26616diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26617index 0358e55..bc33689 100644
26618--- a/drivers/block/drbd/drbd_main.c
26619+++ b/drivers/block/drbd/drbd_main.c
26620@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26621 p.sector = sector;
26622 p.block_id = block_id;
26623 p.blksize = blksize;
26624- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26625+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26626
26627 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26628 return false;
26629@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26630 p.sector = cpu_to_be64(req->sector);
26631 p.block_id = (unsigned long)req;
26632 p.seq_num = cpu_to_be32(req->seq_num =
26633- atomic_add_return(1, &mdev->packet_seq));
26634+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26635
26636 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26637
26638@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26639 atomic_set(&mdev->unacked_cnt, 0);
26640 atomic_set(&mdev->local_cnt, 0);
26641 atomic_set(&mdev->net_cnt, 0);
26642- atomic_set(&mdev->packet_seq, 0);
26643+ atomic_set_unchecked(&mdev->packet_seq, 0);
26644 atomic_set(&mdev->pp_in_use, 0);
26645 atomic_set(&mdev->pp_in_use_by_net, 0);
26646 atomic_set(&mdev->rs_sect_in, 0);
26647@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26648 mdev->receiver.t_state);
26649
26650 /* no need to lock it, I'm the only thread alive */
26651- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26652- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26653+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26654+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26655 mdev->al_writ_cnt =
26656 mdev->bm_writ_cnt =
26657 mdev->read_cnt =
26658diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26659index af2a250..219c74b 100644
26660--- a/drivers/block/drbd/drbd_nl.c
26661+++ b/drivers/block/drbd/drbd_nl.c
26662@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26663 module_put(THIS_MODULE);
26664 }
26665
26666-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26667+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26668
26669 static unsigned short *
26670 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26671@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26672 cn_reply->id.idx = CN_IDX_DRBD;
26673 cn_reply->id.val = CN_VAL_DRBD;
26674
26675- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26676+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26677 cn_reply->ack = 0; /* not used here. */
26678 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26679 (int)((char *)tl - (char *)reply->tag_list);
26680@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26681 cn_reply->id.idx = CN_IDX_DRBD;
26682 cn_reply->id.val = CN_VAL_DRBD;
26683
26684- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26685+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26686 cn_reply->ack = 0; /* not used here. */
26687 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26688 (int)((char *)tl - (char *)reply->tag_list);
26689@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26690 cn_reply->id.idx = CN_IDX_DRBD;
26691 cn_reply->id.val = CN_VAL_DRBD;
26692
26693- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26694+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26695 cn_reply->ack = 0; // not used here.
26696 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26697 (int)((char*)tl - (char*)reply->tag_list);
26698@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26699 cn_reply->id.idx = CN_IDX_DRBD;
26700 cn_reply->id.val = CN_VAL_DRBD;
26701
26702- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26703+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26704 cn_reply->ack = 0; /* not used here. */
26705 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26706 (int)((char *)tl - (char *)reply->tag_list);
26707diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26708index 43beaca..4a5b1dd 100644
26709--- a/drivers/block/drbd/drbd_receiver.c
26710+++ b/drivers/block/drbd/drbd_receiver.c
26711@@ -894,7 +894,7 @@ retry:
26712 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26713 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26714
26715- atomic_set(&mdev->packet_seq, 0);
26716+ atomic_set_unchecked(&mdev->packet_seq, 0);
26717 mdev->peer_seq = 0;
26718
26719 drbd_thread_start(&mdev->asender);
26720@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26721 do {
26722 next_epoch = NULL;
26723
26724- epoch_size = atomic_read(&epoch->epoch_size);
26725+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26726
26727 switch (ev & ~EV_CLEANUP) {
26728 case EV_PUT:
26729@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26730 rv = FE_DESTROYED;
26731 } else {
26732 epoch->flags = 0;
26733- atomic_set(&epoch->epoch_size, 0);
26734+ atomic_set_unchecked(&epoch->epoch_size, 0);
26735 /* atomic_set(&epoch->active, 0); is already zero */
26736 if (rv == FE_STILL_LIVE)
26737 rv = FE_RECYCLED;
26738@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26739 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26740 drbd_flush(mdev);
26741
26742- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26743+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26744 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26745 if (epoch)
26746 break;
26747 }
26748
26749 epoch = mdev->current_epoch;
26750- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26751+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26752
26753 D_ASSERT(atomic_read(&epoch->active) == 0);
26754 D_ASSERT(epoch->flags == 0);
26755@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26756 }
26757
26758 epoch->flags = 0;
26759- atomic_set(&epoch->epoch_size, 0);
26760+ atomic_set_unchecked(&epoch->epoch_size, 0);
26761 atomic_set(&epoch->active, 0);
26762
26763 spin_lock(&mdev->epoch_lock);
26764- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26765+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26766 list_add(&epoch->list, &mdev->current_epoch->list);
26767 mdev->current_epoch = epoch;
26768 mdev->epochs++;
26769@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26770 spin_unlock(&mdev->peer_seq_lock);
26771
26772 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26773- atomic_inc(&mdev->current_epoch->epoch_size);
26774+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26775 return drbd_drain_block(mdev, data_size);
26776 }
26777
26778@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26779
26780 spin_lock(&mdev->epoch_lock);
26781 e->epoch = mdev->current_epoch;
26782- atomic_inc(&e->epoch->epoch_size);
26783+ atomic_inc_unchecked(&e->epoch->epoch_size);
26784 atomic_inc(&e->epoch->active);
26785 spin_unlock(&mdev->epoch_lock);
26786
26787@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26788 D_ASSERT(list_empty(&mdev->done_ee));
26789
26790 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26791- atomic_set(&mdev->current_epoch->epoch_size, 0);
26792+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26793 D_ASSERT(list_empty(&mdev->current_epoch->list));
26794 }
26795
26796diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26797index 1e888c9..05cf1b0 100644
26798--- a/drivers/block/loop.c
26799+++ b/drivers/block/loop.c
26800@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26801 mm_segment_t old_fs = get_fs();
26802
26803 set_fs(get_ds());
26804- bw = file->f_op->write(file, buf, len, &pos);
26805+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26806 set_fs(old_fs);
26807 if (likely(bw == len))
26808 return 0;
26809diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26810index 4364303..9adf4ee 100644
26811--- a/drivers/char/Kconfig
26812+++ b/drivers/char/Kconfig
26813@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26814
26815 config DEVKMEM
26816 bool "/dev/kmem virtual device support"
26817- default y
26818+ default n
26819+ depends on !GRKERNSEC_KMEM
26820 help
26821 Say Y here if you want to support the /dev/kmem device. The
26822 /dev/kmem device is rarely used, but can be used for certain
26823@@ -596,6 +597,7 @@ config DEVPORT
26824 bool
26825 depends on !M68K
26826 depends on ISA || PCI
26827+ depends on !GRKERNSEC_KMEM
26828 default y
26829
26830 source "drivers/s390/char/Kconfig"
26831diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26832index 2e04433..22afc64 100644
26833--- a/drivers/char/agp/frontend.c
26834+++ b/drivers/char/agp/frontend.c
26835@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26836 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26837 return -EFAULT;
26838
26839- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26840+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26841 return -EFAULT;
26842
26843 client = agp_find_client_by_pid(reserve.pid);
26844diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26845index 095ab90..afad0a4 100644
26846--- a/drivers/char/briq_panel.c
26847+++ b/drivers/char/briq_panel.c
26848@@ -9,6 +9,7 @@
26849 #include <linux/types.h>
26850 #include <linux/errno.h>
26851 #include <linux/tty.h>
26852+#include <linux/mutex.h>
26853 #include <linux/timer.h>
26854 #include <linux/kernel.h>
26855 #include <linux/wait.h>
26856@@ -34,6 +35,7 @@ static int vfd_is_open;
26857 static unsigned char vfd[40];
26858 static int vfd_cursor;
26859 static unsigned char ledpb, led;
26860+static DEFINE_MUTEX(vfd_mutex);
26861
26862 static void update_vfd(void)
26863 {
26864@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26865 if (!vfd_is_open)
26866 return -EBUSY;
26867
26868+ mutex_lock(&vfd_mutex);
26869 for (;;) {
26870 char c;
26871 if (!indx)
26872 break;
26873- if (get_user(c, buf))
26874+ if (get_user(c, buf)) {
26875+ mutex_unlock(&vfd_mutex);
26876 return -EFAULT;
26877+ }
26878 if (esc) {
26879 set_led(c);
26880 esc = 0;
26881@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26882 buf++;
26883 }
26884 update_vfd();
26885+ mutex_unlock(&vfd_mutex);
26886
26887 return len;
26888 }
26889diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26890index f773a9d..65cd683 100644
26891--- a/drivers/char/genrtc.c
26892+++ b/drivers/char/genrtc.c
26893@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26894 switch (cmd) {
26895
26896 case RTC_PLL_GET:
26897+ memset(&pll, 0, sizeof(pll));
26898 if (get_rtc_pll(&pll))
26899 return -EINVAL;
26900 else
26901diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26902index 0833896..cccce52 100644
26903--- a/drivers/char/hpet.c
26904+++ b/drivers/char/hpet.c
26905@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26906 }
26907
26908 static int
26909-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26910+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26911 struct hpet_info *info)
26912 {
26913 struct hpet_timer __iomem *timer;
26914diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26915index 58c0e63..46c16bf 100644
26916--- a/drivers/char/ipmi/ipmi_msghandler.c
26917+++ b/drivers/char/ipmi/ipmi_msghandler.c
26918@@ -415,7 +415,7 @@ struct ipmi_smi {
26919 struct proc_dir_entry *proc_dir;
26920 char proc_dir_name[10];
26921
26922- atomic_t stats[IPMI_NUM_STATS];
26923+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26924
26925 /*
26926 * run_to_completion duplicate of smb_info, smi_info
26927@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26928
26929
26930 #define ipmi_inc_stat(intf, stat) \
26931- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26932+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26933 #define ipmi_get_stat(intf, stat) \
26934- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26935+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26936
26937 static int is_lan_addr(struct ipmi_addr *addr)
26938 {
26939@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26940 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26941 init_waitqueue_head(&intf->waitq);
26942 for (i = 0; i < IPMI_NUM_STATS; i++)
26943- atomic_set(&intf->stats[i], 0);
26944+ atomic_set_unchecked(&intf->stats[i], 0);
26945
26946 intf->proc_dir = NULL;
26947
26948diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26949index 9397ab4..d01bee1 100644
26950--- a/drivers/char/ipmi/ipmi_si_intf.c
26951+++ b/drivers/char/ipmi/ipmi_si_intf.c
26952@@ -277,7 +277,7 @@ struct smi_info {
26953 unsigned char slave_addr;
26954
26955 /* Counters and things for the proc filesystem. */
26956- atomic_t stats[SI_NUM_STATS];
26957+ atomic_unchecked_t stats[SI_NUM_STATS];
26958
26959 struct task_struct *thread;
26960
26961@@ -286,9 +286,9 @@ struct smi_info {
26962 };
26963
26964 #define smi_inc_stat(smi, stat) \
26965- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26966+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26967 #define smi_get_stat(smi, stat) \
26968- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26969+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26970
26971 #define SI_MAX_PARMS 4
26972
26973@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26974 atomic_set(&new_smi->req_events, 0);
26975 new_smi->run_to_completion = 0;
26976 for (i = 0; i < SI_NUM_STATS; i++)
26977- atomic_set(&new_smi->stats[i], 0);
26978+ atomic_set_unchecked(&new_smi->stats[i], 0);
26979
26980 new_smi->interrupt_disabled = 1;
26981 atomic_set(&new_smi->stop_operation, 0);
26982diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26983index 1aeaaba..e018570 100644
26984--- a/drivers/char/mbcs.c
26985+++ b/drivers/char/mbcs.c
26986@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26987 return 0;
26988 }
26989
26990-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26991+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26992 {
26993 .part_num = MBCS_PART_NUM,
26994 .mfg_num = MBCS_MFG_NUM,
26995diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26996index 1451790..f705c30 100644
26997--- a/drivers/char/mem.c
26998+++ b/drivers/char/mem.c
26999@@ -18,6 +18,7 @@
27000 #include <linux/raw.h>
27001 #include <linux/tty.h>
27002 #include <linux/capability.h>
27003+#include <linux/security.h>
27004 #include <linux/ptrace.h>
27005 #include <linux/device.h>
27006 #include <linux/highmem.h>
27007@@ -35,6 +36,10 @@
27008 # include <linux/efi.h>
27009 #endif
27010
27011+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27012+extern const struct file_operations grsec_fops;
27013+#endif
27014+
27015 static inline unsigned long size_inside_page(unsigned long start,
27016 unsigned long size)
27017 {
27018@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27019
27020 while (cursor < to) {
27021 if (!devmem_is_allowed(pfn)) {
27022+#ifdef CONFIG_GRKERNSEC_KMEM
27023+ gr_handle_mem_readwrite(from, to);
27024+#else
27025 printk(KERN_INFO
27026 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27027 current->comm, from, to);
27028+#endif
27029 return 0;
27030 }
27031 cursor += PAGE_SIZE;
27032@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27033 }
27034 return 1;
27035 }
27036+#elif defined(CONFIG_GRKERNSEC_KMEM)
27037+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27038+{
27039+ return 0;
27040+}
27041 #else
27042 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27043 {
27044@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27045
27046 while (count > 0) {
27047 unsigned long remaining;
27048+ char *temp;
27049
27050 sz = size_inside_page(p, count);
27051
27052@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27053 if (!ptr)
27054 return -EFAULT;
27055
27056- remaining = copy_to_user(buf, ptr, sz);
27057+#ifdef CONFIG_PAX_USERCOPY
27058+ temp = kmalloc(sz, GFP_KERNEL);
27059+ if (!temp) {
27060+ unxlate_dev_mem_ptr(p, ptr);
27061+ return -ENOMEM;
27062+ }
27063+ memcpy(temp, ptr, sz);
27064+#else
27065+ temp = ptr;
27066+#endif
27067+
27068+ remaining = copy_to_user(buf, temp, sz);
27069+
27070+#ifdef CONFIG_PAX_USERCOPY
27071+ kfree(temp);
27072+#endif
27073+
27074 unxlate_dev_mem_ptr(p, ptr);
27075 if (remaining)
27076 return -EFAULT;
27077@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27078 size_t count, loff_t *ppos)
27079 {
27080 unsigned long p = *ppos;
27081- ssize_t low_count, read, sz;
27082+ ssize_t low_count, read, sz, err = 0;
27083 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27084- int err = 0;
27085
27086 read = 0;
27087 if (p < (unsigned long) high_memory) {
27088@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27089 }
27090 #endif
27091 while (low_count > 0) {
27092+ char *temp;
27093+
27094 sz = size_inside_page(p, low_count);
27095
27096 /*
27097@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27098 */
27099 kbuf = xlate_dev_kmem_ptr((char *)p);
27100
27101- if (copy_to_user(buf, kbuf, sz))
27102+#ifdef CONFIG_PAX_USERCOPY
27103+ temp = kmalloc(sz, GFP_KERNEL);
27104+ if (!temp)
27105+ return -ENOMEM;
27106+ memcpy(temp, kbuf, sz);
27107+#else
27108+ temp = kbuf;
27109+#endif
27110+
27111+ err = copy_to_user(buf, temp, sz);
27112+
27113+#ifdef CONFIG_PAX_USERCOPY
27114+ kfree(temp);
27115+#endif
27116+
27117+ if (err)
27118 return -EFAULT;
27119 buf += sz;
27120 p += sz;
27121@@ -867,6 +914,9 @@ static const struct memdev {
27122 #ifdef CONFIG_CRASH_DUMP
27123 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27124 #endif
27125+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27126+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27127+#endif
27128 };
27129
27130 static int memory_open(struct inode *inode, struct file *filp)
27131diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27132index da3cfee..a5a6606 100644
27133--- a/drivers/char/nvram.c
27134+++ b/drivers/char/nvram.c
27135@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27136
27137 spin_unlock_irq(&rtc_lock);
27138
27139- if (copy_to_user(buf, contents, tmp - contents))
27140+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27141 return -EFAULT;
27142
27143 *ppos = i;
27144diff --git a/drivers/char/random.c b/drivers/char/random.c
27145index 6035ab8..bdfe4fd 100644
27146--- a/drivers/char/random.c
27147+++ b/drivers/char/random.c
27148@@ -261,8 +261,13 @@
27149 /*
27150 * Configuration information
27151 */
27152+#ifdef CONFIG_GRKERNSEC_RANDNET
27153+#define INPUT_POOL_WORDS 512
27154+#define OUTPUT_POOL_WORDS 128
27155+#else
27156 #define INPUT_POOL_WORDS 128
27157 #define OUTPUT_POOL_WORDS 32
27158+#endif
27159 #define SEC_XFER_SIZE 512
27160 #define EXTRACT_SIZE 10
27161
27162@@ -300,10 +305,17 @@ static struct poolinfo {
27163 int poolwords;
27164 int tap1, tap2, tap3, tap4, tap5;
27165 } poolinfo_table[] = {
27166+#ifdef CONFIG_GRKERNSEC_RANDNET
27167+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27168+ { 512, 411, 308, 208, 104, 1 },
27169+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27170+ { 128, 103, 76, 51, 25, 1 },
27171+#else
27172 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27173 { 128, 103, 76, 51, 25, 1 },
27174 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27175 { 32, 26, 20, 14, 7, 1 },
27176+#endif
27177 #if 0
27178 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27179 { 2048, 1638, 1231, 819, 411, 1 },
27180@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27181
27182 extract_buf(r, tmp);
27183 i = min_t(int, nbytes, EXTRACT_SIZE);
27184- if (copy_to_user(buf, tmp, i)) {
27185+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27186 ret = -EFAULT;
27187 break;
27188 }
27189@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27190 #include <linux/sysctl.h>
27191
27192 static int min_read_thresh = 8, min_write_thresh;
27193-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27194+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27195 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27196 static char sysctl_bootid[16];
27197
27198diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27199index 1ee8ce7..b778bef 100644
27200--- a/drivers/char/sonypi.c
27201+++ b/drivers/char/sonypi.c
27202@@ -55,6 +55,7 @@
27203 #include <asm/uaccess.h>
27204 #include <asm/io.h>
27205 #include <asm/system.h>
27206+#include <asm/local.h>
27207
27208 #include <linux/sonypi.h>
27209
27210@@ -491,7 +492,7 @@ static struct sonypi_device {
27211 spinlock_t fifo_lock;
27212 wait_queue_head_t fifo_proc_list;
27213 struct fasync_struct *fifo_async;
27214- int open_count;
27215+ local_t open_count;
27216 int model;
27217 struct input_dev *input_jog_dev;
27218 struct input_dev *input_key_dev;
27219@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27220 static int sonypi_misc_release(struct inode *inode, struct file *file)
27221 {
27222 mutex_lock(&sonypi_device.lock);
27223- sonypi_device.open_count--;
27224+ local_dec(&sonypi_device.open_count);
27225 mutex_unlock(&sonypi_device.lock);
27226 return 0;
27227 }
27228@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27229 {
27230 mutex_lock(&sonypi_device.lock);
27231 /* Flush input queue on first open */
27232- if (!sonypi_device.open_count)
27233+ if (!local_read(&sonypi_device.open_count))
27234 kfifo_reset(&sonypi_device.fifo);
27235- sonypi_device.open_count++;
27236+ local_inc(&sonypi_device.open_count);
27237 mutex_unlock(&sonypi_device.lock);
27238
27239 return 0;
27240diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27241index 361a1df..2471eee 100644
27242--- a/drivers/char/tpm/tpm.c
27243+++ b/drivers/char/tpm/tpm.c
27244@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27245 chip->vendor.req_complete_val)
27246 goto out_recv;
27247
27248- if ((status == chip->vendor.req_canceled)) {
27249+ if (status == chip->vendor.req_canceled) {
27250 dev_err(chip->dev, "Operation Canceled\n");
27251 rc = -ECANCELED;
27252 goto out;
27253diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27254index 0636520..169c1d0 100644
27255--- a/drivers/char/tpm/tpm_bios.c
27256+++ b/drivers/char/tpm/tpm_bios.c
27257@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27258 event = addr;
27259
27260 if ((event->event_type == 0 && event->event_size == 0) ||
27261- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27262+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27263 return NULL;
27264
27265 return addr;
27266@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27267 return NULL;
27268
27269 if ((event->event_type == 0 && event->event_size == 0) ||
27270- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27271+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27272 return NULL;
27273
27274 (*pos)++;
27275@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27276 int i;
27277
27278 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27279- seq_putc(m, data[i]);
27280+ if (!seq_putc(m, data[i]))
27281+ return -EFAULT;
27282
27283 return 0;
27284 }
27285@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27286 log->bios_event_log_end = log->bios_event_log + len;
27287
27288 virt = acpi_os_map_memory(start, len);
27289+ if (!virt) {
27290+ kfree(log->bios_event_log);
27291+ log->bios_event_log = NULL;
27292+ return -EFAULT;
27293+ }
27294
27295- memcpy(log->bios_event_log, virt, len);
27296+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27297
27298 acpi_os_unmap_memory(virt, len);
27299 return 0;
27300diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27301index 8e3c46d..c139b99 100644
27302--- a/drivers/char/virtio_console.c
27303+++ b/drivers/char/virtio_console.c
27304@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27305 if (to_user) {
27306 ssize_t ret;
27307
27308- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27309+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27310 if (ret)
27311 return -EFAULT;
27312 } else {
27313@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27314 if (!port_has_data(port) && !port->host_connected)
27315 return 0;
27316
27317- return fill_readbuf(port, ubuf, count, true);
27318+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27319 }
27320
27321 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27322diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27323index eb1d864..39ee5a7 100644
27324--- a/drivers/dma/dmatest.c
27325+++ b/drivers/dma/dmatest.c
27326@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27327 }
27328 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27329 cnt = dmatest_add_threads(dtc, DMA_PQ);
27330- thread_count += cnt > 0 ?: 0;
27331+ thread_count += cnt > 0 ? cnt : 0;
27332 }
27333
27334 pr_info("dmatest: Started %u threads using %s\n",
27335diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27336index c9eee6d..f9d5280 100644
27337--- a/drivers/edac/amd64_edac.c
27338+++ b/drivers/edac/amd64_edac.c
27339@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27340 * PCI core identifies what devices are on a system during boot, and then
27341 * inquiry this table to see if this driver is for a given device found.
27342 */
27343-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27344+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27345 {
27346 .vendor = PCI_VENDOR_ID_AMD,
27347 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27348diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27349index e47e73b..348e0bd 100644
27350--- a/drivers/edac/amd76x_edac.c
27351+++ b/drivers/edac/amd76x_edac.c
27352@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27353 edac_mc_free(mci);
27354 }
27355
27356-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27357+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27358 {
27359 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27360 AMD762},
27361diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27362index 1af531a..3a8ff27 100644
27363--- a/drivers/edac/e752x_edac.c
27364+++ b/drivers/edac/e752x_edac.c
27365@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27366 edac_mc_free(mci);
27367 }
27368
27369-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27370+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27371 {
27372 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27373 E7520},
27374diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27375index 6ffb6d2..383d8d7 100644
27376--- a/drivers/edac/e7xxx_edac.c
27377+++ b/drivers/edac/e7xxx_edac.c
27378@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27379 edac_mc_free(mci);
27380 }
27381
27382-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27383+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27384 {
27385 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27386 E7205},
27387diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27388index 495198a..ac08c85 100644
27389--- a/drivers/edac/edac_pci_sysfs.c
27390+++ b/drivers/edac/edac_pci_sysfs.c
27391@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27392 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27393 static int edac_pci_poll_msec = 1000; /* one second workq period */
27394
27395-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27396-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27397+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27398+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27399
27400 static struct kobject *edac_pci_top_main_kobj;
27401 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27402@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27403 edac_printk(KERN_CRIT, EDAC_PCI,
27404 "Signaled System Error on %s\n",
27405 pci_name(dev));
27406- atomic_inc(&pci_nonparity_count);
27407+ atomic_inc_unchecked(&pci_nonparity_count);
27408 }
27409
27410 if (status & (PCI_STATUS_PARITY)) {
27411@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27412 "Master Data Parity Error on %s\n",
27413 pci_name(dev));
27414
27415- atomic_inc(&pci_parity_count);
27416+ atomic_inc_unchecked(&pci_parity_count);
27417 }
27418
27419 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27420@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27421 "Detected Parity Error on %s\n",
27422 pci_name(dev));
27423
27424- atomic_inc(&pci_parity_count);
27425+ atomic_inc_unchecked(&pci_parity_count);
27426 }
27427 }
27428
27429@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27430 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27431 "Signaled System Error on %s\n",
27432 pci_name(dev));
27433- atomic_inc(&pci_nonparity_count);
27434+ atomic_inc_unchecked(&pci_nonparity_count);
27435 }
27436
27437 if (status & (PCI_STATUS_PARITY)) {
27438@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27439 "Master Data Parity Error on "
27440 "%s\n", pci_name(dev));
27441
27442- atomic_inc(&pci_parity_count);
27443+ atomic_inc_unchecked(&pci_parity_count);
27444 }
27445
27446 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27447@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27448 "Detected Parity Error on %s\n",
27449 pci_name(dev));
27450
27451- atomic_inc(&pci_parity_count);
27452+ atomic_inc_unchecked(&pci_parity_count);
27453 }
27454 }
27455 }
27456@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27457 if (!check_pci_errors)
27458 return;
27459
27460- before_count = atomic_read(&pci_parity_count);
27461+ before_count = atomic_read_unchecked(&pci_parity_count);
27462
27463 /* scan all PCI devices looking for a Parity Error on devices and
27464 * bridges.
27465@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27466 /* Only if operator has selected panic on PCI Error */
27467 if (edac_pci_get_panic_on_pe()) {
27468 /* If the count is different 'after' from 'before' */
27469- if (before_count != atomic_read(&pci_parity_count))
27470+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27471 panic("EDAC: PCI Parity Error");
27472 }
27473 }
27474diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27475index c0510b3..6e2a954 100644
27476--- a/drivers/edac/i3000_edac.c
27477+++ b/drivers/edac/i3000_edac.c
27478@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27479 edac_mc_free(mci);
27480 }
27481
27482-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27483+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27484 {
27485 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27486 I3000},
27487diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27488index aa08497..7e6822a 100644
27489--- a/drivers/edac/i3200_edac.c
27490+++ b/drivers/edac/i3200_edac.c
27491@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27492 edac_mc_free(mci);
27493 }
27494
27495-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27496+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27497 {
27498 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27499 I3200},
27500diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27501index 4dc3ac2..67d05a6 100644
27502--- a/drivers/edac/i5000_edac.c
27503+++ b/drivers/edac/i5000_edac.c
27504@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27505 *
27506 * The "E500P" device is the first device supported.
27507 */
27508-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27509+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27510 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27511 .driver_data = I5000P},
27512
27513diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27514index bcbdeec..9886d16 100644
27515--- a/drivers/edac/i5100_edac.c
27516+++ b/drivers/edac/i5100_edac.c
27517@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27518 edac_mc_free(mci);
27519 }
27520
27521-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27522+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27523 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27524 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27525 { 0, }
27526diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27527index 74d6ec34..baff517 100644
27528--- a/drivers/edac/i5400_edac.c
27529+++ b/drivers/edac/i5400_edac.c
27530@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27531 *
27532 * The "E500P" device is the first device supported.
27533 */
27534-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27535+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27536 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27537 {0,} /* 0 terminated list. */
27538 };
27539diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27540index 6104dba..e7ea8e1 100644
27541--- a/drivers/edac/i7300_edac.c
27542+++ b/drivers/edac/i7300_edac.c
27543@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27544 *
27545 * Has only 8086:360c PCI ID
27546 */
27547-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27548+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27549 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27550 {0,} /* 0 terminated list. */
27551 };
27552diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27553index 70ad892..178943c 100644
27554--- a/drivers/edac/i7core_edac.c
27555+++ b/drivers/edac/i7core_edac.c
27556@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27557 /*
27558 * pci_device_id table for which devices we are looking for
27559 */
27560-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27561+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27562 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27563 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27564 {0,} /* 0 terminated list. */
27565diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27566index 4329d39..f3022ef 100644
27567--- a/drivers/edac/i82443bxgx_edac.c
27568+++ b/drivers/edac/i82443bxgx_edac.c
27569@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27570
27571 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27572
27573-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27574+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27575 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27576 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27577 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27578diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27579index 931a057..fd28340 100644
27580--- a/drivers/edac/i82860_edac.c
27581+++ b/drivers/edac/i82860_edac.c
27582@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27583 edac_mc_free(mci);
27584 }
27585
27586-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27587+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27588 {
27589 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27590 I82860},
27591diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27592index 33864c6..01edc61 100644
27593--- a/drivers/edac/i82875p_edac.c
27594+++ b/drivers/edac/i82875p_edac.c
27595@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27596 edac_mc_free(mci);
27597 }
27598
27599-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27600+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27601 {
27602 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27603 I82875P},
27604diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27605index a5da732..983363b 100644
27606--- a/drivers/edac/i82975x_edac.c
27607+++ b/drivers/edac/i82975x_edac.c
27608@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27609 edac_mc_free(mci);
27610 }
27611
27612-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27613+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27614 {
27615 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27616 I82975X
27617diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27618index 0106747..0b40417 100644
27619--- a/drivers/edac/mce_amd.h
27620+++ b/drivers/edac/mce_amd.h
27621@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27622 bool (*dc_mce)(u16, u8);
27623 bool (*ic_mce)(u16, u8);
27624 bool (*nb_mce)(u16, u8);
27625-};
27626+} __no_const;
27627
27628 void amd_report_gart_errors(bool);
27629 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27630diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27631index b153674..ad2ba9b 100644
27632--- a/drivers/edac/r82600_edac.c
27633+++ b/drivers/edac/r82600_edac.c
27634@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27635 edac_mc_free(mci);
27636 }
27637
27638-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27639+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27640 {
27641 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27642 },
27643diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27644index 7a402bf..af0b211 100644
27645--- a/drivers/edac/sb_edac.c
27646+++ b/drivers/edac/sb_edac.c
27647@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27648 /*
27649 * pci_device_id table for which devices we are looking for
27650 */
27651-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27652+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27653 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27654 {0,} /* 0 terminated list. */
27655 };
27656diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27657index b6f47de..c5acf3a 100644
27658--- a/drivers/edac/x38_edac.c
27659+++ b/drivers/edac/x38_edac.c
27660@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27661 edac_mc_free(mci);
27662 }
27663
27664-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27665+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27666 {
27667 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27668 X38},
27669diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27670index 85661b0..c784559a 100644
27671--- a/drivers/firewire/core-card.c
27672+++ b/drivers/firewire/core-card.c
27673@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27674
27675 void fw_core_remove_card(struct fw_card *card)
27676 {
27677- struct fw_card_driver dummy_driver = dummy_driver_template;
27678+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27679
27680 card->driver->update_phy_reg(card, 4,
27681 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27682diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27683index 4799393..37bd3ab 100644
27684--- a/drivers/firewire/core-cdev.c
27685+++ b/drivers/firewire/core-cdev.c
27686@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27687 int ret;
27688
27689 if ((request->channels == 0 && request->bandwidth == 0) ||
27690- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27691- request->bandwidth < 0)
27692+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27693 return -EINVAL;
27694
27695 r = kmalloc(sizeof(*r), GFP_KERNEL);
27696diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27697index 855ab3f..11f4bbd 100644
27698--- a/drivers/firewire/core-transaction.c
27699+++ b/drivers/firewire/core-transaction.c
27700@@ -37,6 +37,7 @@
27701 #include <linux/timer.h>
27702 #include <linux/types.h>
27703 #include <linux/workqueue.h>
27704+#include <linux/sched.h>
27705
27706 #include <asm/byteorder.h>
27707
27708diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27709index b45be57..5fad18b 100644
27710--- a/drivers/firewire/core.h
27711+++ b/drivers/firewire/core.h
27712@@ -101,6 +101,7 @@ struct fw_card_driver {
27713
27714 int (*stop_iso)(struct fw_iso_context *ctx);
27715 };
27716+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27717
27718 void fw_card_initialize(struct fw_card *card,
27719 const struct fw_card_driver *driver, struct device *device);
27720diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27721index 153980b..4b4d046 100644
27722--- a/drivers/firmware/dmi_scan.c
27723+++ b/drivers/firmware/dmi_scan.c
27724@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27725 }
27726 }
27727 else {
27728- /*
27729- * no iounmap() for that ioremap(); it would be a no-op, but
27730- * it's so early in setup that sucker gets confused into doing
27731- * what it shouldn't if we actually call it.
27732- */
27733 p = dmi_ioremap(0xF0000, 0x10000);
27734 if (p == NULL)
27735 goto error;
27736@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27737 if (buf == NULL)
27738 return -1;
27739
27740- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27741+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27742
27743 iounmap(buf);
27744 return 0;
27745diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27746index 98723cb..10ca85b 100644
27747--- a/drivers/gpio/gpio-vr41xx.c
27748+++ b/drivers/gpio/gpio-vr41xx.c
27749@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27750 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27751 maskl, pendl, maskh, pendh);
27752
27753- atomic_inc(&irq_err_count);
27754+ atomic_inc_unchecked(&irq_err_count);
27755
27756 return -EINVAL;
27757 }
27758diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27759index 8323fc3..5c1d755 100644
27760--- a/drivers/gpu/drm/drm_crtc.c
27761+++ b/drivers/gpu/drm/drm_crtc.c
27762@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27763 */
27764 if ((out_resp->count_modes >= mode_count) && mode_count) {
27765 copied = 0;
27766- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27767+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27768 list_for_each_entry(mode, &connector->modes, head) {
27769 drm_crtc_convert_to_umode(&u_mode, mode);
27770 if (copy_to_user(mode_ptr + copied,
27771@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27772
27773 if ((out_resp->count_props >= props_count) && props_count) {
27774 copied = 0;
27775- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27776- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27777+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27778+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27779 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27780 if (connector->property_ids[i] != 0) {
27781 if (put_user(connector->property_ids[i],
27782@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27783
27784 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27785 copied = 0;
27786- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27787+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27788 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27789 if (connector->encoder_ids[i] != 0) {
27790 if (put_user(connector->encoder_ids[i],
27791@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27792 }
27793
27794 for (i = 0; i < crtc_req->count_connectors; i++) {
27795- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27796+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27797 if (get_user(out_id, &set_connectors_ptr[i])) {
27798 ret = -EFAULT;
27799 goto out;
27800@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27801 fb = obj_to_fb(obj);
27802
27803 num_clips = r->num_clips;
27804- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27805+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27806
27807 if (!num_clips != !clips_ptr) {
27808 ret = -EINVAL;
27809@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27810 out_resp->flags = property->flags;
27811
27812 if ((out_resp->count_values >= value_count) && value_count) {
27813- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27814+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27815 for (i = 0; i < value_count; i++) {
27816 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27817 ret = -EFAULT;
27818@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27819 if (property->flags & DRM_MODE_PROP_ENUM) {
27820 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27821 copied = 0;
27822- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27823+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27824 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27825
27826 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27827@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27828 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27829 copied = 0;
27830 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27831- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27832+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27833
27834 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27835 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27836@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27837 struct drm_mode_get_blob *out_resp = data;
27838 struct drm_property_blob *blob;
27839 int ret = 0;
27840- void *blob_ptr;
27841+ void __user *blob_ptr;
27842
27843 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27844 return -EINVAL;
27845@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27846 blob = obj_to_blob(obj);
27847
27848 if (out_resp->length == blob->length) {
27849- blob_ptr = (void *)(unsigned long)out_resp->data;
27850+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27851 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27852 ret = -EFAULT;
27853 goto done;
27854diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27855index d2619d7..bd6bd00 100644
27856--- a/drivers/gpu/drm/drm_crtc_helper.c
27857+++ b/drivers/gpu/drm/drm_crtc_helper.c
27858@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27859 struct drm_crtc *tmp;
27860 int crtc_mask = 1;
27861
27862- WARN(!crtc, "checking null crtc?\n");
27863+ BUG_ON(!crtc);
27864
27865 dev = crtc->dev;
27866
27867diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27868index 40c187c..5746164 100644
27869--- a/drivers/gpu/drm/drm_drv.c
27870+++ b/drivers/gpu/drm/drm_drv.c
27871@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27872 /**
27873 * Copy and IOCTL return string to user space
27874 */
27875-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27876+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27877 {
27878 int len;
27879
27880@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27881
27882 dev = file_priv->minor->dev;
27883 atomic_inc(&dev->ioctl_count);
27884- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27885+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27886 ++file_priv->ioctl_count;
27887
27888 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27889diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27890index 828bf65..cdaa0e9 100644
27891--- a/drivers/gpu/drm/drm_fops.c
27892+++ b/drivers/gpu/drm/drm_fops.c
27893@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27894 }
27895
27896 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27897- atomic_set(&dev->counts[i], 0);
27898+ atomic_set_unchecked(&dev->counts[i], 0);
27899
27900 dev->sigdata.lock = NULL;
27901
27902@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27903
27904 retcode = drm_open_helper(inode, filp, dev);
27905 if (!retcode) {
27906- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27907- if (!dev->open_count++)
27908+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27909+ if (local_inc_return(&dev->open_count) == 1)
27910 retcode = drm_setup(dev);
27911 }
27912 if (!retcode) {
27913@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27914
27915 mutex_lock(&drm_global_mutex);
27916
27917- DRM_DEBUG("open_count = %d\n", dev->open_count);
27918+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27919
27920 if (dev->driver->preclose)
27921 dev->driver->preclose(dev, file_priv);
27922@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27923 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27924 task_pid_nr(current),
27925 (long)old_encode_dev(file_priv->minor->device),
27926- dev->open_count);
27927+ local_read(&dev->open_count));
27928
27929 /* Release any auth tokens that might point to this file_priv,
27930 (do that under the drm_global_mutex) */
27931@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
27932 * End inline drm_release
27933 */
27934
27935- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27936- if (!--dev->open_count) {
27937+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27938+ if (local_dec_and_test(&dev->open_count)) {
27939 if (atomic_read(&dev->ioctl_count)) {
27940 DRM_ERROR("Device busy: %d\n",
27941 atomic_read(&dev->ioctl_count));
27942diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27943index c87dc96..326055d 100644
27944--- a/drivers/gpu/drm/drm_global.c
27945+++ b/drivers/gpu/drm/drm_global.c
27946@@ -36,7 +36,7 @@
27947 struct drm_global_item {
27948 struct mutex mutex;
27949 void *object;
27950- int refcount;
27951+ atomic_t refcount;
27952 };
27953
27954 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27955@@ -49,7 +49,7 @@ void drm_global_init(void)
27956 struct drm_global_item *item = &glob[i];
27957 mutex_init(&item->mutex);
27958 item->object = NULL;
27959- item->refcount = 0;
27960+ atomic_set(&item->refcount, 0);
27961 }
27962 }
27963
27964@@ -59,7 +59,7 @@ void drm_global_release(void)
27965 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27966 struct drm_global_item *item = &glob[i];
27967 BUG_ON(item->object != NULL);
27968- BUG_ON(item->refcount != 0);
27969+ BUG_ON(atomic_read(&item->refcount) != 0);
27970 }
27971 }
27972
27973@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27974 void *object;
27975
27976 mutex_lock(&item->mutex);
27977- if (item->refcount == 0) {
27978+ if (atomic_read(&item->refcount) == 0) {
27979 item->object = kzalloc(ref->size, GFP_KERNEL);
27980 if (unlikely(item->object == NULL)) {
27981 ret = -ENOMEM;
27982@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27983 goto out_err;
27984
27985 }
27986- ++item->refcount;
27987+ atomic_inc(&item->refcount);
27988 ref->object = item->object;
27989 object = item->object;
27990 mutex_unlock(&item->mutex);
27991@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
27992 struct drm_global_item *item = &glob[ref->global_type];
27993
27994 mutex_lock(&item->mutex);
27995- BUG_ON(item->refcount == 0);
27996+ BUG_ON(atomic_read(&item->refcount) == 0);
27997 BUG_ON(ref->object != item->object);
27998- if (--item->refcount == 0) {
27999+ if (atomic_dec_and_test(&item->refcount)) {
28000 ref->release(ref);
28001 item->object = NULL;
28002 }
28003diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28004index ab1162d..42587b2 100644
28005--- a/drivers/gpu/drm/drm_info.c
28006+++ b/drivers/gpu/drm/drm_info.c
28007@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28008 struct drm_local_map *map;
28009 struct drm_map_list *r_list;
28010
28011- /* Hardcoded from _DRM_FRAME_BUFFER,
28012- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28013- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28014- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28015+ static const char * const types[] = {
28016+ [_DRM_FRAME_BUFFER] = "FB",
28017+ [_DRM_REGISTERS] = "REG",
28018+ [_DRM_SHM] = "SHM",
28019+ [_DRM_AGP] = "AGP",
28020+ [_DRM_SCATTER_GATHER] = "SG",
28021+ [_DRM_CONSISTENT] = "PCI",
28022+ [_DRM_GEM] = "GEM" };
28023 const char *type;
28024 int i;
28025
28026@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28027 map = r_list->map;
28028 if (!map)
28029 continue;
28030- if (map->type < 0 || map->type > 5)
28031+ if (map->type >= ARRAY_SIZE(types))
28032 type = "??";
28033 else
28034 type = types[map->type];
28035@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28036 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28037 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28038 vma->vm_flags & VM_IO ? 'i' : '-',
28039+#ifdef CONFIG_GRKERNSEC_HIDESYM
28040+ 0);
28041+#else
28042 vma->vm_pgoff);
28043+#endif
28044
28045 #if defined(__i386__)
28046 pgprot = pgprot_val(vma->vm_page_prot);
28047diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28048index ddd70db..40321e6 100644
28049--- a/drivers/gpu/drm/drm_ioc32.c
28050+++ b/drivers/gpu/drm/drm_ioc32.c
28051@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28052 request = compat_alloc_user_space(nbytes);
28053 if (!access_ok(VERIFY_WRITE, request, nbytes))
28054 return -EFAULT;
28055- list = (struct drm_buf_desc *) (request + 1);
28056+ list = (struct drm_buf_desc __user *) (request + 1);
28057
28058 if (__put_user(count, &request->count)
28059 || __put_user(list, &request->list))
28060@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28061 request = compat_alloc_user_space(nbytes);
28062 if (!access_ok(VERIFY_WRITE, request, nbytes))
28063 return -EFAULT;
28064- list = (struct drm_buf_pub *) (request + 1);
28065+ list = (struct drm_buf_pub __user *) (request + 1);
28066
28067 if (__put_user(count, &request->count)
28068 || __put_user(list, &request->list))
28069diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28070index 904d7e9..ab88581 100644
28071--- a/drivers/gpu/drm/drm_ioctl.c
28072+++ b/drivers/gpu/drm/drm_ioctl.c
28073@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28074 stats->data[i].value =
28075 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28076 else
28077- stats->data[i].value = atomic_read(&dev->counts[i]);
28078+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28079 stats->data[i].type = dev->types[i];
28080 }
28081
28082diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28083index 632ae24..244cf4a 100644
28084--- a/drivers/gpu/drm/drm_lock.c
28085+++ b/drivers/gpu/drm/drm_lock.c
28086@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28087 if (drm_lock_take(&master->lock, lock->context)) {
28088 master->lock.file_priv = file_priv;
28089 master->lock.lock_time = jiffies;
28090- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28091+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28092 break; /* Got lock */
28093 }
28094
28095@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28096 return -EINVAL;
28097 }
28098
28099- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28100+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28101
28102 if (drm_lock_free(&master->lock, lock->context)) {
28103 /* FIXME: Should really bail out here. */
28104diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28105index 8f371e8..9f85d52 100644
28106--- a/drivers/gpu/drm/i810/i810_dma.c
28107+++ b/drivers/gpu/drm/i810/i810_dma.c
28108@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28109 dma->buflist[vertex->idx],
28110 vertex->discard, vertex->used);
28111
28112- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28113- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28114+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28115+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28116 sarea_priv->last_enqueue = dev_priv->counter - 1;
28117 sarea_priv->last_dispatch = (int)hw_status[5];
28118
28119@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28120 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28121 mc->last_render);
28122
28123- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28124- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28125+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28126+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28127 sarea_priv->last_enqueue = dev_priv->counter - 1;
28128 sarea_priv->last_dispatch = (int)hw_status[5];
28129
28130diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28131index c9339f4..f5e1b9d 100644
28132--- a/drivers/gpu/drm/i810/i810_drv.h
28133+++ b/drivers/gpu/drm/i810/i810_drv.h
28134@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28135 int page_flipping;
28136
28137 wait_queue_head_t irq_queue;
28138- atomic_t irq_received;
28139- atomic_t irq_emitted;
28140+ atomic_unchecked_t irq_received;
28141+ atomic_unchecked_t irq_emitted;
28142
28143 int front_offset;
28144 } drm_i810_private_t;
28145diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28146index b2e3c97..58cf079 100644
28147--- a/drivers/gpu/drm/i915/i915_debugfs.c
28148+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28149@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28150 I915_READ(GTIMR));
28151 }
28152 seq_printf(m, "Interrupts received: %d\n",
28153- atomic_read(&dev_priv->irq_received));
28154+ atomic_read_unchecked(&dev_priv->irq_received));
28155 for (i = 0; i < I915_NUM_RINGS; i++) {
28156 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28157 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28158@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28159 return ret;
28160
28161 if (opregion->header)
28162- seq_write(m, opregion->header, OPREGION_SIZE);
28163+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28164
28165 mutex_unlock(&dev->struct_mutex);
28166
28167diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28168index c4da951..3c59c5c 100644
28169--- a/drivers/gpu/drm/i915/i915_dma.c
28170+++ b/drivers/gpu/drm/i915/i915_dma.c
28171@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28172 bool can_switch;
28173
28174 spin_lock(&dev->count_lock);
28175- can_switch = (dev->open_count == 0);
28176+ can_switch = (local_read(&dev->open_count) == 0);
28177 spin_unlock(&dev->count_lock);
28178 return can_switch;
28179 }
28180diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28181index ae294a0..1755461 100644
28182--- a/drivers/gpu/drm/i915/i915_drv.h
28183+++ b/drivers/gpu/drm/i915/i915_drv.h
28184@@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28185 /* render clock increase/decrease */
28186 /* display clock increase/decrease */
28187 /* pll clock increase/decrease */
28188-};
28189+} __no_const;
28190
28191 struct intel_device_info {
28192 u8 gen;
28193@@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28194 int current_page;
28195 int page_flipping;
28196
28197- atomic_t irq_received;
28198+ atomic_unchecked_t irq_received;
28199
28200 /* protects the irq masks */
28201 spinlock_t irq_lock;
28202@@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28203 * will be page flipped away on the next vblank. When it
28204 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28205 */
28206- atomic_t pending_flip;
28207+ atomic_unchecked_t pending_flip;
28208 };
28209
28210 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28211@@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28212 extern void intel_teardown_gmbus(struct drm_device *dev);
28213 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28214 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28215-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28216+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28217 {
28218 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28219 }
28220diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28221index b9da890..cad1d98 100644
28222--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28223+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28224@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28225 i915_gem_clflush_object(obj);
28226
28227 if (obj->base.pending_write_domain)
28228- cd->flips |= atomic_read(&obj->pending_flip);
28229+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28230
28231 /* The actual obj->write_domain will be updated with
28232 * pending_write_domain after we emit the accumulated flush for all
28233@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28234
28235 static int
28236 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28237- int count)
28238+ unsigned int count)
28239 {
28240- int i;
28241+ unsigned int i;
28242
28243 for (i = 0; i < count; i++) {
28244 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28245diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28246index d47a53b..61154c2 100644
28247--- a/drivers/gpu/drm/i915/i915_irq.c
28248+++ b/drivers/gpu/drm/i915/i915_irq.c
28249@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28250 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28251 struct drm_i915_master_private *master_priv;
28252
28253- atomic_inc(&dev_priv->irq_received);
28254+ atomic_inc_unchecked(&dev_priv->irq_received);
28255
28256 /* disable master interrupt before clearing iir */
28257 de_ier = I915_READ(DEIER);
28258@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28259 struct drm_i915_master_private *master_priv;
28260 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28261
28262- atomic_inc(&dev_priv->irq_received);
28263+ atomic_inc_unchecked(&dev_priv->irq_received);
28264
28265 if (IS_GEN6(dev))
28266 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28267@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28268 int ret = IRQ_NONE, pipe;
28269 bool blc_event = false;
28270
28271- atomic_inc(&dev_priv->irq_received);
28272+ atomic_inc_unchecked(&dev_priv->irq_received);
28273
28274 iir = I915_READ(IIR);
28275
28276@@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28277 {
28278 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28279
28280- atomic_set(&dev_priv->irq_received, 0);
28281+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28282
28283 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28284 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28285@@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28286 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28287 int pipe;
28288
28289- atomic_set(&dev_priv->irq_received, 0);
28290+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28291
28292 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28293 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28294diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28295index daa5743..c0757a9 100644
28296--- a/drivers/gpu/drm/i915/intel_display.c
28297+++ b/drivers/gpu/drm/i915/intel_display.c
28298@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28299
28300 wait_event(dev_priv->pending_flip_queue,
28301 atomic_read(&dev_priv->mm.wedged) ||
28302- atomic_read(&obj->pending_flip) == 0);
28303+ atomic_read_unchecked(&obj->pending_flip) == 0);
28304
28305 /* Big Hammer, we also need to ensure that any pending
28306 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28307@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28308 obj = to_intel_framebuffer(crtc->fb)->obj;
28309 dev_priv = crtc->dev->dev_private;
28310 wait_event(dev_priv->pending_flip_queue,
28311- atomic_read(&obj->pending_flip) == 0);
28312+ atomic_read_unchecked(&obj->pending_flip) == 0);
28313 }
28314
28315 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28316@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28317
28318 atomic_clear_mask(1 << intel_crtc->plane,
28319 &obj->pending_flip.counter);
28320- if (atomic_read(&obj->pending_flip) == 0)
28321+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28322 wake_up(&dev_priv->pending_flip_queue);
28323
28324 schedule_work(&work->work);
28325@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28326 /* Block clients from rendering to the new back buffer until
28327 * the flip occurs and the object is no longer visible.
28328 */
28329- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28330+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28331
28332 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28333 if (ret)
28334@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28335 return 0;
28336
28337 cleanup_pending:
28338- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28339+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28340 drm_gem_object_unreference(&work->old_fb_obj->base);
28341 drm_gem_object_unreference(&obj->base);
28342 mutex_unlock(&dev->struct_mutex);
28343diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28344index 54558a0..2d97005 100644
28345--- a/drivers/gpu/drm/mga/mga_drv.h
28346+++ b/drivers/gpu/drm/mga/mga_drv.h
28347@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28348 u32 clear_cmd;
28349 u32 maccess;
28350
28351- atomic_t vbl_received; /**< Number of vblanks received. */
28352+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28353 wait_queue_head_t fence_queue;
28354- atomic_t last_fence_retired;
28355+ atomic_unchecked_t last_fence_retired;
28356 u32 next_fence_to_post;
28357
28358 unsigned int fb_cpp;
28359diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28360index 2581202..f230a8d9 100644
28361--- a/drivers/gpu/drm/mga/mga_irq.c
28362+++ b/drivers/gpu/drm/mga/mga_irq.c
28363@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28364 if (crtc != 0)
28365 return 0;
28366
28367- return atomic_read(&dev_priv->vbl_received);
28368+ return atomic_read_unchecked(&dev_priv->vbl_received);
28369 }
28370
28371
28372@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28373 /* VBLANK interrupt */
28374 if (status & MGA_VLINEPEN) {
28375 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28376- atomic_inc(&dev_priv->vbl_received);
28377+ atomic_inc_unchecked(&dev_priv->vbl_received);
28378 drm_handle_vblank(dev, 0);
28379 handled = 1;
28380 }
28381@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28382 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28383 MGA_WRITE(MGA_PRIMEND, prim_end);
28384
28385- atomic_inc(&dev_priv->last_fence_retired);
28386+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28387 DRM_WAKEUP(&dev_priv->fence_queue);
28388 handled = 1;
28389 }
28390@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28391 * using fences.
28392 */
28393 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28394- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28395+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28396 - *sequence) <= (1 << 23)));
28397
28398 *sequence = cur_fence;
28399diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28400index 5fc201b..7b032b9 100644
28401--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28402+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28403@@ -201,7 +201,7 @@ struct methods {
28404 const char desc[8];
28405 void (*loadbios)(struct drm_device *, uint8_t *);
28406 const bool rw;
28407-};
28408+} __do_const;
28409
28410 static struct methods shadow_methods[] = {
28411 { "PRAMIN", load_vbios_pramin, true },
28412@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28413 struct bit_table {
28414 const char id;
28415 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28416-};
28417+} __no_const;
28418
28419 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28420
28421diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28422index 4c0be3a..5757582 100644
28423--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28424+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28425@@ -238,7 +238,7 @@ struct nouveau_channel {
28426 struct list_head pending;
28427 uint32_t sequence;
28428 uint32_t sequence_ack;
28429- atomic_t last_sequence_irq;
28430+ atomic_unchecked_t last_sequence_irq;
28431 struct nouveau_vma vma;
28432 } fence;
28433
28434@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28435 u32 handle, u16 class);
28436 void (*set_tile_region)(struct drm_device *dev, int i);
28437 void (*tlb_flush)(struct drm_device *, int engine);
28438-};
28439+} __no_const;
28440
28441 struct nouveau_instmem_engine {
28442 void *priv;
28443@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28444 struct nouveau_mc_engine {
28445 int (*init)(struct drm_device *dev);
28446 void (*takedown)(struct drm_device *dev);
28447-};
28448+} __no_const;
28449
28450 struct nouveau_timer_engine {
28451 int (*init)(struct drm_device *dev);
28452 void (*takedown)(struct drm_device *dev);
28453 uint64_t (*read)(struct drm_device *dev);
28454-};
28455+} __no_const;
28456
28457 struct nouveau_fb_engine {
28458 int num_tiles;
28459@@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28460 void (*put)(struct drm_device *, struct nouveau_mem **);
28461
28462 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28463-};
28464+} __no_const;
28465
28466 struct nouveau_engine {
28467 struct nouveau_instmem_engine instmem;
28468@@ -706,7 +706,7 @@ struct drm_nouveau_private {
28469 struct drm_global_reference mem_global_ref;
28470 struct ttm_bo_global_ref bo_global_ref;
28471 struct ttm_bo_device bdev;
28472- atomic_t validate_sequence;
28473+ atomic_unchecked_t validate_sequence;
28474 } ttm;
28475
28476 struct {
28477diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28478index 2f6daae..c9d7b9e 100644
28479--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28480+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28481@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28482 if (USE_REFCNT(dev))
28483 sequence = nvchan_rd32(chan, 0x48);
28484 else
28485- sequence = atomic_read(&chan->fence.last_sequence_irq);
28486+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28487
28488 if (chan->fence.sequence_ack == sequence)
28489 goto out;
28490@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28491 return ret;
28492 }
28493
28494- atomic_set(&chan->fence.last_sequence_irq, 0);
28495+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28496 return 0;
28497 }
28498
28499diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28500index 7ce3fde..cb3ea04 100644
28501--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28502+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28503@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28504 int trycnt = 0;
28505 int ret, i;
28506
28507- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28508+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28509 retry:
28510 if (++trycnt > 100000) {
28511 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28512diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28513index d8831ab..0ba8356 100644
28514--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28515+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28516@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28517 bool can_switch;
28518
28519 spin_lock(&dev->count_lock);
28520- can_switch = (dev->open_count == 0);
28521+ can_switch = (local_read(&dev->open_count) == 0);
28522 spin_unlock(&dev->count_lock);
28523 return can_switch;
28524 }
28525diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28526index dbdea8e..cd6eeeb 100644
28527--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28528+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28529@@ -554,7 +554,7 @@ static int
28530 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28531 u32 class, u32 mthd, u32 data)
28532 {
28533- atomic_set(&chan->fence.last_sequence_irq, data);
28534+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28535 return 0;
28536 }
28537
28538diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28539index bcac90b..53bfc76 100644
28540--- a/drivers/gpu/drm/r128/r128_cce.c
28541+++ b/drivers/gpu/drm/r128/r128_cce.c
28542@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28543
28544 /* GH: Simple idle check.
28545 */
28546- atomic_set(&dev_priv->idle_count, 0);
28547+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28548
28549 /* We don't support anything other than bus-mastering ring mode,
28550 * but the ring can be in either AGP or PCI space for the ring
28551diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28552index 930c71b..499aded 100644
28553--- a/drivers/gpu/drm/r128/r128_drv.h
28554+++ b/drivers/gpu/drm/r128/r128_drv.h
28555@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28556 int is_pci;
28557 unsigned long cce_buffers_offset;
28558
28559- atomic_t idle_count;
28560+ atomic_unchecked_t idle_count;
28561
28562 int page_flipping;
28563 int current_page;
28564 u32 crtc_offset;
28565 u32 crtc_offset_cntl;
28566
28567- atomic_t vbl_received;
28568+ atomic_unchecked_t vbl_received;
28569
28570 u32 color_fmt;
28571 unsigned int front_offset;
28572diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28573index 429d5a0..7e899ed 100644
28574--- a/drivers/gpu/drm/r128/r128_irq.c
28575+++ b/drivers/gpu/drm/r128/r128_irq.c
28576@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28577 if (crtc != 0)
28578 return 0;
28579
28580- return atomic_read(&dev_priv->vbl_received);
28581+ return atomic_read_unchecked(&dev_priv->vbl_received);
28582 }
28583
28584 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28585@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28586 /* VBLANK interrupt */
28587 if (status & R128_CRTC_VBLANK_INT) {
28588 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28589- atomic_inc(&dev_priv->vbl_received);
28590+ atomic_inc_unchecked(&dev_priv->vbl_received);
28591 drm_handle_vblank(dev, 0);
28592 return IRQ_HANDLED;
28593 }
28594diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28595index a9e33ce..09edd4b 100644
28596--- a/drivers/gpu/drm/r128/r128_state.c
28597+++ b/drivers/gpu/drm/r128/r128_state.c
28598@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28599
28600 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28601 {
28602- if (atomic_read(&dev_priv->idle_count) == 0)
28603+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28604 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28605 else
28606- atomic_set(&dev_priv->idle_count, 0);
28607+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28608 }
28609
28610 #endif
28611diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28612index 5a82b6b..9e69c73 100644
28613--- a/drivers/gpu/drm/radeon/mkregtable.c
28614+++ b/drivers/gpu/drm/radeon/mkregtable.c
28615@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28616 regex_t mask_rex;
28617 regmatch_t match[4];
28618 char buf[1024];
28619- size_t end;
28620+ long end;
28621 int len;
28622 int done = 0;
28623 int r;
28624 unsigned o;
28625 struct offset *offset;
28626 char last_reg_s[10];
28627- int last_reg;
28628+ unsigned long last_reg;
28629
28630 if (regcomp
28631 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28632diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28633index 8227e76..ce0b195 100644
28634--- a/drivers/gpu/drm/radeon/radeon.h
28635+++ b/drivers/gpu/drm/radeon/radeon.h
28636@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28637 */
28638 struct radeon_fence_driver {
28639 uint32_t scratch_reg;
28640- atomic_t seq;
28641+ atomic_unchecked_t seq;
28642 uint32_t last_seq;
28643 unsigned long last_jiffies;
28644 unsigned long last_timeout;
28645@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28646 int x2, int y2);
28647 void (*draw_auto)(struct radeon_device *rdev);
28648 void (*set_default_state)(struct radeon_device *rdev);
28649-};
28650+} __no_const;
28651
28652 struct r600_blit {
28653 struct mutex mutex;
28654@@ -954,7 +954,7 @@ struct radeon_asic {
28655 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28656 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28657 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28658-};
28659+} __no_const;
28660
28661 /*
28662 * Asic structures
28663diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28664index 9231564..78b00fd 100644
28665--- a/drivers/gpu/drm/radeon/radeon_device.c
28666+++ b/drivers/gpu/drm/radeon/radeon_device.c
28667@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28668 bool can_switch;
28669
28670 spin_lock(&dev->count_lock);
28671- can_switch = (dev->open_count == 0);
28672+ can_switch = (local_read(&dev->open_count) == 0);
28673 spin_unlock(&dev->count_lock);
28674 return can_switch;
28675 }
28676diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28677index a1b59ca..86f2d44 100644
28678--- a/drivers/gpu/drm/radeon/radeon_drv.h
28679+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28680@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28681
28682 /* SW interrupt */
28683 wait_queue_head_t swi_queue;
28684- atomic_t swi_emitted;
28685+ atomic_unchecked_t swi_emitted;
28686 int vblank_crtc;
28687 uint32_t irq_enable_reg;
28688 uint32_t r500_disp_irq_reg;
28689diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28690index 76ec0e9..6feb1a3 100644
28691--- a/drivers/gpu/drm/radeon/radeon_fence.c
28692+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28693@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28694 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28695 return 0;
28696 }
28697- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28698+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28699 if (!rdev->cp.ready)
28700 /* FIXME: cp is not running assume everythings is done right
28701 * away
28702@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28703 return r;
28704 }
28705 radeon_fence_write(rdev, 0);
28706- atomic_set(&rdev->fence_drv.seq, 0);
28707+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28708 INIT_LIST_HEAD(&rdev->fence_drv.created);
28709 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28710 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28711diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28712index 48b7cea..342236f 100644
28713--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28714+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28715@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28716 request = compat_alloc_user_space(sizeof(*request));
28717 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28718 || __put_user(req32.param, &request->param)
28719- || __put_user((void __user *)(unsigned long)req32.value,
28720+ || __put_user((unsigned long)req32.value,
28721 &request->value))
28722 return -EFAULT;
28723
28724diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28725index 00da384..32f972d 100644
28726--- a/drivers/gpu/drm/radeon/radeon_irq.c
28727+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28728@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28729 unsigned int ret;
28730 RING_LOCALS;
28731
28732- atomic_inc(&dev_priv->swi_emitted);
28733- ret = atomic_read(&dev_priv->swi_emitted);
28734+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28735+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28736
28737 BEGIN_RING(4);
28738 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28739@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28740 drm_radeon_private_t *dev_priv =
28741 (drm_radeon_private_t *) dev->dev_private;
28742
28743- atomic_set(&dev_priv->swi_emitted, 0);
28744+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28745 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28746
28747 dev->max_vblank_count = 0x001fffff;
28748diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28749index e8422ae..d22d4a8 100644
28750--- a/drivers/gpu/drm/radeon/radeon_state.c
28751+++ b/drivers/gpu/drm/radeon/radeon_state.c
28752@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28753 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28754 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28755
28756- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28757+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28758 sarea_priv->nbox * sizeof(depth_boxes[0])))
28759 return -EFAULT;
28760
28761@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28762 {
28763 drm_radeon_private_t *dev_priv = dev->dev_private;
28764 drm_radeon_getparam_t *param = data;
28765- int value;
28766+ int value = 0;
28767
28768 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28769
28770diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28771index 0b5468b..9c4b308 100644
28772--- a/drivers/gpu/drm/radeon/radeon_ttm.c
28773+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28774@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28775 }
28776 if (unlikely(ttm_vm_ops == NULL)) {
28777 ttm_vm_ops = vma->vm_ops;
28778- radeon_ttm_vm_ops = *ttm_vm_ops;
28779- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28780+ pax_open_kernel();
28781+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28782+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28783+ pax_close_kernel();
28784 }
28785 vma->vm_ops = &radeon_ttm_vm_ops;
28786 return 0;
28787diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28788index a9049ed..501f284 100644
28789--- a/drivers/gpu/drm/radeon/rs690.c
28790+++ b/drivers/gpu/drm/radeon/rs690.c
28791@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28792 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28793 rdev->pm.sideport_bandwidth.full)
28794 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28795- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28796+ read_delay_latency.full = dfixed_const(800 * 1000);
28797 read_delay_latency.full = dfixed_div(read_delay_latency,
28798 rdev->pm.igp_sideport_mclk);
28799+ a.full = dfixed_const(370);
28800+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28801 } else {
28802 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28803 rdev->pm.k8_bandwidth.full)
28804diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28805index 727e93d..1565650 100644
28806--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28807+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28808@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28809 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28810 struct shrink_control *sc)
28811 {
28812- static atomic_t start_pool = ATOMIC_INIT(0);
28813+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28814 unsigned i;
28815- unsigned pool_offset = atomic_add_return(1, &start_pool);
28816+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28817 struct ttm_page_pool *pool;
28818 int shrink_pages = sc->nr_to_scan;
28819
28820diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28821index 9cf87d9..2000b7d 100644
28822--- a/drivers/gpu/drm/via/via_drv.h
28823+++ b/drivers/gpu/drm/via/via_drv.h
28824@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28825 typedef uint32_t maskarray_t[5];
28826
28827 typedef struct drm_via_irq {
28828- atomic_t irq_received;
28829+ atomic_unchecked_t irq_received;
28830 uint32_t pending_mask;
28831 uint32_t enable_mask;
28832 wait_queue_head_t irq_queue;
28833@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28834 struct timeval last_vblank;
28835 int last_vblank_valid;
28836 unsigned usec_per_vblank;
28837- atomic_t vbl_received;
28838+ atomic_unchecked_t vbl_received;
28839 drm_via_state_t hc_state;
28840 char pci_buf[VIA_PCI_BUF_SIZE];
28841 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28842diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28843index d391f48..10c8ca3 100644
28844--- a/drivers/gpu/drm/via/via_irq.c
28845+++ b/drivers/gpu/drm/via/via_irq.c
28846@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28847 if (crtc != 0)
28848 return 0;
28849
28850- return atomic_read(&dev_priv->vbl_received);
28851+ return atomic_read_unchecked(&dev_priv->vbl_received);
28852 }
28853
28854 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28855@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28856
28857 status = VIA_READ(VIA_REG_INTERRUPT);
28858 if (status & VIA_IRQ_VBLANK_PENDING) {
28859- atomic_inc(&dev_priv->vbl_received);
28860- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28861+ atomic_inc_unchecked(&dev_priv->vbl_received);
28862+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28863 do_gettimeofday(&cur_vblank);
28864 if (dev_priv->last_vblank_valid) {
28865 dev_priv->usec_per_vblank =
28866@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28867 dev_priv->last_vblank = cur_vblank;
28868 dev_priv->last_vblank_valid = 1;
28869 }
28870- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28871+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28872 DRM_DEBUG("US per vblank is: %u\n",
28873 dev_priv->usec_per_vblank);
28874 }
28875@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28876
28877 for (i = 0; i < dev_priv->num_irqs; ++i) {
28878 if (status & cur_irq->pending_mask) {
28879- atomic_inc(&cur_irq->irq_received);
28880+ atomic_inc_unchecked(&cur_irq->irq_received);
28881 DRM_WAKEUP(&cur_irq->irq_queue);
28882 handled = 1;
28883 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28884@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28885 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28886 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28887 masks[irq][4]));
28888- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28889+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28890 } else {
28891 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28892 (((cur_irq_sequence =
28893- atomic_read(&cur_irq->irq_received)) -
28894+ atomic_read_unchecked(&cur_irq->irq_received)) -
28895 *sequence) <= (1 << 23)));
28896 }
28897 *sequence = cur_irq_sequence;
28898@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28899 }
28900
28901 for (i = 0; i < dev_priv->num_irqs; ++i) {
28902- atomic_set(&cur_irq->irq_received, 0);
28903+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28904 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28905 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28906 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28907@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28908 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28909 case VIA_IRQ_RELATIVE:
28910 irqwait->request.sequence +=
28911- atomic_read(&cur_irq->irq_received);
28912+ atomic_read_unchecked(&cur_irq->irq_received);
28913 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28914 case VIA_IRQ_ABSOLUTE:
28915 break;
28916diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28917index dc27970..f18b008 100644
28918--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28919+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28920@@ -260,7 +260,7 @@ struct vmw_private {
28921 * Fencing and IRQs.
28922 */
28923
28924- atomic_t marker_seq;
28925+ atomic_unchecked_t marker_seq;
28926 wait_queue_head_t fence_queue;
28927 wait_queue_head_t fifo_queue;
28928 int fence_queue_waiters; /* Protected by hw_mutex */
28929diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28930index a0c2f12..68ae6cb 100644
28931--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28932+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28933@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28934 (unsigned int) min,
28935 (unsigned int) fifo->capabilities);
28936
28937- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28938+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28939 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28940 vmw_marker_queue_init(&fifo->marker_queue);
28941 return vmw_fifo_send_fence(dev_priv, &dummy);
28942@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28943 if (reserveable)
28944 iowrite32(bytes, fifo_mem +
28945 SVGA_FIFO_RESERVED);
28946- return fifo_mem + (next_cmd >> 2);
28947+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28948 } else {
28949 need_bounce = true;
28950 }
28951@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28952
28953 fm = vmw_fifo_reserve(dev_priv, bytes);
28954 if (unlikely(fm == NULL)) {
28955- *seqno = atomic_read(&dev_priv->marker_seq);
28956+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28957 ret = -ENOMEM;
28958 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
28959 false, 3*HZ);
28960@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28961 }
28962
28963 do {
28964- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
28965+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
28966 } while (*seqno == 0);
28967
28968 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28969diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28970index cabc95f..14b3d77 100644
28971--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28972+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28973@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
28974 * emitted. Then the fence is stale and signaled.
28975 */
28976
28977- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
28978+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
28979 > VMW_FENCE_WRAP);
28980
28981 return ret;
28982@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
28983
28984 if (fifo_idle)
28985 down_read(&fifo_state->rwsem);
28986- signal_seq = atomic_read(&dev_priv->marker_seq);
28987+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
28988 ret = 0;
28989
28990 for (;;) {
28991diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28992index 8a8725c..afed796 100644
28993--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28994+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28995@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28996 while (!vmw_lag_lt(queue, us)) {
28997 spin_lock(&queue->lock);
28998 if (list_empty(&queue->head))
28999- seqno = atomic_read(&dev_priv->marker_seq);
29000+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29001 else {
29002 marker = list_first_entry(&queue->head,
29003 struct vmw_marker, head);
29004diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29005index bb656d8..4169fca 100644
29006--- a/drivers/hid/hid-core.c
29007+++ b/drivers/hid/hid-core.c
29008@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29009
29010 int hid_add_device(struct hid_device *hdev)
29011 {
29012- static atomic_t id = ATOMIC_INIT(0);
29013+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29014 int ret;
29015
29016 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29017@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29018 /* XXX hack, any other cleaner solution after the driver core
29019 * is converted to allow more than 20 bytes as the device name? */
29020 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29021- hdev->vendor, hdev->product, atomic_inc_return(&id));
29022+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29023
29024 hid_debug_register(hdev, dev_name(&hdev->dev));
29025 ret = device_add(&hdev->dev);
29026diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29027index 4ef02b2..8a96831 100644
29028--- a/drivers/hid/usbhid/hiddev.c
29029+++ b/drivers/hid/usbhid/hiddev.c
29030@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29031 break;
29032
29033 case HIDIOCAPPLICATION:
29034- if (arg < 0 || arg >= hid->maxapplication)
29035+ if (arg >= hid->maxapplication)
29036 break;
29037
29038 for (i = 0; i < hid->maxcollection; i++)
29039diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29040index 4065374..10ed7dc 100644
29041--- a/drivers/hv/channel.c
29042+++ b/drivers/hv/channel.c
29043@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29044 int ret = 0;
29045 int t;
29046
29047- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29048- atomic_inc(&vmbus_connection.next_gpadl_handle);
29049+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29050+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29051
29052 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29053 if (ret)
29054diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29055index 0fb100e..baf87e5 100644
29056--- a/drivers/hv/hv.c
29057+++ b/drivers/hv/hv.c
29058@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29059 u64 output_address = (output) ? virt_to_phys(output) : 0;
29060 u32 output_address_hi = output_address >> 32;
29061 u32 output_address_lo = output_address & 0xFFFFFFFF;
29062- void *hypercall_page = hv_context.hypercall_page;
29063+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29064
29065 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29066 "=a"(hv_status_lo) : "d" (control_hi),
29067diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29068index 0aee112..b72d21f 100644
29069--- a/drivers/hv/hyperv_vmbus.h
29070+++ b/drivers/hv/hyperv_vmbus.h
29071@@ -556,7 +556,7 @@ enum vmbus_connect_state {
29072 struct vmbus_connection {
29073 enum vmbus_connect_state conn_state;
29074
29075- atomic_t next_gpadl_handle;
29076+ atomic_unchecked_t next_gpadl_handle;
29077
29078 /*
29079 * Represents channel interrupts. Each bit position represents a
29080diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29081index d2d0a2a..90b8f4d 100644
29082--- a/drivers/hv/vmbus_drv.c
29083+++ b/drivers/hv/vmbus_drv.c
29084@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29085 {
29086 int ret = 0;
29087
29088- static atomic_t device_num = ATOMIC_INIT(0);
29089+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29090
29091 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29092- atomic_inc_return(&device_num));
29093+ atomic_inc_return_unchecked(&device_num));
29094
29095 child_device_obj->device.bus = &hv_bus;
29096 child_device_obj->device.parent = &hv_acpi_dev->dev;
29097diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29098index 66f6729..2d6de0a 100644
29099--- a/drivers/hwmon/acpi_power_meter.c
29100+++ b/drivers/hwmon/acpi_power_meter.c
29101@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29102 return res;
29103
29104 temp /= 1000;
29105- if (temp < 0)
29106- return -EINVAL;
29107
29108 mutex_lock(&resource->lock);
29109 resource->trip[attr->index - 7] = temp;
29110diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29111index 5357925..6cf0418 100644
29112--- a/drivers/hwmon/sht15.c
29113+++ b/drivers/hwmon/sht15.c
29114@@ -166,7 +166,7 @@ struct sht15_data {
29115 int supply_uV;
29116 bool supply_uV_valid;
29117 struct work_struct update_supply_work;
29118- atomic_t interrupt_handled;
29119+ atomic_unchecked_t interrupt_handled;
29120 };
29121
29122 /**
29123@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29124 return ret;
29125
29126 gpio_direction_input(data->pdata->gpio_data);
29127- atomic_set(&data->interrupt_handled, 0);
29128+ atomic_set_unchecked(&data->interrupt_handled, 0);
29129
29130 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29131 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29132 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29133 /* Only relevant if the interrupt hasn't occurred. */
29134- if (!atomic_read(&data->interrupt_handled))
29135+ if (!atomic_read_unchecked(&data->interrupt_handled))
29136 schedule_work(&data->read_work);
29137 }
29138 ret = wait_event_timeout(data->wait_queue,
29139@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29140
29141 /* First disable the interrupt */
29142 disable_irq_nosync(irq);
29143- atomic_inc(&data->interrupt_handled);
29144+ atomic_inc_unchecked(&data->interrupt_handled);
29145 /* Then schedule a reading work struct */
29146 if (data->state != SHT15_READING_NOTHING)
29147 schedule_work(&data->read_work);
29148@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29149 * If not, then start the interrupt again - care here as could
29150 * have gone low in meantime so verify it hasn't!
29151 */
29152- atomic_set(&data->interrupt_handled, 0);
29153+ atomic_set_unchecked(&data->interrupt_handled, 0);
29154 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29155 /* If still not occurred or another handler has been scheduled */
29156 if (gpio_get_value(data->pdata->gpio_data)
29157- || atomic_read(&data->interrupt_handled))
29158+ || atomic_read_unchecked(&data->interrupt_handled))
29159 return;
29160 }
29161
29162diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29163index 378fcb5..5e91fa8 100644
29164--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29165+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29166@@ -43,7 +43,7 @@
29167 extern struct i2c_adapter amd756_smbus;
29168
29169 static struct i2c_adapter *s4882_adapter;
29170-static struct i2c_algorithm *s4882_algo;
29171+static i2c_algorithm_no_const *s4882_algo;
29172
29173 /* Wrapper access functions for multiplexed SMBus */
29174 static DEFINE_MUTEX(amd756_lock);
29175diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29176index 29015eb..af2d8e9 100644
29177--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29178+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29179@@ -41,7 +41,7 @@
29180 extern struct i2c_adapter *nforce2_smbus;
29181
29182 static struct i2c_adapter *s4985_adapter;
29183-static struct i2c_algorithm *s4985_algo;
29184+static i2c_algorithm_no_const *s4985_algo;
29185
29186 /* Wrapper access functions for multiplexed SMBus */
29187 static DEFINE_MUTEX(nforce2_lock);
29188diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29189index d7a4833..7fae376 100644
29190--- a/drivers/i2c/i2c-mux.c
29191+++ b/drivers/i2c/i2c-mux.c
29192@@ -28,7 +28,7 @@
29193 /* multiplexer per channel data */
29194 struct i2c_mux_priv {
29195 struct i2c_adapter adap;
29196- struct i2c_algorithm algo;
29197+ i2c_algorithm_no_const algo;
29198
29199 struct i2c_adapter *parent;
29200 void *mux_dev; /* the mux chip/device */
29201diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29202index 57d00ca..0145194 100644
29203--- a/drivers/ide/aec62xx.c
29204+++ b/drivers/ide/aec62xx.c
29205@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29206 .cable_detect = atp86x_cable_detect,
29207 };
29208
29209-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29210+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29211 { /* 0: AEC6210 */
29212 .name = DRV_NAME,
29213 .init_chipset = init_chipset_aec62xx,
29214diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29215index 2c8016a..911a27c 100644
29216--- a/drivers/ide/alim15x3.c
29217+++ b/drivers/ide/alim15x3.c
29218@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29219 .dma_sff_read_status = ide_dma_sff_read_status,
29220 };
29221
29222-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29223+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29224 .name = DRV_NAME,
29225 .init_chipset = init_chipset_ali15x3,
29226 .init_hwif = init_hwif_ali15x3,
29227diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29228index 3747b25..56fc995 100644
29229--- a/drivers/ide/amd74xx.c
29230+++ b/drivers/ide/amd74xx.c
29231@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29232 .udma_mask = udma, \
29233 }
29234
29235-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29236+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29237 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29238 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29239 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29240diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29241index 15f0ead..cb43480 100644
29242--- a/drivers/ide/atiixp.c
29243+++ b/drivers/ide/atiixp.c
29244@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29245 .cable_detect = atiixp_cable_detect,
29246 };
29247
29248-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29249+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29250 { /* 0: IXP200/300/400/700 */
29251 .name = DRV_NAME,
29252 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29253diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29254index 5f80312..d1fc438 100644
29255--- a/drivers/ide/cmd64x.c
29256+++ b/drivers/ide/cmd64x.c
29257@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29258 .dma_sff_read_status = ide_dma_sff_read_status,
29259 };
29260
29261-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29262+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29263 { /* 0: CMD643 */
29264 .name = DRV_NAME,
29265 .init_chipset = init_chipset_cmd64x,
29266diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29267index 2c1e5f7..1444762 100644
29268--- a/drivers/ide/cs5520.c
29269+++ b/drivers/ide/cs5520.c
29270@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29271 .set_dma_mode = cs5520_set_dma_mode,
29272 };
29273
29274-static const struct ide_port_info cyrix_chipset __devinitdata = {
29275+static const struct ide_port_info cyrix_chipset __devinitconst = {
29276 .name = DRV_NAME,
29277 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29278 .port_ops = &cs5520_port_ops,
29279diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29280index 4dc4eb9..49b40ad 100644
29281--- a/drivers/ide/cs5530.c
29282+++ b/drivers/ide/cs5530.c
29283@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29284 .udma_filter = cs5530_udma_filter,
29285 };
29286
29287-static const struct ide_port_info cs5530_chipset __devinitdata = {
29288+static const struct ide_port_info cs5530_chipset __devinitconst = {
29289 .name = DRV_NAME,
29290 .init_chipset = init_chipset_cs5530,
29291 .init_hwif = init_hwif_cs5530,
29292diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29293index 5059faf..18d4c85 100644
29294--- a/drivers/ide/cs5535.c
29295+++ b/drivers/ide/cs5535.c
29296@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29297 .cable_detect = cs5535_cable_detect,
29298 };
29299
29300-static const struct ide_port_info cs5535_chipset __devinitdata = {
29301+static const struct ide_port_info cs5535_chipset __devinitconst = {
29302 .name = DRV_NAME,
29303 .port_ops = &cs5535_port_ops,
29304 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29305diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29306index 847553f..3ffb49d 100644
29307--- a/drivers/ide/cy82c693.c
29308+++ b/drivers/ide/cy82c693.c
29309@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29310 .set_dma_mode = cy82c693_set_dma_mode,
29311 };
29312
29313-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29314+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29315 .name = DRV_NAME,
29316 .init_iops = init_iops_cy82c693,
29317 .port_ops = &cy82c693_port_ops,
29318diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29319index 58c51cd..4aec3b8 100644
29320--- a/drivers/ide/hpt366.c
29321+++ b/drivers/ide/hpt366.c
29322@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29323 }
29324 };
29325
29326-static const struct hpt_info hpt36x __devinitdata = {
29327+static const struct hpt_info hpt36x __devinitconst = {
29328 .chip_name = "HPT36x",
29329 .chip_type = HPT36x,
29330 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29331@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29332 .timings = &hpt36x_timings
29333 };
29334
29335-static const struct hpt_info hpt370 __devinitdata = {
29336+static const struct hpt_info hpt370 __devinitconst = {
29337 .chip_name = "HPT370",
29338 .chip_type = HPT370,
29339 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29340@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29341 .timings = &hpt37x_timings
29342 };
29343
29344-static const struct hpt_info hpt370a __devinitdata = {
29345+static const struct hpt_info hpt370a __devinitconst = {
29346 .chip_name = "HPT370A",
29347 .chip_type = HPT370A,
29348 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29349@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29350 .timings = &hpt37x_timings
29351 };
29352
29353-static const struct hpt_info hpt374 __devinitdata = {
29354+static const struct hpt_info hpt374 __devinitconst = {
29355 .chip_name = "HPT374",
29356 .chip_type = HPT374,
29357 .udma_mask = ATA_UDMA5,
29358@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29359 .timings = &hpt37x_timings
29360 };
29361
29362-static const struct hpt_info hpt372 __devinitdata = {
29363+static const struct hpt_info hpt372 __devinitconst = {
29364 .chip_name = "HPT372",
29365 .chip_type = HPT372,
29366 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29367@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29368 .timings = &hpt37x_timings
29369 };
29370
29371-static const struct hpt_info hpt372a __devinitdata = {
29372+static const struct hpt_info hpt372a __devinitconst = {
29373 .chip_name = "HPT372A",
29374 .chip_type = HPT372A,
29375 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29376@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29377 .timings = &hpt37x_timings
29378 };
29379
29380-static const struct hpt_info hpt302 __devinitdata = {
29381+static const struct hpt_info hpt302 __devinitconst = {
29382 .chip_name = "HPT302",
29383 .chip_type = HPT302,
29384 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29385@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29386 .timings = &hpt37x_timings
29387 };
29388
29389-static const struct hpt_info hpt371 __devinitdata = {
29390+static const struct hpt_info hpt371 __devinitconst = {
29391 .chip_name = "HPT371",
29392 .chip_type = HPT371,
29393 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29394@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29395 .timings = &hpt37x_timings
29396 };
29397
29398-static const struct hpt_info hpt372n __devinitdata = {
29399+static const struct hpt_info hpt372n __devinitconst = {
29400 .chip_name = "HPT372N",
29401 .chip_type = HPT372N,
29402 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29403@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29404 .timings = &hpt37x_timings
29405 };
29406
29407-static const struct hpt_info hpt302n __devinitdata = {
29408+static const struct hpt_info hpt302n __devinitconst = {
29409 .chip_name = "HPT302N",
29410 .chip_type = HPT302N,
29411 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29412@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29413 .timings = &hpt37x_timings
29414 };
29415
29416-static const struct hpt_info hpt371n __devinitdata = {
29417+static const struct hpt_info hpt371n __devinitconst = {
29418 .chip_name = "HPT371N",
29419 .chip_type = HPT371N,
29420 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29421@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29422 .dma_sff_read_status = ide_dma_sff_read_status,
29423 };
29424
29425-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29426+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29427 { /* 0: HPT36x */
29428 .name = DRV_NAME,
29429 .init_chipset = init_chipset_hpt366,
29430diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29431index 8126824..55a2798 100644
29432--- a/drivers/ide/ide-cd.c
29433+++ b/drivers/ide/ide-cd.c
29434@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29435 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29436 if ((unsigned long)buf & alignment
29437 || blk_rq_bytes(rq) & q->dma_pad_mask
29438- || object_is_on_stack(buf))
29439+ || object_starts_on_stack(buf))
29440 drive->dma = 0;
29441 }
29442 }
29443diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29444index a743e68..1cfd674 100644
29445--- a/drivers/ide/ide-pci-generic.c
29446+++ b/drivers/ide/ide-pci-generic.c
29447@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29448 .udma_mask = ATA_UDMA6, \
29449 }
29450
29451-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29452+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29453 /* 0: Unknown */
29454 DECLARE_GENERIC_PCI_DEV(0),
29455
29456diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29457index 560e66d..d5dd180 100644
29458--- a/drivers/ide/it8172.c
29459+++ b/drivers/ide/it8172.c
29460@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29461 .set_dma_mode = it8172_set_dma_mode,
29462 };
29463
29464-static const struct ide_port_info it8172_port_info __devinitdata = {
29465+static const struct ide_port_info it8172_port_info __devinitconst = {
29466 .name = DRV_NAME,
29467 .port_ops = &it8172_port_ops,
29468 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29469diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29470index 46816ba..1847aeb 100644
29471--- a/drivers/ide/it8213.c
29472+++ b/drivers/ide/it8213.c
29473@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29474 .cable_detect = it8213_cable_detect,
29475 };
29476
29477-static const struct ide_port_info it8213_chipset __devinitdata = {
29478+static const struct ide_port_info it8213_chipset __devinitconst = {
29479 .name = DRV_NAME,
29480 .enablebits = { {0x41, 0x80, 0x80} },
29481 .port_ops = &it8213_port_ops,
29482diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29483index 2e3169f..c5611db 100644
29484--- a/drivers/ide/it821x.c
29485+++ b/drivers/ide/it821x.c
29486@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29487 .cable_detect = it821x_cable_detect,
29488 };
29489
29490-static const struct ide_port_info it821x_chipset __devinitdata = {
29491+static const struct ide_port_info it821x_chipset __devinitconst = {
29492 .name = DRV_NAME,
29493 .init_chipset = init_chipset_it821x,
29494 .init_hwif = init_hwif_it821x,
29495diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29496index 74c2c4a..efddd7d 100644
29497--- a/drivers/ide/jmicron.c
29498+++ b/drivers/ide/jmicron.c
29499@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29500 .cable_detect = jmicron_cable_detect,
29501 };
29502
29503-static const struct ide_port_info jmicron_chipset __devinitdata = {
29504+static const struct ide_port_info jmicron_chipset __devinitconst = {
29505 .name = DRV_NAME,
29506 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29507 .port_ops = &jmicron_port_ops,
29508diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29509index 95327a2..73f78d8 100644
29510--- a/drivers/ide/ns87415.c
29511+++ b/drivers/ide/ns87415.c
29512@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29513 .dma_sff_read_status = superio_dma_sff_read_status,
29514 };
29515
29516-static const struct ide_port_info ns87415_chipset __devinitdata = {
29517+static const struct ide_port_info ns87415_chipset __devinitconst = {
29518 .name = DRV_NAME,
29519 .init_hwif = init_hwif_ns87415,
29520 .tp_ops = &ns87415_tp_ops,
29521diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29522index 1a53a4c..39edc66 100644
29523--- a/drivers/ide/opti621.c
29524+++ b/drivers/ide/opti621.c
29525@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29526 .set_pio_mode = opti621_set_pio_mode,
29527 };
29528
29529-static const struct ide_port_info opti621_chipset __devinitdata = {
29530+static const struct ide_port_info opti621_chipset __devinitconst = {
29531 .name = DRV_NAME,
29532 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29533 .port_ops = &opti621_port_ops,
29534diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29535index 9546fe2..2e5ceb6 100644
29536--- a/drivers/ide/pdc202xx_new.c
29537+++ b/drivers/ide/pdc202xx_new.c
29538@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29539 .udma_mask = udma, \
29540 }
29541
29542-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29543+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29544 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29545 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29546 };
29547diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29548index 3a35ec6..5634510 100644
29549--- a/drivers/ide/pdc202xx_old.c
29550+++ b/drivers/ide/pdc202xx_old.c
29551@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29552 .max_sectors = sectors, \
29553 }
29554
29555-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29556+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29557 { /* 0: PDC20246 */
29558 .name = DRV_NAME,
29559 .init_chipset = init_chipset_pdc202xx,
29560diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29561index 1892e81..fe0fd60 100644
29562--- a/drivers/ide/piix.c
29563+++ b/drivers/ide/piix.c
29564@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29565 .udma_mask = udma, \
29566 }
29567
29568-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29569+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29570 /* 0: MPIIX */
29571 { /*
29572 * MPIIX actually has only a single IDE channel mapped to
29573diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29574index a6414a8..c04173e 100644
29575--- a/drivers/ide/rz1000.c
29576+++ b/drivers/ide/rz1000.c
29577@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29578 }
29579 }
29580
29581-static const struct ide_port_info rz1000_chipset __devinitdata = {
29582+static const struct ide_port_info rz1000_chipset __devinitconst = {
29583 .name = DRV_NAME,
29584 .host_flags = IDE_HFLAG_NO_DMA,
29585 };
29586diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29587index 356b9b5..d4758eb 100644
29588--- a/drivers/ide/sc1200.c
29589+++ b/drivers/ide/sc1200.c
29590@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29591 .dma_sff_read_status = ide_dma_sff_read_status,
29592 };
29593
29594-static const struct ide_port_info sc1200_chipset __devinitdata = {
29595+static const struct ide_port_info sc1200_chipset __devinitconst = {
29596 .name = DRV_NAME,
29597 .port_ops = &sc1200_port_ops,
29598 .dma_ops = &sc1200_dma_ops,
29599diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29600index b7f5b0c..9701038 100644
29601--- a/drivers/ide/scc_pata.c
29602+++ b/drivers/ide/scc_pata.c
29603@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29604 .dma_sff_read_status = scc_dma_sff_read_status,
29605 };
29606
29607-static const struct ide_port_info scc_chipset __devinitdata = {
29608+static const struct ide_port_info scc_chipset __devinitconst = {
29609 .name = "sccIDE",
29610 .init_iops = init_iops_scc,
29611 .init_dma = scc_init_dma,
29612diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29613index 35fb8da..24d72ef 100644
29614--- a/drivers/ide/serverworks.c
29615+++ b/drivers/ide/serverworks.c
29616@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29617 .cable_detect = svwks_cable_detect,
29618 };
29619
29620-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29621+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29622 { /* 0: OSB4 */
29623 .name = DRV_NAME,
29624 .init_chipset = init_chipset_svwks,
29625diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29626index ddeda44..46f7e30 100644
29627--- a/drivers/ide/siimage.c
29628+++ b/drivers/ide/siimage.c
29629@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29630 .udma_mask = ATA_UDMA6, \
29631 }
29632
29633-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29634+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29635 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29636 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29637 };
29638diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29639index 4a00225..09e61b4 100644
29640--- a/drivers/ide/sis5513.c
29641+++ b/drivers/ide/sis5513.c
29642@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29643 .cable_detect = sis_cable_detect,
29644 };
29645
29646-static const struct ide_port_info sis5513_chipset __devinitdata = {
29647+static const struct ide_port_info sis5513_chipset __devinitconst = {
29648 .name = DRV_NAME,
29649 .init_chipset = init_chipset_sis5513,
29650 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29651diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29652index f21dc2a..d051cd2 100644
29653--- a/drivers/ide/sl82c105.c
29654+++ b/drivers/ide/sl82c105.c
29655@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29656 .dma_sff_read_status = ide_dma_sff_read_status,
29657 };
29658
29659-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29660+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29661 .name = DRV_NAME,
29662 .init_chipset = init_chipset_sl82c105,
29663 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29664diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29665index 864ffe0..863a5e9 100644
29666--- a/drivers/ide/slc90e66.c
29667+++ b/drivers/ide/slc90e66.c
29668@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29669 .cable_detect = slc90e66_cable_detect,
29670 };
29671
29672-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29673+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29674 .name = DRV_NAME,
29675 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29676 .port_ops = &slc90e66_port_ops,
29677diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29678index 4799d5c..1794678 100644
29679--- a/drivers/ide/tc86c001.c
29680+++ b/drivers/ide/tc86c001.c
29681@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29682 .dma_sff_read_status = ide_dma_sff_read_status,
29683 };
29684
29685-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29686+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29687 .name = DRV_NAME,
29688 .init_hwif = init_hwif_tc86c001,
29689 .port_ops = &tc86c001_port_ops,
29690diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29691index 281c914..55ce1b8 100644
29692--- a/drivers/ide/triflex.c
29693+++ b/drivers/ide/triflex.c
29694@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29695 .set_dma_mode = triflex_set_mode,
29696 };
29697
29698-static const struct ide_port_info triflex_device __devinitdata = {
29699+static const struct ide_port_info triflex_device __devinitconst = {
29700 .name = DRV_NAME,
29701 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29702 .port_ops = &triflex_port_ops,
29703diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29704index 4b42ca0..e494a98 100644
29705--- a/drivers/ide/trm290.c
29706+++ b/drivers/ide/trm290.c
29707@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29708 .dma_check = trm290_dma_check,
29709 };
29710
29711-static const struct ide_port_info trm290_chipset __devinitdata = {
29712+static const struct ide_port_info trm290_chipset __devinitconst = {
29713 .name = DRV_NAME,
29714 .init_hwif = init_hwif_trm290,
29715 .tp_ops = &trm290_tp_ops,
29716diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29717index f46f49c..eb77678 100644
29718--- a/drivers/ide/via82cxxx.c
29719+++ b/drivers/ide/via82cxxx.c
29720@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29721 .cable_detect = via82cxxx_cable_detect,
29722 };
29723
29724-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29725+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29726 .name = DRV_NAME,
29727 .init_chipset = init_chipset_via82cxxx,
29728 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29729diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29730index eb0e2cc..14241c7 100644
29731--- a/drivers/ieee802154/fakehard.c
29732+++ b/drivers/ieee802154/fakehard.c
29733@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29734 phy->transmit_power = 0xbf;
29735
29736 dev->netdev_ops = &fake_ops;
29737- dev->ml_priv = &fake_mlme;
29738+ dev->ml_priv = (void *)&fake_mlme;
29739
29740 priv = netdev_priv(dev);
29741 priv->phy = phy;
29742diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29743index 8b72f39..55df4c8 100644
29744--- a/drivers/infiniband/core/cm.c
29745+++ b/drivers/infiniband/core/cm.c
29746@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29747
29748 struct cm_counter_group {
29749 struct kobject obj;
29750- atomic_long_t counter[CM_ATTR_COUNT];
29751+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29752 };
29753
29754 struct cm_counter_attribute {
29755@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29756 struct ib_mad_send_buf *msg = NULL;
29757 int ret;
29758
29759- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29760+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29761 counter[CM_REQ_COUNTER]);
29762
29763 /* Quick state check to discard duplicate REQs. */
29764@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29765 if (!cm_id_priv)
29766 return;
29767
29768- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29769+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29770 counter[CM_REP_COUNTER]);
29771 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29772 if (ret)
29773@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29774 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29775 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29776 spin_unlock_irq(&cm_id_priv->lock);
29777- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29778+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29779 counter[CM_RTU_COUNTER]);
29780 goto out;
29781 }
29782@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29783 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29784 dreq_msg->local_comm_id);
29785 if (!cm_id_priv) {
29786- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29787+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29788 counter[CM_DREQ_COUNTER]);
29789 cm_issue_drep(work->port, work->mad_recv_wc);
29790 return -EINVAL;
29791@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29792 case IB_CM_MRA_REP_RCVD:
29793 break;
29794 case IB_CM_TIMEWAIT:
29795- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29796+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29797 counter[CM_DREQ_COUNTER]);
29798 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29799 goto unlock;
29800@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29801 cm_free_msg(msg);
29802 goto deref;
29803 case IB_CM_DREQ_RCVD:
29804- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29805+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29806 counter[CM_DREQ_COUNTER]);
29807 goto unlock;
29808 default:
29809@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29810 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29811 cm_id_priv->msg, timeout)) {
29812 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29813- atomic_long_inc(&work->port->
29814+ atomic_long_inc_unchecked(&work->port->
29815 counter_group[CM_RECV_DUPLICATES].
29816 counter[CM_MRA_COUNTER]);
29817 goto out;
29818@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29819 break;
29820 case IB_CM_MRA_REQ_RCVD:
29821 case IB_CM_MRA_REP_RCVD:
29822- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29823+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29824 counter[CM_MRA_COUNTER]);
29825 /* fall through */
29826 default:
29827@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29828 case IB_CM_LAP_IDLE:
29829 break;
29830 case IB_CM_MRA_LAP_SENT:
29831- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29832+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29833 counter[CM_LAP_COUNTER]);
29834 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29835 goto unlock;
29836@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29837 cm_free_msg(msg);
29838 goto deref;
29839 case IB_CM_LAP_RCVD:
29840- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29841+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29842 counter[CM_LAP_COUNTER]);
29843 goto unlock;
29844 default:
29845@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29846 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29847 if (cur_cm_id_priv) {
29848 spin_unlock_irq(&cm.lock);
29849- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29850+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29851 counter[CM_SIDR_REQ_COUNTER]);
29852 goto out; /* Duplicate message. */
29853 }
29854@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29855 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29856 msg->retries = 1;
29857
29858- atomic_long_add(1 + msg->retries,
29859+ atomic_long_add_unchecked(1 + msg->retries,
29860 &port->counter_group[CM_XMIT].counter[attr_index]);
29861 if (msg->retries)
29862- atomic_long_add(msg->retries,
29863+ atomic_long_add_unchecked(msg->retries,
29864 &port->counter_group[CM_XMIT_RETRIES].
29865 counter[attr_index]);
29866
29867@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29868 }
29869
29870 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29871- atomic_long_inc(&port->counter_group[CM_RECV].
29872+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29873 counter[attr_id - CM_ATTR_ID_OFFSET]);
29874
29875 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29876@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29877 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29878
29879 return sprintf(buf, "%ld\n",
29880- atomic_long_read(&group->counter[cm_attr->index]));
29881+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29882 }
29883
29884 static const struct sysfs_ops cm_counter_ops = {
29885diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29886index 176c8f9..2627b62 100644
29887--- a/drivers/infiniband/core/fmr_pool.c
29888+++ b/drivers/infiniband/core/fmr_pool.c
29889@@ -98,8 +98,8 @@ struct ib_fmr_pool {
29890
29891 struct task_struct *thread;
29892
29893- atomic_t req_ser;
29894- atomic_t flush_ser;
29895+ atomic_unchecked_t req_ser;
29896+ atomic_unchecked_t flush_ser;
29897
29898 wait_queue_head_t force_wait;
29899 };
29900@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29901 struct ib_fmr_pool *pool = pool_ptr;
29902
29903 do {
29904- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29905+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29906 ib_fmr_batch_release(pool);
29907
29908- atomic_inc(&pool->flush_ser);
29909+ atomic_inc_unchecked(&pool->flush_ser);
29910 wake_up_interruptible(&pool->force_wait);
29911
29912 if (pool->flush_function)
29913@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29914 }
29915
29916 set_current_state(TASK_INTERRUPTIBLE);
29917- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29918+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29919 !kthread_should_stop())
29920 schedule();
29921 __set_current_state(TASK_RUNNING);
29922@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29923 pool->dirty_watermark = params->dirty_watermark;
29924 pool->dirty_len = 0;
29925 spin_lock_init(&pool->pool_lock);
29926- atomic_set(&pool->req_ser, 0);
29927- atomic_set(&pool->flush_ser, 0);
29928+ atomic_set_unchecked(&pool->req_ser, 0);
29929+ atomic_set_unchecked(&pool->flush_ser, 0);
29930 init_waitqueue_head(&pool->force_wait);
29931
29932 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29933@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29934 }
29935 spin_unlock_irq(&pool->pool_lock);
29936
29937- serial = atomic_inc_return(&pool->req_ser);
29938+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29939 wake_up_process(pool->thread);
29940
29941 if (wait_event_interruptible(pool->force_wait,
29942- atomic_read(&pool->flush_ser) - serial >= 0))
29943+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29944 return -EINTR;
29945
29946 return 0;
29947@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29948 } else {
29949 list_add_tail(&fmr->list, &pool->dirty_list);
29950 if (++pool->dirty_len >= pool->dirty_watermark) {
29951- atomic_inc(&pool->req_ser);
29952+ atomic_inc_unchecked(&pool->req_ser);
29953 wake_up_process(pool->thread);
29954 }
29955 }
29956diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
29957index 40c8353..946b0e4 100644
29958--- a/drivers/infiniband/hw/cxgb4/mem.c
29959+++ b/drivers/infiniband/hw/cxgb4/mem.c
29960@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29961 int err;
29962 struct fw_ri_tpte tpt;
29963 u32 stag_idx;
29964- static atomic_t key;
29965+ static atomic_unchecked_t key;
29966
29967 if (c4iw_fatal_error(rdev))
29968 return -EIO;
29969@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29970 &rdev->resource.tpt_fifo_lock);
29971 if (!stag_idx)
29972 return -ENOMEM;
29973- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29974+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29975 }
29976 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29977 __func__, stag_state, type, pdid, stag_idx);
29978diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
29979index 79b3dbc..96e5fcc 100644
29980--- a/drivers/infiniband/hw/ipath/ipath_rc.c
29981+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
29982@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29983 struct ib_atomic_eth *ateth;
29984 struct ipath_ack_entry *e;
29985 u64 vaddr;
29986- atomic64_t *maddr;
29987+ atomic64_unchecked_t *maddr;
29988 u64 sdata;
29989 u32 rkey;
29990 u8 next;
29991@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29992 IB_ACCESS_REMOTE_ATOMIC)))
29993 goto nack_acc_unlck;
29994 /* Perform atomic OP and save result. */
29995- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29996+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29997 sdata = be64_to_cpu(ateth->swap_data);
29998 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29999 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30000- (u64) atomic64_add_return(sdata, maddr) - sdata :
30001+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30002 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30003 be64_to_cpu(ateth->compare_data),
30004 sdata);
30005diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30006index 1f95bba..9530f87 100644
30007--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30008+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30009@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30010 unsigned long flags;
30011 struct ib_wc wc;
30012 u64 sdata;
30013- atomic64_t *maddr;
30014+ atomic64_unchecked_t *maddr;
30015 enum ib_wc_status send_status;
30016
30017 /*
30018@@ -382,11 +382,11 @@ again:
30019 IB_ACCESS_REMOTE_ATOMIC)))
30020 goto acc_err;
30021 /* Perform atomic OP and save result. */
30022- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30023+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30024 sdata = wqe->wr.wr.atomic.compare_add;
30025 *(u64 *) sqp->s_sge.sge.vaddr =
30026 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30027- (u64) atomic64_add_return(sdata, maddr) - sdata :
30028+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30029 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30030 sdata, wqe->wr.wr.atomic.swap);
30031 goto send_comp;
30032diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30033index 5965b3d..16817fb 100644
30034--- a/drivers/infiniband/hw/nes/nes.c
30035+++ b/drivers/infiniband/hw/nes/nes.c
30036@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30037 LIST_HEAD(nes_adapter_list);
30038 static LIST_HEAD(nes_dev_list);
30039
30040-atomic_t qps_destroyed;
30041+atomic_unchecked_t qps_destroyed;
30042
30043 static unsigned int ee_flsh_adapter;
30044 static unsigned int sysfs_nonidx_addr;
30045@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30046 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30047 struct nes_adapter *nesadapter = nesdev->nesadapter;
30048
30049- atomic_inc(&qps_destroyed);
30050+ atomic_inc_unchecked(&qps_destroyed);
30051
30052 /* Free the control structures */
30053
30054diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30055index 568b4f1..5ea3eff 100644
30056--- a/drivers/infiniband/hw/nes/nes.h
30057+++ b/drivers/infiniband/hw/nes/nes.h
30058@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30059 extern unsigned int wqm_quanta;
30060 extern struct list_head nes_adapter_list;
30061
30062-extern atomic_t cm_connects;
30063-extern atomic_t cm_accepts;
30064-extern atomic_t cm_disconnects;
30065-extern atomic_t cm_closes;
30066-extern atomic_t cm_connecteds;
30067-extern atomic_t cm_connect_reqs;
30068-extern atomic_t cm_rejects;
30069-extern atomic_t mod_qp_timouts;
30070-extern atomic_t qps_created;
30071-extern atomic_t qps_destroyed;
30072-extern atomic_t sw_qps_destroyed;
30073+extern atomic_unchecked_t cm_connects;
30074+extern atomic_unchecked_t cm_accepts;
30075+extern atomic_unchecked_t cm_disconnects;
30076+extern atomic_unchecked_t cm_closes;
30077+extern atomic_unchecked_t cm_connecteds;
30078+extern atomic_unchecked_t cm_connect_reqs;
30079+extern atomic_unchecked_t cm_rejects;
30080+extern atomic_unchecked_t mod_qp_timouts;
30081+extern atomic_unchecked_t qps_created;
30082+extern atomic_unchecked_t qps_destroyed;
30083+extern atomic_unchecked_t sw_qps_destroyed;
30084 extern u32 mh_detected;
30085 extern u32 mh_pauses_sent;
30086 extern u32 cm_packets_sent;
30087@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30088 extern u32 cm_packets_received;
30089 extern u32 cm_packets_dropped;
30090 extern u32 cm_packets_retrans;
30091-extern atomic_t cm_listens_created;
30092-extern atomic_t cm_listens_destroyed;
30093+extern atomic_unchecked_t cm_listens_created;
30094+extern atomic_unchecked_t cm_listens_destroyed;
30095 extern u32 cm_backlog_drops;
30096-extern atomic_t cm_loopbacks;
30097-extern atomic_t cm_nodes_created;
30098-extern atomic_t cm_nodes_destroyed;
30099-extern atomic_t cm_accel_dropped_pkts;
30100-extern atomic_t cm_resets_recvd;
30101-extern atomic_t pau_qps_created;
30102-extern atomic_t pau_qps_destroyed;
30103+extern atomic_unchecked_t cm_loopbacks;
30104+extern atomic_unchecked_t cm_nodes_created;
30105+extern atomic_unchecked_t cm_nodes_destroyed;
30106+extern atomic_unchecked_t cm_accel_dropped_pkts;
30107+extern atomic_unchecked_t cm_resets_recvd;
30108+extern atomic_unchecked_t pau_qps_created;
30109+extern atomic_unchecked_t pau_qps_destroyed;
30110
30111 extern u32 int_mod_timer_init;
30112 extern u32 int_mod_cq_depth_256;
30113diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30114index 0a52d72..0642f36 100644
30115--- a/drivers/infiniband/hw/nes/nes_cm.c
30116+++ b/drivers/infiniband/hw/nes/nes_cm.c
30117@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30118 u32 cm_packets_retrans;
30119 u32 cm_packets_created;
30120 u32 cm_packets_received;
30121-atomic_t cm_listens_created;
30122-atomic_t cm_listens_destroyed;
30123+atomic_unchecked_t cm_listens_created;
30124+atomic_unchecked_t cm_listens_destroyed;
30125 u32 cm_backlog_drops;
30126-atomic_t cm_loopbacks;
30127-atomic_t cm_nodes_created;
30128-atomic_t cm_nodes_destroyed;
30129-atomic_t cm_accel_dropped_pkts;
30130-atomic_t cm_resets_recvd;
30131+atomic_unchecked_t cm_loopbacks;
30132+atomic_unchecked_t cm_nodes_created;
30133+atomic_unchecked_t cm_nodes_destroyed;
30134+atomic_unchecked_t cm_accel_dropped_pkts;
30135+atomic_unchecked_t cm_resets_recvd;
30136
30137 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30138 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30139@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30140
30141 static struct nes_cm_core *g_cm_core;
30142
30143-atomic_t cm_connects;
30144-atomic_t cm_accepts;
30145-atomic_t cm_disconnects;
30146-atomic_t cm_closes;
30147-atomic_t cm_connecteds;
30148-atomic_t cm_connect_reqs;
30149-atomic_t cm_rejects;
30150+atomic_unchecked_t cm_connects;
30151+atomic_unchecked_t cm_accepts;
30152+atomic_unchecked_t cm_disconnects;
30153+atomic_unchecked_t cm_closes;
30154+atomic_unchecked_t cm_connecteds;
30155+atomic_unchecked_t cm_connect_reqs;
30156+atomic_unchecked_t cm_rejects;
30157
30158 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30159 {
30160@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30161 kfree(listener);
30162 listener = NULL;
30163 ret = 0;
30164- atomic_inc(&cm_listens_destroyed);
30165+ atomic_inc_unchecked(&cm_listens_destroyed);
30166 } else {
30167 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30168 }
30169@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30170 cm_node->rem_mac);
30171
30172 add_hte_node(cm_core, cm_node);
30173- atomic_inc(&cm_nodes_created);
30174+ atomic_inc_unchecked(&cm_nodes_created);
30175
30176 return cm_node;
30177 }
30178@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30179 }
30180
30181 atomic_dec(&cm_core->node_cnt);
30182- atomic_inc(&cm_nodes_destroyed);
30183+ atomic_inc_unchecked(&cm_nodes_destroyed);
30184 nesqp = cm_node->nesqp;
30185 if (nesqp) {
30186 nesqp->cm_node = NULL;
30187@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30188
30189 static void drop_packet(struct sk_buff *skb)
30190 {
30191- atomic_inc(&cm_accel_dropped_pkts);
30192+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30193 dev_kfree_skb_any(skb);
30194 }
30195
30196@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30197 {
30198
30199 int reset = 0; /* whether to send reset in case of err.. */
30200- atomic_inc(&cm_resets_recvd);
30201+ atomic_inc_unchecked(&cm_resets_recvd);
30202 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30203 " refcnt=%d\n", cm_node, cm_node->state,
30204 atomic_read(&cm_node->ref_count));
30205@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30206 rem_ref_cm_node(cm_node->cm_core, cm_node);
30207 return NULL;
30208 }
30209- atomic_inc(&cm_loopbacks);
30210+ atomic_inc_unchecked(&cm_loopbacks);
30211 loopbackremotenode->loopbackpartner = cm_node;
30212 loopbackremotenode->tcp_cntxt.rcv_wscale =
30213 NES_CM_DEFAULT_RCV_WND_SCALE;
30214@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30215 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30216 else {
30217 rem_ref_cm_node(cm_core, cm_node);
30218- atomic_inc(&cm_accel_dropped_pkts);
30219+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30220 dev_kfree_skb_any(skb);
30221 }
30222 break;
30223@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30224
30225 if ((cm_id) && (cm_id->event_handler)) {
30226 if (issue_disconn) {
30227- atomic_inc(&cm_disconnects);
30228+ atomic_inc_unchecked(&cm_disconnects);
30229 cm_event.event = IW_CM_EVENT_DISCONNECT;
30230 cm_event.status = disconn_status;
30231 cm_event.local_addr = cm_id->local_addr;
30232@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30233 }
30234
30235 if (issue_close) {
30236- atomic_inc(&cm_closes);
30237+ atomic_inc_unchecked(&cm_closes);
30238 nes_disconnect(nesqp, 1);
30239
30240 cm_id->provider_data = nesqp;
30241@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30242
30243 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30244 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30245- atomic_inc(&cm_accepts);
30246+ atomic_inc_unchecked(&cm_accepts);
30247
30248 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30249 netdev_refcnt_read(nesvnic->netdev));
30250@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30251 struct nes_cm_core *cm_core;
30252 u8 *start_buff;
30253
30254- atomic_inc(&cm_rejects);
30255+ atomic_inc_unchecked(&cm_rejects);
30256 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30257 loopback = cm_node->loopbackpartner;
30258 cm_core = cm_node->cm_core;
30259@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30260 ntohl(cm_id->local_addr.sin_addr.s_addr),
30261 ntohs(cm_id->local_addr.sin_port));
30262
30263- atomic_inc(&cm_connects);
30264+ atomic_inc_unchecked(&cm_connects);
30265 nesqp->active_conn = 1;
30266
30267 /* cache the cm_id in the qp */
30268@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30269 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30270 return err;
30271 }
30272- atomic_inc(&cm_listens_created);
30273+ atomic_inc_unchecked(&cm_listens_created);
30274 }
30275
30276 cm_id->add_ref(cm_id);
30277@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30278
30279 if (nesqp->destroyed)
30280 return;
30281- atomic_inc(&cm_connecteds);
30282+ atomic_inc_unchecked(&cm_connecteds);
30283 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30284 " local port 0x%04X. jiffies = %lu.\n",
30285 nesqp->hwqp.qp_id,
30286@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30287
30288 cm_id->add_ref(cm_id);
30289 ret = cm_id->event_handler(cm_id, &cm_event);
30290- atomic_inc(&cm_closes);
30291+ atomic_inc_unchecked(&cm_closes);
30292 cm_event.event = IW_CM_EVENT_CLOSE;
30293 cm_event.status = 0;
30294 cm_event.provider_data = cm_id->provider_data;
30295@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30296 return;
30297 cm_id = cm_node->cm_id;
30298
30299- atomic_inc(&cm_connect_reqs);
30300+ atomic_inc_unchecked(&cm_connect_reqs);
30301 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30302 cm_node, cm_id, jiffies);
30303
30304@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30305 return;
30306 cm_id = cm_node->cm_id;
30307
30308- atomic_inc(&cm_connect_reqs);
30309+ atomic_inc_unchecked(&cm_connect_reqs);
30310 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30311 cm_node, cm_id, jiffies);
30312
30313diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30314index b3b2a24..7bfaf1e 100644
30315--- a/drivers/infiniband/hw/nes/nes_mgt.c
30316+++ b/drivers/infiniband/hw/nes/nes_mgt.c
30317@@ -40,8 +40,8 @@
30318 #include "nes.h"
30319 #include "nes_mgt.h"
30320
30321-atomic_t pau_qps_created;
30322-atomic_t pau_qps_destroyed;
30323+atomic_unchecked_t pau_qps_created;
30324+atomic_unchecked_t pau_qps_destroyed;
30325
30326 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30327 {
30328@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30329 {
30330 struct sk_buff *skb;
30331 unsigned long flags;
30332- atomic_inc(&pau_qps_destroyed);
30333+ atomic_inc_unchecked(&pau_qps_destroyed);
30334
30335 /* Free packets that have not yet been forwarded */
30336 /* Lock is acquired by skb_dequeue when removing the skb */
30337@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30338 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30339 skb_queue_head_init(&nesqp->pau_list);
30340 spin_lock_init(&nesqp->pau_lock);
30341- atomic_inc(&pau_qps_created);
30342+ atomic_inc_unchecked(&pau_qps_created);
30343 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30344 }
30345
30346diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30347index c00d2f3..8834298 100644
30348--- a/drivers/infiniband/hw/nes/nes_nic.c
30349+++ b/drivers/infiniband/hw/nes/nes_nic.c
30350@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30351 target_stat_values[++index] = mh_detected;
30352 target_stat_values[++index] = mh_pauses_sent;
30353 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30354- target_stat_values[++index] = atomic_read(&cm_connects);
30355- target_stat_values[++index] = atomic_read(&cm_accepts);
30356- target_stat_values[++index] = atomic_read(&cm_disconnects);
30357- target_stat_values[++index] = atomic_read(&cm_connecteds);
30358- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30359- target_stat_values[++index] = atomic_read(&cm_rejects);
30360- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30361- target_stat_values[++index] = atomic_read(&qps_created);
30362- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30363- target_stat_values[++index] = atomic_read(&qps_destroyed);
30364- target_stat_values[++index] = atomic_read(&cm_closes);
30365+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30366+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30367+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30368+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30369+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30370+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30371+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30372+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30373+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30374+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30375+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30376 target_stat_values[++index] = cm_packets_sent;
30377 target_stat_values[++index] = cm_packets_bounced;
30378 target_stat_values[++index] = cm_packets_created;
30379 target_stat_values[++index] = cm_packets_received;
30380 target_stat_values[++index] = cm_packets_dropped;
30381 target_stat_values[++index] = cm_packets_retrans;
30382- target_stat_values[++index] = atomic_read(&cm_listens_created);
30383- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30384+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30385+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30386 target_stat_values[++index] = cm_backlog_drops;
30387- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30388- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30389- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30390- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30391- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30392+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30393+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30394+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30395+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30396+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30397 target_stat_values[++index] = nesadapter->free_4kpbl;
30398 target_stat_values[++index] = nesadapter->free_256pbl;
30399 target_stat_values[++index] = int_mod_timer_init;
30400 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30401 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30402 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30403- target_stat_values[++index] = atomic_read(&pau_qps_created);
30404- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30405+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30406+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30407 }
30408
30409 /**
30410diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30411index 5095bc4..41e8fff 100644
30412--- a/drivers/infiniband/hw/nes/nes_verbs.c
30413+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30414@@ -46,9 +46,9 @@
30415
30416 #include <rdma/ib_umem.h>
30417
30418-atomic_t mod_qp_timouts;
30419-atomic_t qps_created;
30420-atomic_t sw_qps_destroyed;
30421+atomic_unchecked_t mod_qp_timouts;
30422+atomic_unchecked_t qps_created;
30423+atomic_unchecked_t sw_qps_destroyed;
30424
30425 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30426
30427@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30428 if (init_attr->create_flags)
30429 return ERR_PTR(-EINVAL);
30430
30431- atomic_inc(&qps_created);
30432+ atomic_inc_unchecked(&qps_created);
30433 switch (init_attr->qp_type) {
30434 case IB_QPT_RC:
30435 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30436@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30437 struct iw_cm_event cm_event;
30438 int ret = 0;
30439
30440- atomic_inc(&sw_qps_destroyed);
30441+ atomic_inc_unchecked(&sw_qps_destroyed);
30442 nesqp->destroyed = 1;
30443
30444 /* Blow away the connection if it exists. */
30445diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30446index b881bdc..c2e360c 100644
30447--- a/drivers/infiniband/hw/qib/qib.h
30448+++ b/drivers/infiniband/hw/qib/qib.h
30449@@ -51,6 +51,7 @@
30450 #include <linux/completion.h>
30451 #include <linux/kref.h>
30452 #include <linux/sched.h>
30453+#include <linux/slab.h>
30454
30455 #include "qib_common.h"
30456 #include "qib_verbs.h"
30457diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30458index c351aa4..e6967c2 100644
30459--- a/drivers/input/gameport/gameport.c
30460+++ b/drivers/input/gameport/gameport.c
30461@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30462 */
30463 static void gameport_init_port(struct gameport *gameport)
30464 {
30465- static atomic_t gameport_no = ATOMIC_INIT(0);
30466+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30467
30468 __module_get(THIS_MODULE);
30469
30470 mutex_init(&gameport->drv_mutex);
30471 device_initialize(&gameport->dev);
30472 dev_set_name(&gameport->dev, "gameport%lu",
30473- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30474+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30475 gameport->dev.bus = &gameport_bus;
30476 gameport->dev.release = gameport_release_port;
30477 if (gameport->parent)
30478diff --git a/drivers/input/input.c b/drivers/input/input.c
30479index da38d97..2aa0b79 100644
30480--- a/drivers/input/input.c
30481+++ b/drivers/input/input.c
30482@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30483 */
30484 int input_register_device(struct input_dev *dev)
30485 {
30486- static atomic_t input_no = ATOMIC_INIT(0);
30487+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30488 struct input_handler *handler;
30489 const char *path;
30490 int error;
30491@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30492 dev->setkeycode = input_default_setkeycode;
30493
30494 dev_set_name(&dev->dev, "input%ld",
30495- (unsigned long) atomic_inc_return(&input_no) - 1);
30496+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30497
30498 error = device_add(&dev->dev);
30499 if (error)
30500diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30501index b8d8611..7a4a04b 100644
30502--- a/drivers/input/joystick/sidewinder.c
30503+++ b/drivers/input/joystick/sidewinder.c
30504@@ -30,6 +30,7 @@
30505 #include <linux/kernel.h>
30506 #include <linux/module.h>
30507 #include <linux/slab.h>
30508+#include <linux/sched.h>
30509 #include <linux/init.h>
30510 #include <linux/input.h>
30511 #include <linux/gameport.h>
30512diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30513index d728875..844c89b 100644
30514--- a/drivers/input/joystick/xpad.c
30515+++ b/drivers/input/joystick/xpad.c
30516@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30517
30518 static int xpad_led_probe(struct usb_xpad *xpad)
30519 {
30520- static atomic_t led_seq = ATOMIC_INIT(0);
30521+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30522 long led_no;
30523 struct xpad_led *led;
30524 struct led_classdev *led_cdev;
30525@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30526 if (!led)
30527 return -ENOMEM;
30528
30529- led_no = (long)atomic_inc_return(&led_seq) - 1;
30530+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30531
30532 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30533 led->xpad = xpad;
30534diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30535index 0110b5a..d3ad144 100644
30536--- a/drivers/input/mousedev.c
30537+++ b/drivers/input/mousedev.c
30538@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30539
30540 spin_unlock_irq(&client->packet_lock);
30541
30542- if (copy_to_user(buffer, data, count))
30543+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30544 return -EFAULT;
30545
30546 return count;
30547diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30548index ba70058..571d25d 100644
30549--- a/drivers/input/serio/serio.c
30550+++ b/drivers/input/serio/serio.c
30551@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30552 */
30553 static void serio_init_port(struct serio *serio)
30554 {
30555- static atomic_t serio_no = ATOMIC_INIT(0);
30556+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30557
30558 __module_get(THIS_MODULE);
30559
30560@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30561 mutex_init(&serio->drv_mutex);
30562 device_initialize(&serio->dev);
30563 dev_set_name(&serio->dev, "serio%ld",
30564- (long)atomic_inc_return(&serio_no) - 1);
30565+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30566 serio->dev.bus = &serio_bus;
30567 serio->dev.release = serio_release_port;
30568 serio->dev.groups = serio_device_attr_groups;
30569diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30570index e44933d..9ba484a 100644
30571--- a/drivers/isdn/capi/capi.c
30572+++ b/drivers/isdn/capi/capi.c
30573@@ -83,8 +83,8 @@ struct capiminor {
30574
30575 struct capi20_appl *ap;
30576 u32 ncci;
30577- atomic_t datahandle;
30578- atomic_t msgid;
30579+ atomic_unchecked_t datahandle;
30580+ atomic_unchecked_t msgid;
30581
30582 struct tty_port port;
30583 int ttyinstop;
30584@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30585 capimsg_setu16(s, 2, mp->ap->applid);
30586 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30587 capimsg_setu8 (s, 5, CAPI_RESP);
30588- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30589+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30590 capimsg_setu32(s, 8, mp->ncci);
30591 capimsg_setu16(s, 12, datahandle);
30592 }
30593@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30594 mp->outbytes -= len;
30595 spin_unlock_bh(&mp->outlock);
30596
30597- datahandle = atomic_inc_return(&mp->datahandle);
30598+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30599 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30600 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30601 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30602 capimsg_setu16(skb->data, 2, mp->ap->applid);
30603 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30604 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30605- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30606+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30607 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30608 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30609 capimsg_setu16(skb->data, 16, len); /* Data length */
30610diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30611index db621db..825ea1a 100644
30612--- a/drivers/isdn/gigaset/common.c
30613+++ b/drivers/isdn/gigaset/common.c
30614@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30615 cs->commands_pending = 0;
30616 cs->cur_at_seq = 0;
30617 cs->gotfwver = -1;
30618- cs->open_count = 0;
30619+ local_set(&cs->open_count, 0);
30620 cs->dev = NULL;
30621 cs->tty = NULL;
30622 cs->tty_dev = NULL;
30623diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30624index 212efaf..f187c6b 100644
30625--- a/drivers/isdn/gigaset/gigaset.h
30626+++ b/drivers/isdn/gigaset/gigaset.h
30627@@ -35,6 +35,7 @@
30628 #include <linux/tty_driver.h>
30629 #include <linux/list.h>
30630 #include <linux/atomic.h>
30631+#include <asm/local.h>
30632
30633 #define GIG_VERSION {0, 5, 0, 0}
30634 #define GIG_COMPAT {0, 4, 0, 0}
30635@@ -433,7 +434,7 @@ struct cardstate {
30636 spinlock_t cmdlock;
30637 unsigned curlen, cmdbytes;
30638
30639- unsigned open_count;
30640+ local_t open_count;
30641 struct tty_struct *tty;
30642 struct tasklet_struct if_wake_tasklet;
30643 unsigned control_state;
30644diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30645index ee0a549..a7c9798 100644
30646--- a/drivers/isdn/gigaset/interface.c
30647+++ b/drivers/isdn/gigaset/interface.c
30648@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30649 }
30650 tty->driver_data = cs;
30651
30652- ++cs->open_count;
30653-
30654- if (cs->open_count == 1) {
30655+ if (local_inc_return(&cs->open_count) == 1) {
30656 spin_lock_irqsave(&cs->lock, flags);
30657 cs->tty = tty;
30658 spin_unlock_irqrestore(&cs->lock, flags);
30659@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30660
30661 if (!cs->connected)
30662 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30663- else if (!cs->open_count)
30664+ else if (!local_read(&cs->open_count))
30665 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30666 else {
30667- if (!--cs->open_count) {
30668+ if (!local_dec_return(&cs->open_count)) {
30669 spin_lock_irqsave(&cs->lock, flags);
30670 cs->tty = NULL;
30671 spin_unlock_irqrestore(&cs->lock, flags);
30672@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30673 if (!cs->connected) {
30674 gig_dbg(DEBUG_IF, "not connected");
30675 retval = -ENODEV;
30676- } else if (!cs->open_count)
30677+ } else if (!local_read(&cs->open_count))
30678 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30679 else {
30680 retval = 0;
30681@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30682 retval = -ENODEV;
30683 goto done;
30684 }
30685- if (!cs->open_count) {
30686+ if (!local_read(&cs->open_count)) {
30687 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30688 retval = -ENODEV;
30689 goto done;
30690@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30691 if (!cs->connected) {
30692 gig_dbg(DEBUG_IF, "not connected");
30693 retval = -ENODEV;
30694- } else if (!cs->open_count)
30695+ } else if (!local_read(&cs->open_count))
30696 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30697 else if (cs->mstate != MS_LOCKED) {
30698 dev_warn(cs->dev, "can't write to unlocked device\n");
30699@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30700
30701 if (!cs->connected)
30702 gig_dbg(DEBUG_IF, "not connected");
30703- else if (!cs->open_count)
30704+ else if (!local_read(&cs->open_count))
30705 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30706 else if (cs->mstate != MS_LOCKED)
30707 dev_warn(cs->dev, "can't write to unlocked device\n");
30708@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30709
30710 if (!cs->connected)
30711 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30712- else if (!cs->open_count)
30713+ else if (!local_read(&cs->open_count))
30714 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30715 else
30716 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30717@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30718
30719 if (!cs->connected)
30720 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30721- else if (!cs->open_count)
30722+ else if (!local_read(&cs->open_count))
30723 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30724 else
30725 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30726@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30727 goto out;
30728 }
30729
30730- if (!cs->open_count) {
30731+ if (!local_read(&cs->open_count)) {
30732 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30733 goto out;
30734 }
30735diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30736index 2a57da59..e7a12ed 100644
30737--- a/drivers/isdn/hardware/avm/b1.c
30738+++ b/drivers/isdn/hardware/avm/b1.c
30739@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30740 }
30741 if (left) {
30742 if (t4file->user) {
30743- if (copy_from_user(buf, dp, left))
30744+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30745 return -EFAULT;
30746 } else {
30747 memcpy(buf, dp, left);
30748@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30749 }
30750 if (left) {
30751 if (config->user) {
30752- if (copy_from_user(buf, dp, left))
30753+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30754 return -EFAULT;
30755 } else {
30756 memcpy(buf, dp, left);
30757diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30758index 85784a7..a19ca98 100644
30759--- a/drivers/isdn/hardware/eicon/divasync.h
30760+++ b/drivers/isdn/hardware/eicon/divasync.h
30761@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30762 } diva_didd_add_adapter_t;
30763 typedef struct _diva_didd_remove_adapter {
30764 IDI_CALL p_request;
30765-} diva_didd_remove_adapter_t;
30766+} __no_const diva_didd_remove_adapter_t;
30767 typedef struct _diva_didd_read_adapter_array {
30768 void * buffer;
30769 dword length;
30770diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30771index a3bd163..8956575 100644
30772--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30773+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30774@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30775 typedef struct _diva_os_idi_adapter_interface {
30776 diva_init_card_proc_t cleanup_adapter_proc;
30777 diva_cmd_card_proc_t cmd_proc;
30778-} diva_os_idi_adapter_interface_t;
30779+} __no_const diva_os_idi_adapter_interface_t;
30780
30781 typedef struct _diva_os_xdi_adapter {
30782 struct list_head link;
30783diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
30784index 2339d73..802ab87 100644
30785--- a/drivers/isdn/i4l/isdn_net.c
30786+++ b/drivers/isdn/i4l/isdn_net.c
30787@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
30788 {
30789 isdn_net_local *lp = netdev_priv(dev);
30790 unsigned char *p;
30791- ushort len = 0;
30792+ int len = 0;
30793
30794 switch (lp->p_encap) {
30795 case ISDN_NET_ENCAP_ETHER:
30796diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30797index 1f355bb..43f1fea 100644
30798--- a/drivers/isdn/icn/icn.c
30799+++ b/drivers/isdn/icn/icn.c
30800@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30801 if (count > len)
30802 count = len;
30803 if (user) {
30804- if (copy_from_user(msg, buf, count))
30805+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30806 return -EFAULT;
30807 } else
30808 memcpy(msg, buf, count);
30809diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30810index b5fdcb7..5b6c59f 100644
30811--- a/drivers/lguest/core.c
30812+++ b/drivers/lguest/core.c
30813@@ -92,9 +92,17 @@ static __init int map_switcher(void)
30814 * it's worked so far. The end address needs +1 because __get_vm_area
30815 * allocates an extra guard page, so we need space for that.
30816 */
30817+
30818+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30819+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30820+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30821+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30822+#else
30823 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30824 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30825 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30826+#endif
30827+
30828 if (!switcher_vma) {
30829 err = -ENOMEM;
30830 printk("lguest: could not map switcher pages high\n");
30831@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30832 * Now the Switcher is mapped at the right address, we can't fail!
30833 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30834 */
30835- memcpy(switcher_vma->addr, start_switcher_text,
30836+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30837 end_switcher_text - start_switcher_text);
30838
30839 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30840diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30841index 65af42f..530c87a 100644
30842--- a/drivers/lguest/x86/core.c
30843+++ b/drivers/lguest/x86/core.c
30844@@ -59,7 +59,7 @@ static struct {
30845 /* Offset from where switcher.S was compiled to where we've copied it */
30846 static unsigned long switcher_offset(void)
30847 {
30848- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30849+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30850 }
30851
30852 /* This cpu's struct lguest_pages. */
30853@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30854 * These copies are pretty cheap, so we do them unconditionally: */
30855 /* Save the current Host top-level page directory.
30856 */
30857+
30858+#ifdef CONFIG_PAX_PER_CPU_PGD
30859+ pages->state.host_cr3 = read_cr3();
30860+#else
30861 pages->state.host_cr3 = __pa(current->mm->pgd);
30862+#endif
30863+
30864 /*
30865 * Set up the Guest's page tables to see this CPU's pages (and no
30866 * other CPU's pages).
30867@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30868 * compiled-in switcher code and the high-mapped copy we just made.
30869 */
30870 for (i = 0; i < IDT_ENTRIES; i++)
30871- default_idt_entries[i] += switcher_offset();
30872+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30873
30874 /*
30875 * Set up the Switcher's per-cpu areas.
30876@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30877 * it will be undisturbed when we switch. To change %cs and jump we
30878 * need this structure to feed to Intel's "lcall" instruction.
30879 */
30880- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30881+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30882 lguest_entry.segment = LGUEST_CS;
30883
30884 /*
30885diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30886index 40634b0..4f5855e 100644
30887--- a/drivers/lguest/x86/switcher_32.S
30888+++ b/drivers/lguest/x86/switcher_32.S
30889@@ -87,6 +87,7 @@
30890 #include <asm/page.h>
30891 #include <asm/segment.h>
30892 #include <asm/lguest.h>
30893+#include <asm/processor-flags.h>
30894
30895 // We mark the start of the code to copy
30896 // It's placed in .text tho it's never run here
30897@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30898 // Changes type when we load it: damn Intel!
30899 // For after we switch over our page tables
30900 // That entry will be read-only: we'd crash.
30901+
30902+#ifdef CONFIG_PAX_KERNEXEC
30903+ mov %cr0, %edx
30904+ xor $X86_CR0_WP, %edx
30905+ mov %edx, %cr0
30906+#endif
30907+
30908 movl $(GDT_ENTRY_TSS*8), %edx
30909 ltr %dx
30910
30911@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30912 // Let's clear it again for our return.
30913 // The GDT descriptor of the Host
30914 // Points to the table after two "size" bytes
30915- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30916+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30917 // Clear "used" from type field (byte 5, bit 2)
30918- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30919+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30920+
30921+#ifdef CONFIG_PAX_KERNEXEC
30922+ mov %cr0, %eax
30923+ xor $X86_CR0_WP, %eax
30924+ mov %eax, %cr0
30925+#endif
30926
30927 // Once our page table's switched, the Guest is live!
30928 // The Host fades as we run this final step.
30929@@ -295,13 +309,12 @@ deliver_to_host:
30930 // I consulted gcc, and it gave
30931 // These instructions, which I gladly credit:
30932 leal (%edx,%ebx,8), %eax
30933- movzwl (%eax),%edx
30934- movl 4(%eax), %eax
30935- xorw %ax, %ax
30936- orl %eax, %edx
30937+ movl 4(%eax), %edx
30938+ movw (%eax), %dx
30939 // Now the address of the handler's in %edx
30940 // We call it now: its "iret" drops us home.
30941- jmp *%edx
30942+ ljmp $__KERNEL_CS, $1f
30943+1: jmp *%edx
30944
30945 // Every interrupt can come to us here
30946 // But we must truly tell each apart.
30947diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30948index 4daf9e5..b8d1d0f 100644
30949--- a/drivers/macintosh/macio_asic.c
30950+++ b/drivers/macintosh/macio_asic.c
30951@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30952 * MacIO is matched against any Apple ID, it's probe() function
30953 * will then decide wether it applies or not
30954 */
30955-static const struct pci_device_id __devinitdata pci_ids [] = { {
30956+static const struct pci_device_id __devinitconst pci_ids [] = { {
30957 .vendor = PCI_VENDOR_ID_APPLE,
30958 .device = PCI_ANY_ID,
30959 .subvendor = PCI_ANY_ID,
30960diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
30961index 31c2dc2..a2de7a6 100644
30962--- a/drivers/md/dm-ioctl.c
30963+++ b/drivers/md/dm-ioctl.c
30964@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
30965 cmd == DM_LIST_VERSIONS_CMD)
30966 return 0;
30967
30968- if ((cmd == DM_DEV_CREATE_CMD)) {
30969+ if (cmd == DM_DEV_CREATE_CMD) {
30970 if (!*param->name) {
30971 DMWARN("name not supplied when creating device");
30972 return -EINVAL;
30973diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
30974index 9bfd057..01180bc 100644
30975--- a/drivers/md/dm-raid1.c
30976+++ b/drivers/md/dm-raid1.c
30977@@ -40,7 +40,7 @@ enum dm_raid1_error {
30978
30979 struct mirror {
30980 struct mirror_set *ms;
30981- atomic_t error_count;
30982+ atomic_unchecked_t error_count;
30983 unsigned long error_type;
30984 struct dm_dev *dev;
30985 sector_t offset;
30986@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
30987 struct mirror *m;
30988
30989 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30990- if (!atomic_read(&m->error_count))
30991+ if (!atomic_read_unchecked(&m->error_count))
30992 return m;
30993
30994 return NULL;
30995@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
30996 * simple way to tell if a device has encountered
30997 * errors.
30998 */
30999- atomic_inc(&m->error_count);
31000+ atomic_inc_unchecked(&m->error_count);
31001
31002 if (test_and_set_bit(error_type, &m->error_type))
31003 return;
31004@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31005 struct mirror *m = get_default_mirror(ms);
31006
31007 do {
31008- if (likely(!atomic_read(&m->error_count)))
31009+ if (likely(!atomic_read_unchecked(&m->error_count)))
31010 return m;
31011
31012 if (m-- == ms->mirror)
31013@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31014 {
31015 struct mirror *default_mirror = get_default_mirror(m->ms);
31016
31017- return !atomic_read(&default_mirror->error_count);
31018+ return !atomic_read_unchecked(&default_mirror->error_count);
31019 }
31020
31021 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31022@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31023 */
31024 if (likely(region_in_sync(ms, region, 1)))
31025 m = choose_mirror(ms, bio->bi_sector);
31026- else if (m && atomic_read(&m->error_count))
31027+ else if (m && atomic_read_unchecked(&m->error_count))
31028 m = NULL;
31029
31030 if (likely(m))
31031@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31032 }
31033
31034 ms->mirror[mirror].ms = ms;
31035- atomic_set(&(ms->mirror[mirror].error_count), 0);
31036+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31037 ms->mirror[mirror].error_type = 0;
31038 ms->mirror[mirror].offset = offset;
31039
31040@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31041 */
31042 static char device_status_char(struct mirror *m)
31043 {
31044- if (!atomic_read(&(m->error_count)))
31045+ if (!atomic_read_unchecked(&(m->error_count)))
31046 return 'A';
31047
31048 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31049diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31050index 3d80cf0..b77cc47 100644
31051--- a/drivers/md/dm-stripe.c
31052+++ b/drivers/md/dm-stripe.c
31053@@ -20,7 +20,7 @@ struct stripe {
31054 struct dm_dev *dev;
31055 sector_t physical_start;
31056
31057- atomic_t error_count;
31058+ atomic_unchecked_t error_count;
31059 };
31060
31061 struct stripe_c {
31062@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31063 kfree(sc);
31064 return r;
31065 }
31066- atomic_set(&(sc->stripe[i].error_count), 0);
31067+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31068 }
31069
31070 ti->private = sc;
31071@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31072 DMEMIT("%d ", sc->stripes);
31073 for (i = 0; i < sc->stripes; i++) {
31074 DMEMIT("%s ", sc->stripe[i].dev->name);
31075- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31076+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31077 'D' : 'A';
31078 }
31079 buffer[i] = '\0';
31080@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31081 */
31082 for (i = 0; i < sc->stripes; i++)
31083 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31084- atomic_inc(&(sc->stripe[i].error_count));
31085- if (atomic_read(&(sc->stripe[i].error_count)) <
31086+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31087+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31088 DM_IO_ERROR_THRESHOLD)
31089 schedule_work(&sc->trigger_event);
31090 }
31091diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31092index 8e91321..fd17aef 100644
31093--- a/drivers/md/dm-table.c
31094+++ b/drivers/md/dm-table.c
31095@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31096 if (!dev_size)
31097 return 0;
31098
31099- if ((start >= dev_size) || (start + len > dev_size)) {
31100+ if ((start >= dev_size) || (len > dev_size - start)) {
31101 DMWARN("%s: %s too small for target: "
31102 "start=%llu, len=%llu, dev_size=%llu",
31103 dm_device_name(ti->table->md), bdevname(bdev, b),
31104diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31105index 59c4f04..4c7b661 100644
31106--- a/drivers/md/dm-thin-metadata.c
31107+++ b/drivers/md/dm-thin-metadata.c
31108@@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31109
31110 pmd->info.tm = tm;
31111 pmd->info.levels = 2;
31112- pmd->info.value_type.context = pmd->data_sm;
31113+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31114 pmd->info.value_type.size = sizeof(__le64);
31115 pmd->info.value_type.inc = data_block_inc;
31116 pmd->info.value_type.dec = data_block_dec;
31117@@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31118
31119 pmd->bl_info.tm = tm;
31120 pmd->bl_info.levels = 1;
31121- pmd->bl_info.value_type.context = pmd->data_sm;
31122+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31123 pmd->bl_info.value_type.size = sizeof(__le64);
31124 pmd->bl_info.value_type.inc = data_block_inc;
31125 pmd->bl_info.value_type.dec = data_block_dec;
31126diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31127index 4720f68..78d1df7 100644
31128--- a/drivers/md/dm.c
31129+++ b/drivers/md/dm.c
31130@@ -177,9 +177,9 @@ struct mapped_device {
31131 /*
31132 * Event handling.
31133 */
31134- atomic_t event_nr;
31135+ atomic_unchecked_t event_nr;
31136 wait_queue_head_t eventq;
31137- atomic_t uevent_seq;
31138+ atomic_unchecked_t uevent_seq;
31139 struct list_head uevent_list;
31140 spinlock_t uevent_lock; /* Protect access to uevent_list */
31141
31142@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31143 rwlock_init(&md->map_lock);
31144 atomic_set(&md->holders, 1);
31145 atomic_set(&md->open_count, 0);
31146- atomic_set(&md->event_nr, 0);
31147- atomic_set(&md->uevent_seq, 0);
31148+ atomic_set_unchecked(&md->event_nr, 0);
31149+ atomic_set_unchecked(&md->uevent_seq, 0);
31150 INIT_LIST_HEAD(&md->uevent_list);
31151 spin_lock_init(&md->uevent_lock);
31152
31153@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31154
31155 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31156
31157- atomic_inc(&md->event_nr);
31158+ atomic_inc_unchecked(&md->event_nr);
31159 wake_up(&md->eventq);
31160 }
31161
31162@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31163
31164 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31165 {
31166- return atomic_add_return(1, &md->uevent_seq);
31167+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31168 }
31169
31170 uint32_t dm_get_event_nr(struct mapped_device *md)
31171 {
31172- return atomic_read(&md->event_nr);
31173+ return atomic_read_unchecked(&md->event_nr);
31174 }
31175
31176 int dm_wait_event(struct mapped_device *md, int event_nr)
31177 {
31178 return wait_event_interruptible(md->eventq,
31179- (event_nr != atomic_read(&md->event_nr)));
31180+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31181 }
31182
31183 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31184diff --git a/drivers/md/md.c b/drivers/md/md.c
31185index f47f1f8..b7f559e 100644
31186--- a/drivers/md/md.c
31187+++ b/drivers/md/md.c
31188@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31189 * start build, activate spare
31190 */
31191 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31192-static atomic_t md_event_count;
31193+static atomic_unchecked_t md_event_count;
31194 void md_new_event(struct mddev *mddev)
31195 {
31196- atomic_inc(&md_event_count);
31197+ atomic_inc_unchecked(&md_event_count);
31198 wake_up(&md_event_waiters);
31199 }
31200 EXPORT_SYMBOL_GPL(md_new_event);
31201@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31202 */
31203 static void md_new_event_inintr(struct mddev *mddev)
31204 {
31205- atomic_inc(&md_event_count);
31206+ atomic_inc_unchecked(&md_event_count);
31207 wake_up(&md_event_waiters);
31208 }
31209
31210@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31211
31212 rdev->preferred_minor = 0xffff;
31213 rdev->data_offset = le64_to_cpu(sb->data_offset);
31214- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31215+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31216
31217 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31218 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31219@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31220 else
31221 sb->resync_offset = cpu_to_le64(0);
31222
31223- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31224+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31225
31226 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31227 sb->size = cpu_to_le64(mddev->dev_sectors);
31228@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31229 static ssize_t
31230 errors_show(struct md_rdev *rdev, char *page)
31231 {
31232- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31233+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31234 }
31235
31236 static ssize_t
31237@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31238 char *e;
31239 unsigned long n = simple_strtoul(buf, &e, 10);
31240 if (*buf && (*e == 0 || *e == '\n')) {
31241- atomic_set(&rdev->corrected_errors, n);
31242+ atomic_set_unchecked(&rdev->corrected_errors, n);
31243 return len;
31244 }
31245 return -EINVAL;
31246@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31247 rdev->sb_loaded = 0;
31248 rdev->bb_page = NULL;
31249 atomic_set(&rdev->nr_pending, 0);
31250- atomic_set(&rdev->read_errors, 0);
31251- atomic_set(&rdev->corrected_errors, 0);
31252+ atomic_set_unchecked(&rdev->read_errors, 0);
31253+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31254
31255 INIT_LIST_HEAD(&rdev->same_set);
31256 init_waitqueue_head(&rdev->blocked_wait);
31257@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31258
31259 spin_unlock(&pers_lock);
31260 seq_printf(seq, "\n");
31261- seq->poll_event = atomic_read(&md_event_count);
31262+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31263 return 0;
31264 }
31265 if (v == (void*)2) {
31266@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31267 chunk_kb ? "KB" : "B");
31268 if (bitmap->file) {
31269 seq_printf(seq, ", file: ");
31270- seq_path(seq, &bitmap->file->f_path, " \t\n");
31271+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31272 }
31273
31274 seq_printf(seq, "\n");
31275@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31276 return error;
31277
31278 seq = file->private_data;
31279- seq->poll_event = atomic_read(&md_event_count);
31280+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31281 return error;
31282 }
31283
31284@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31285 /* always allow read */
31286 mask = POLLIN | POLLRDNORM;
31287
31288- if (seq->poll_event != atomic_read(&md_event_count))
31289+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31290 mask |= POLLERR | POLLPRI;
31291 return mask;
31292 }
31293@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31294 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31295 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31296 (int)part_stat_read(&disk->part0, sectors[1]) -
31297- atomic_read(&disk->sync_io);
31298+ atomic_read_unchecked(&disk->sync_io);
31299 /* sync IO will cause sync_io to increase before the disk_stats
31300 * as sync_io is counted when a request starts, and
31301 * disk_stats is counted when it completes.
31302diff --git a/drivers/md/md.h b/drivers/md/md.h
31303index cf742d9..7c7c745 100644
31304--- a/drivers/md/md.h
31305+++ b/drivers/md/md.h
31306@@ -120,13 +120,13 @@ struct md_rdev {
31307 * only maintained for arrays that
31308 * support hot removal
31309 */
31310- atomic_t read_errors; /* number of consecutive read errors that
31311+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31312 * we have tried to ignore.
31313 */
31314 struct timespec last_read_error; /* monotonic time since our
31315 * last read error
31316 */
31317- atomic_t corrected_errors; /* number of corrected read errors,
31318+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31319 * for reporting to userspace and storing
31320 * in superblock.
31321 */
31322@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31323
31324 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31325 {
31326- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31327+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31328 }
31329
31330 struct md_personality
31331diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31332index 50ed53b..4f29d7d 100644
31333--- a/drivers/md/persistent-data/dm-space-map-checker.c
31334+++ b/drivers/md/persistent-data/dm-space-map-checker.c
31335@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31336 /*----------------------------------------------------------------*/
31337
31338 struct sm_checker {
31339- struct dm_space_map sm;
31340+ dm_space_map_no_const sm;
31341
31342 struct count_array old_counts;
31343 struct count_array counts;
31344diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31345index fc469ba..2d91555 100644
31346--- a/drivers/md/persistent-data/dm-space-map-disk.c
31347+++ b/drivers/md/persistent-data/dm-space-map-disk.c
31348@@ -23,7 +23,7 @@
31349 * Space map interface.
31350 */
31351 struct sm_disk {
31352- struct dm_space_map sm;
31353+ dm_space_map_no_const sm;
31354
31355 struct ll_disk ll;
31356 struct ll_disk old_ll;
31357diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31358index e89ae5e..062e4c2 100644
31359--- a/drivers/md/persistent-data/dm-space-map-metadata.c
31360+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31361@@ -43,7 +43,7 @@ struct block_op {
31362 };
31363
31364 struct sm_metadata {
31365- struct dm_space_map sm;
31366+ dm_space_map_no_const sm;
31367
31368 struct ll_disk ll;
31369 struct ll_disk old_ll;
31370diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31371index 1cbfc6b..56e1dbb 100644
31372--- a/drivers/md/persistent-data/dm-space-map.h
31373+++ b/drivers/md/persistent-data/dm-space-map.h
31374@@ -60,6 +60,7 @@ struct dm_space_map {
31375 int (*root_size)(struct dm_space_map *sm, size_t *result);
31376 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31377 };
31378+typedef struct dm_space_map __no_const dm_space_map_no_const;
31379
31380 /*----------------------------------------------------------------*/
31381
31382diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31383index 7d9e071..015b1d5 100644
31384--- a/drivers/md/raid1.c
31385+++ b/drivers/md/raid1.c
31386@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31387 if (r1_sync_page_io(rdev, sect, s,
31388 bio->bi_io_vec[idx].bv_page,
31389 READ) != 0)
31390- atomic_add(s, &rdev->corrected_errors);
31391+ atomic_add_unchecked(s, &rdev->corrected_errors);
31392 }
31393 sectors -= s;
31394 sect += s;
31395@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31396 test_bit(In_sync, &rdev->flags)) {
31397 if (r1_sync_page_io(rdev, sect, s,
31398 conf->tmppage, READ)) {
31399- atomic_add(s, &rdev->corrected_errors);
31400+ atomic_add_unchecked(s, &rdev->corrected_errors);
31401 printk(KERN_INFO
31402 "md/raid1:%s: read error corrected "
31403 "(%d sectors at %llu on %s)\n",
31404diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31405index 685ddf3..955b087 100644
31406--- a/drivers/md/raid10.c
31407+++ b/drivers/md/raid10.c
31408@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31409 /* The write handler will notice the lack of
31410 * R10BIO_Uptodate and record any errors etc
31411 */
31412- atomic_add(r10_bio->sectors,
31413+ atomic_add_unchecked(r10_bio->sectors,
31414 &conf->mirrors[d].rdev->corrected_errors);
31415
31416 /* for reconstruct, we always reschedule after a read.
31417@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31418 {
31419 struct timespec cur_time_mon;
31420 unsigned long hours_since_last;
31421- unsigned int read_errors = atomic_read(&rdev->read_errors);
31422+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31423
31424 ktime_get_ts(&cur_time_mon);
31425
31426@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31427 * overflowing the shift of read_errors by hours_since_last.
31428 */
31429 if (hours_since_last >= 8 * sizeof(read_errors))
31430- atomic_set(&rdev->read_errors, 0);
31431+ atomic_set_unchecked(&rdev->read_errors, 0);
31432 else
31433- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31434+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31435 }
31436
31437 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31438@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31439 return;
31440
31441 check_decay_read_errors(mddev, rdev);
31442- atomic_inc(&rdev->read_errors);
31443- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31444+ atomic_inc_unchecked(&rdev->read_errors);
31445+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31446 char b[BDEVNAME_SIZE];
31447 bdevname(rdev->bdev, b);
31448
31449@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31450 "md/raid10:%s: %s: Raid device exceeded "
31451 "read_error threshold [cur %d:max %d]\n",
31452 mdname(mddev), b,
31453- atomic_read(&rdev->read_errors), max_read_errors);
31454+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31455 printk(KERN_NOTICE
31456 "md/raid10:%s: %s: Failing raid device\n",
31457 mdname(mddev), b);
31458@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31459 (unsigned long long)(
31460 sect + rdev->data_offset),
31461 bdevname(rdev->bdev, b));
31462- atomic_add(s, &rdev->corrected_errors);
31463+ atomic_add_unchecked(s, &rdev->corrected_errors);
31464 }
31465
31466 rdev_dec_pending(rdev, mddev);
31467diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31468index 858fdbb..b2dac95 100644
31469--- a/drivers/md/raid5.c
31470+++ b/drivers/md/raid5.c
31471@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31472 (unsigned long long)(sh->sector
31473 + rdev->data_offset),
31474 bdevname(rdev->bdev, b));
31475- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31476+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31477 clear_bit(R5_ReadError, &sh->dev[i].flags);
31478 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31479 }
31480- if (atomic_read(&conf->disks[i].rdev->read_errors))
31481- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31482+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31483+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31484 } else {
31485 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31486 int retry = 0;
31487 rdev = conf->disks[i].rdev;
31488
31489 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31490- atomic_inc(&rdev->read_errors);
31491+ atomic_inc_unchecked(&rdev->read_errors);
31492 if (conf->mddev->degraded >= conf->max_degraded)
31493 printk_ratelimited(
31494 KERN_WARNING
31495@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31496 (unsigned long long)(sh->sector
31497 + rdev->data_offset),
31498 bdn);
31499- else if (atomic_read(&rdev->read_errors)
31500+ else if (atomic_read_unchecked(&rdev->read_errors)
31501 > conf->max_nr_stripes)
31502 printk(KERN_WARNING
31503 "md/raid:%s: Too many read errors, failing device %s.\n",
31504diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31505index ba9a643..e474ab5 100644
31506--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31507+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31508@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31509 .subvendor = _subvend, .subdevice = _subdev, \
31510 .driver_data = (unsigned long)&_driverdata }
31511
31512-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31513+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31514 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31515 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31516 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31517diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31518index a7d876f..8c21b61 100644
31519--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31520+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31521@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31522 union {
31523 dmx_ts_cb ts;
31524 dmx_section_cb sec;
31525- } cb;
31526+ } __no_const cb;
31527
31528 struct dvb_demux *demux;
31529 void *priv;
31530diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31531index f732877..d38c35a 100644
31532--- a/drivers/media/dvb/dvb-core/dvbdev.c
31533+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31534@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31535 const struct dvb_device *template, void *priv, int type)
31536 {
31537 struct dvb_device *dvbdev;
31538- struct file_operations *dvbdevfops;
31539+ file_operations_no_const *dvbdevfops;
31540 struct device *clsdev;
31541 int minor;
31542 int id;
31543diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31544index 9f2a02c..5920f88 100644
31545--- a/drivers/media/dvb/dvb-usb/cxusb.c
31546+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31547@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31548 struct dib0700_adapter_state {
31549 int (*set_param_save) (struct dvb_frontend *,
31550 struct dvb_frontend_parameters *);
31551-};
31552+} __no_const;
31553
31554 static int dib7070_set_param_override(struct dvb_frontend *fe,
31555 struct dvb_frontend_parameters *fep)
31556diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31557index f103ec1..5e8968b 100644
31558--- a/drivers/media/dvb/dvb-usb/dw2102.c
31559+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31560@@ -95,7 +95,7 @@ struct su3000_state {
31561
31562 struct s6x0_state {
31563 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31564-};
31565+} __no_const;
31566
31567 /* debug */
31568 static int dvb_usb_dw2102_debug;
31569diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31570index 404f63a..4796533 100644
31571--- a/drivers/media/dvb/frontends/dib3000.h
31572+++ b/drivers/media/dvb/frontends/dib3000.h
31573@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31574 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31575 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31576 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31577-};
31578+} __no_const;
31579
31580 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31581 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31582diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31583index 90bf573..e8463da 100644
31584--- a/drivers/media/dvb/frontends/ds3000.c
31585+++ b/drivers/media/dvb/frontends/ds3000.c
31586@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31587
31588 for (i = 0; i < 30 ; i++) {
31589 ds3000_read_status(fe, &status);
31590- if (status && FE_HAS_LOCK)
31591+ if (status & FE_HAS_LOCK)
31592 break;
31593
31594 msleep(10);
31595diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31596index 0564192..75b16f5 100644
31597--- a/drivers/media/dvb/ngene/ngene-cards.c
31598+++ b/drivers/media/dvb/ngene/ngene-cards.c
31599@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31600
31601 /****************************************************************************/
31602
31603-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31604+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31605 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31606 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31607 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31608diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31609index 16a089f..ab1667d 100644
31610--- a/drivers/media/radio/radio-cadet.c
31611+++ b/drivers/media/radio/radio-cadet.c
31612@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31613 unsigned char readbuf[RDS_BUFFER];
31614 int i = 0;
31615
31616+ if (count > RDS_BUFFER)
31617+ return -EFAULT;
31618 mutex_lock(&dev->lock);
31619 if (dev->rdsstat == 0) {
31620 dev->rdsstat = 1;
31621diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31622index 61287fc..8b08712 100644
31623--- a/drivers/media/rc/redrat3.c
31624+++ b/drivers/media/rc/redrat3.c
31625@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31626 return carrier;
31627 }
31628
31629-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31630+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31631 {
31632 struct redrat3_dev *rr3 = rcdev->priv;
31633 struct device *dev = rr3->dev;
31634diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31635index 9cde353..8c6a1c3 100644
31636--- a/drivers/media/video/au0828/au0828.h
31637+++ b/drivers/media/video/au0828/au0828.h
31638@@ -191,7 +191,7 @@ struct au0828_dev {
31639
31640 /* I2C */
31641 struct i2c_adapter i2c_adap;
31642- struct i2c_algorithm i2c_algo;
31643+ i2c_algorithm_no_const i2c_algo;
31644 struct i2c_client i2c_client;
31645 u32 i2c_rc;
31646
31647diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31648index 68d1240..46b32eb 100644
31649--- a/drivers/media/video/cx88/cx88-alsa.c
31650+++ b/drivers/media/video/cx88/cx88-alsa.c
31651@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31652 * Only boards with eeprom and byte 1 at eeprom=1 have it
31653 */
31654
31655-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31656+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31657 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31658 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31659 {0, }
31660diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31661index 305e6aa..0143317 100644
31662--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31663+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31664@@ -196,7 +196,7 @@ struct pvr2_hdw {
31665
31666 /* I2C stuff */
31667 struct i2c_adapter i2c_adap;
31668- struct i2c_algorithm i2c_algo;
31669+ i2c_algorithm_no_const i2c_algo;
31670 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31671 int i2c_cx25840_hack_state;
31672 int i2c_linked;
31673diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31674index a0895bf..b7ebb1b 100644
31675--- a/drivers/media/video/timblogiw.c
31676+++ b/drivers/media/video/timblogiw.c
31677@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31678
31679 /* Platform device functions */
31680
31681-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31682+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31683 .vidioc_querycap = timblogiw_querycap,
31684 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31685 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31686@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31687 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31688 };
31689
31690-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31691+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31692 .owner = THIS_MODULE,
31693 .open = timblogiw_open,
31694 .release = timblogiw_close,
31695diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31696index e9c6a60..daf6a33 100644
31697--- a/drivers/message/fusion/mptbase.c
31698+++ b/drivers/message/fusion/mptbase.c
31699@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31700 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31701 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31702
31703+#ifdef CONFIG_GRKERNSEC_HIDESYM
31704+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31705+#else
31706 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31707 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31708+#endif
31709+
31710 /*
31711 * Rounding UP to nearest 4-kB boundary here...
31712 */
31713diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31714index 9d95042..b808101 100644
31715--- a/drivers/message/fusion/mptsas.c
31716+++ b/drivers/message/fusion/mptsas.c
31717@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31718 return 0;
31719 }
31720
31721+static inline void
31722+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31723+{
31724+ if (phy_info->port_details) {
31725+ phy_info->port_details->rphy = rphy;
31726+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31727+ ioc->name, rphy));
31728+ }
31729+
31730+ if (rphy) {
31731+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31732+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31733+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31734+ ioc->name, rphy, rphy->dev.release));
31735+ }
31736+}
31737+
31738 /* no mutex */
31739 static void
31740 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31741@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31742 return NULL;
31743 }
31744
31745-static inline void
31746-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31747-{
31748- if (phy_info->port_details) {
31749- phy_info->port_details->rphy = rphy;
31750- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31751- ioc->name, rphy));
31752- }
31753-
31754- if (rphy) {
31755- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31756- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31757- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31758- ioc->name, rphy, rphy->dev.release));
31759- }
31760-}
31761-
31762 static inline struct sas_port *
31763 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31764 {
31765diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31766index 0c3ced7..1fe34ec 100644
31767--- a/drivers/message/fusion/mptscsih.c
31768+++ b/drivers/message/fusion/mptscsih.c
31769@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31770
31771 h = shost_priv(SChost);
31772
31773- if (h) {
31774- if (h->info_kbuf == NULL)
31775- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31776- return h->info_kbuf;
31777- h->info_kbuf[0] = '\0';
31778+ if (!h)
31779+ return NULL;
31780
31781- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31782- h->info_kbuf[size-1] = '\0';
31783- }
31784+ if (h->info_kbuf == NULL)
31785+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31786+ return h->info_kbuf;
31787+ h->info_kbuf[0] = '\0';
31788+
31789+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31790+ h->info_kbuf[size-1] = '\0';
31791
31792 return h->info_kbuf;
31793 }
31794diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31795index 07dbeaf..5533142 100644
31796--- a/drivers/message/i2o/i2o_proc.c
31797+++ b/drivers/message/i2o/i2o_proc.c
31798@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31799 "Array Controller Device"
31800 };
31801
31802-static char *chtostr(u8 * chars, int n)
31803-{
31804- char tmp[256];
31805- tmp[0] = 0;
31806- return strncat(tmp, (char *)chars, n);
31807-}
31808-
31809 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31810 char *group)
31811 {
31812@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31813
31814 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31815 seq_printf(seq, "%-#8x", ddm_table.module_id);
31816- seq_printf(seq, "%-29s",
31817- chtostr(ddm_table.module_name_version, 28));
31818+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31819 seq_printf(seq, "%9d ", ddm_table.data_size);
31820 seq_printf(seq, "%8d", ddm_table.code_size);
31821
31822@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31823
31824 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31825 seq_printf(seq, "%-#8x", dst->module_id);
31826- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31827- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31828+ seq_printf(seq, "%-.28s", dst->module_name_version);
31829+ seq_printf(seq, "%-.8s", dst->date);
31830 seq_printf(seq, "%8d ", dst->module_size);
31831 seq_printf(seq, "%8d ", dst->mpb_size);
31832 seq_printf(seq, "0x%04x", dst->module_flags);
31833@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31834 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31835 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31836 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31837- seq_printf(seq, "Vendor info : %s\n",
31838- chtostr((u8 *) (work32 + 2), 16));
31839- seq_printf(seq, "Product info : %s\n",
31840- chtostr((u8 *) (work32 + 6), 16));
31841- seq_printf(seq, "Description : %s\n",
31842- chtostr((u8 *) (work32 + 10), 16));
31843- seq_printf(seq, "Product rev. : %s\n",
31844- chtostr((u8 *) (work32 + 14), 8));
31845+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31846+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31847+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31848+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31849
31850 seq_printf(seq, "Serial number : ");
31851 print_serial_number(seq, (u8 *) (work32 + 16),
31852@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31853 }
31854
31855 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31856- seq_printf(seq, "Module name : %s\n",
31857- chtostr(result.module_name, 24));
31858- seq_printf(seq, "Module revision : %s\n",
31859- chtostr(result.module_rev, 8));
31860+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31861+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31862
31863 seq_printf(seq, "Serial number : ");
31864 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31865@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31866 return 0;
31867 }
31868
31869- seq_printf(seq, "Device name : %s\n",
31870- chtostr(result.device_name, 64));
31871- seq_printf(seq, "Service name : %s\n",
31872- chtostr(result.service_name, 64));
31873- seq_printf(seq, "Physical name : %s\n",
31874- chtostr(result.physical_location, 64));
31875- seq_printf(seq, "Instance number : %s\n",
31876- chtostr(result.instance_number, 4));
31877+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31878+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31879+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31880+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31881
31882 return 0;
31883 }
31884diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31885index a8c08f3..155fe3d 100644
31886--- a/drivers/message/i2o/iop.c
31887+++ b/drivers/message/i2o/iop.c
31888@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31889
31890 spin_lock_irqsave(&c->context_list_lock, flags);
31891
31892- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31893- atomic_inc(&c->context_list_counter);
31894+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31895+ atomic_inc_unchecked(&c->context_list_counter);
31896
31897- entry->context = atomic_read(&c->context_list_counter);
31898+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31899
31900 list_add(&entry->list, &c->context_list);
31901
31902@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31903
31904 #if BITS_PER_LONG == 64
31905 spin_lock_init(&c->context_list_lock);
31906- atomic_set(&c->context_list_counter, 0);
31907+ atomic_set_unchecked(&c->context_list_counter, 0);
31908 INIT_LIST_HEAD(&c->context_list);
31909 #endif
31910
31911diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31912index 7ce65f4..e66e9bc 100644
31913--- a/drivers/mfd/abx500-core.c
31914+++ b/drivers/mfd/abx500-core.c
31915@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31916
31917 struct abx500_device_entry {
31918 struct list_head list;
31919- struct abx500_ops ops;
31920+ abx500_ops_no_const ops;
31921 struct device *dev;
31922 };
31923
31924diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31925index 5c2a06a..8fa077c 100644
31926--- a/drivers/mfd/janz-cmodio.c
31927+++ b/drivers/mfd/janz-cmodio.c
31928@@ -13,6 +13,7 @@
31929
31930 #include <linux/kernel.h>
31931 #include <linux/module.h>
31932+#include <linux/slab.h>
31933 #include <linux/init.h>
31934 #include <linux/pci.h>
31935 #include <linux/interrupt.h>
31936diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31937index 29d12a7..f900ba4 100644
31938--- a/drivers/misc/lis3lv02d/lis3lv02d.c
31939+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31940@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31941 * the lid is closed. This leads to interrupts as soon as a little move
31942 * is done.
31943 */
31944- atomic_inc(&lis3->count);
31945+ atomic_inc_unchecked(&lis3->count);
31946
31947 wake_up_interruptible(&lis3->misc_wait);
31948 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31949@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31950 if (lis3->pm_dev)
31951 pm_runtime_get_sync(lis3->pm_dev);
31952
31953- atomic_set(&lis3->count, 0);
31954+ atomic_set_unchecked(&lis3->count, 0);
31955 return 0;
31956 }
31957
31958@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
31959 add_wait_queue(&lis3->misc_wait, &wait);
31960 while (true) {
31961 set_current_state(TASK_INTERRUPTIBLE);
31962- data = atomic_xchg(&lis3->count, 0);
31963+ data = atomic_xchg_unchecked(&lis3->count, 0);
31964 if (data)
31965 break;
31966
31967@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31968 struct lis3lv02d, miscdev);
31969
31970 poll_wait(file, &lis3->misc_wait, wait);
31971- if (atomic_read(&lis3->count))
31972+ if (atomic_read_unchecked(&lis3->count))
31973 return POLLIN | POLLRDNORM;
31974 return 0;
31975 }
31976diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
31977index 2b1482a..5d33616 100644
31978--- a/drivers/misc/lis3lv02d/lis3lv02d.h
31979+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
31980@@ -266,7 +266,7 @@ struct lis3lv02d {
31981 struct input_polled_dev *idev; /* input device */
31982 struct platform_device *pdev; /* platform device */
31983 struct regulator_bulk_data regulators[2];
31984- atomic_t count; /* interrupt count after last read */
31985+ atomic_unchecked_t count; /* interrupt count after last read */
31986 union axis_conversion ac; /* hw -> logical axis */
31987 int mapped_btns[3];
31988
31989diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
31990index 2f30bad..c4c13d0 100644
31991--- a/drivers/misc/sgi-gru/gruhandles.c
31992+++ b/drivers/misc/sgi-gru/gruhandles.c
31993@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31994 unsigned long nsec;
31995
31996 nsec = CLKS2NSEC(clks);
31997- atomic_long_inc(&mcs_op_statistics[op].count);
31998- atomic_long_add(nsec, &mcs_op_statistics[op].total);
31999+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32000+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32001 if (mcs_op_statistics[op].max < nsec)
32002 mcs_op_statistics[op].max = nsec;
32003 }
32004diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32005index 7768b87..f8aac38 100644
32006--- a/drivers/misc/sgi-gru/gruprocfs.c
32007+++ b/drivers/misc/sgi-gru/gruprocfs.c
32008@@ -32,9 +32,9 @@
32009
32010 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32011
32012-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32013+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32014 {
32015- unsigned long val = atomic_long_read(v);
32016+ unsigned long val = atomic_long_read_unchecked(v);
32017
32018 seq_printf(s, "%16lu %s\n", val, id);
32019 }
32020@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32021
32022 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32023 for (op = 0; op < mcsop_last; op++) {
32024- count = atomic_long_read(&mcs_op_statistics[op].count);
32025- total = atomic_long_read(&mcs_op_statistics[op].total);
32026+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32027+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32028 max = mcs_op_statistics[op].max;
32029 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32030 count ? total / count : 0, max);
32031diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32032index 5c3ce24..4915ccb 100644
32033--- a/drivers/misc/sgi-gru/grutables.h
32034+++ b/drivers/misc/sgi-gru/grutables.h
32035@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32036 * GRU statistics.
32037 */
32038 struct gru_stats_s {
32039- atomic_long_t vdata_alloc;
32040- atomic_long_t vdata_free;
32041- atomic_long_t gts_alloc;
32042- atomic_long_t gts_free;
32043- atomic_long_t gms_alloc;
32044- atomic_long_t gms_free;
32045- atomic_long_t gts_double_allocate;
32046- atomic_long_t assign_context;
32047- atomic_long_t assign_context_failed;
32048- atomic_long_t free_context;
32049- atomic_long_t load_user_context;
32050- atomic_long_t load_kernel_context;
32051- atomic_long_t lock_kernel_context;
32052- atomic_long_t unlock_kernel_context;
32053- atomic_long_t steal_user_context;
32054- atomic_long_t steal_kernel_context;
32055- atomic_long_t steal_context_failed;
32056- atomic_long_t nopfn;
32057- atomic_long_t asid_new;
32058- atomic_long_t asid_next;
32059- atomic_long_t asid_wrap;
32060- atomic_long_t asid_reuse;
32061- atomic_long_t intr;
32062- atomic_long_t intr_cbr;
32063- atomic_long_t intr_tfh;
32064- atomic_long_t intr_spurious;
32065- atomic_long_t intr_mm_lock_failed;
32066- atomic_long_t call_os;
32067- atomic_long_t call_os_wait_queue;
32068- atomic_long_t user_flush_tlb;
32069- atomic_long_t user_unload_context;
32070- atomic_long_t user_exception;
32071- atomic_long_t set_context_option;
32072- atomic_long_t check_context_retarget_intr;
32073- atomic_long_t check_context_unload;
32074- atomic_long_t tlb_dropin;
32075- atomic_long_t tlb_preload_page;
32076- atomic_long_t tlb_dropin_fail_no_asid;
32077- atomic_long_t tlb_dropin_fail_upm;
32078- atomic_long_t tlb_dropin_fail_invalid;
32079- atomic_long_t tlb_dropin_fail_range_active;
32080- atomic_long_t tlb_dropin_fail_idle;
32081- atomic_long_t tlb_dropin_fail_fmm;
32082- atomic_long_t tlb_dropin_fail_no_exception;
32083- atomic_long_t tfh_stale_on_fault;
32084- atomic_long_t mmu_invalidate_range;
32085- atomic_long_t mmu_invalidate_page;
32086- atomic_long_t flush_tlb;
32087- atomic_long_t flush_tlb_gru;
32088- atomic_long_t flush_tlb_gru_tgh;
32089- atomic_long_t flush_tlb_gru_zero_asid;
32090+ atomic_long_unchecked_t vdata_alloc;
32091+ atomic_long_unchecked_t vdata_free;
32092+ atomic_long_unchecked_t gts_alloc;
32093+ atomic_long_unchecked_t gts_free;
32094+ atomic_long_unchecked_t gms_alloc;
32095+ atomic_long_unchecked_t gms_free;
32096+ atomic_long_unchecked_t gts_double_allocate;
32097+ atomic_long_unchecked_t assign_context;
32098+ atomic_long_unchecked_t assign_context_failed;
32099+ atomic_long_unchecked_t free_context;
32100+ atomic_long_unchecked_t load_user_context;
32101+ atomic_long_unchecked_t load_kernel_context;
32102+ atomic_long_unchecked_t lock_kernel_context;
32103+ atomic_long_unchecked_t unlock_kernel_context;
32104+ atomic_long_unchecked_t steal_user_context;
32105+ atomic_long_unchecked_t steal_kernel_context;
32106+ atomic_long_unchecked_t steal_context_failed;
32107+ atomic_long_unchecked_t nopfn;
32108+ atomic_long_unchecked_t asid_new;
32109+ atomic_long_unchecked_t asid_next;
32110+ atomic_long_unchecked_t asid_wrap;
32111+ atomic_long_unchecked_t asid_reuse;
32112+ atomic_long_unchecked_t intr;
32113+ atomic_long_unchecked_t intr_cbr;
32114+ atomic_long_unchecked_t intr_tfh;
32115+ atomic_long_unchecked_t intr_spurious;
32116+ atomic_long_unchecked_t intr_mm_lock_failed;
32117+ atomic_long_unchecked_t call_os;
32118+ atomic_long_unchecked_t call_os_wait_queue;
32119+ atomic_long_unchecked_t user_flush_tlb;
32120+ atomic_long_unchecked_t user_unload_context;
32121+ atomic_long_unchecked_t user_exception;
32122+ atomic_long_unchecked_t set_context_option;
32123+ atomic_long_unchecked_t check_context_retarget_intr;
32124+ atomic_long_unchecked_t check_context_unload;
32125+ atomic_long_unchecked_t tlb_dropin;
32126+ atomic_long_unchecked_t tlb_preload_page;
32127+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32128+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32129+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32130+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32131+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32132+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32133+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32134+ atomic_long_unchecked_t tfh_stale_on_fault;
32135+ atomic_long_unchecked_t mmu_invalidate_range;
32136+ atomic_long_unchecked_t mmu_invalidate_page;
32137+ atomic_long_unchecked_t flush_tlb;
32138+ atomic_long_unchecked_t flush_tlb_gru;
32139+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32140+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32141
32142- atomic_long_t copy_gpa;
32143- atomic_long_t read_gpa;
32144+ atomic_long_unchecked_t copy_gpa;
32145+ atomic_long_unchecked_t read_gpa;
32146
32147- atomic_long_t mesq_receive;
32148- atomic_long_t mesq_receive_none;
32149- atomic_long_t mesq_send;
32150- atomic_long_t mesq_send_failed;
32151- atomic_long_t mesq_noop;
32152- atomic_long_t mesq_send_unexpected_error;
32153- atomic_long_t mesq_send_lb_overflow;
32154- atomic_long_t mesq_send_qlimit_reached;
32155- atomic_long_t mesq_send_amo_nacked;
32156- atomic_long_t mesq_send_put_nacked;
32157- atomic_long_t mesq_page_overflow;
32158- atomic_long_t mesq_qf_locked;
32159- atomic_long_t mesq_qf_noop_not_full;
32160- atomic_long_t mesq_qf_switch_head_failed;
32161- atomic_long_t mesq_qf_unexpected_error;
32162- atomic_long_t mesq_noop_unexpected_error;
32163- atomic_long_t mesq_noop_lb_overflow;
32164- atomic_long_t mesq_noop_qlimit_reached;
32165- atomic_long_t mesq_noop_amo_nacked;
32166- atomic_long_t mesq_noop_put_nacked;
32167- atomic_long_t mesq_noop_page_overflow;
32168+ atomic_long_unchecked_t mesq_receive;
32169+ atomic_long_unchecked_t mesq_receive_none;
32170+ atomic_long_unchecked_t mesq_send;
32171+ atomic_long_unchecked_t mesq_send_failed;
32172+ atomic_long_unchecked_t mesq_noop;
32173+ atomic_long_unchecked_t mesq_send_unexpected_error;
32174+ atomic_long_unchecked_t mesq_send_lb_overflow;
32175+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32176+ atomic_long_unchecked_t mesq_send_amo_nacked;
32177+ atomic_long_unchecked_t mesq_send_put_nacked;
32178+ atomic_long_unchecked_t mesq_page_overflow;
32179+ atomic_long_unchecked_t mesq_qf_locked;
32180+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32181+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32182+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32183+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32184+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32185+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32186+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32187+ atomic_long_unchecked_t mesq_noop_put_nacked;
32188+ atomic_long_unchecked_t mesq_noop_page_overflow;
32189
32190 };
32191
32192@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32193 tghop_invalidate, mcsop_last};
32194
32195 struct mcs_op_statistic {
32196- atomic_long_t count;
32197- atomic_long_t total;
32198+ atomic_long_unchecked_t count;
32199+ atomic_long_unchecked_t total;
32200 unsigned long max;
32201 };
32202
32203@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32204
32205 #define STAT(id) do { \
32206 if (gru_options & OPT_STATS) \
32207- atomic_long_inc(&gru_stats.id); \
32208+ atomic_long_inc_unchecked(&gru_stats.id); \
32209 } while (0)
32210
32211 #ifdef CONFIG_SGI_GRU_DEBUG
32212diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32213index 851b2f2..a4ec097 100644
32214--- a/drivers/misc/sgi-xp/xp.h
32215+++ b/drivers/misc/sgi-xp/xp.h
32216@@ -289,7 +289,7 @@ struct xpc_interface {
32217 xpc_notify_func, void *);
32218 void (*received) (short, int, void *);
32219 enum xp_retval (*partid_to_nasids) (short, void *);
32220-};
32221+} __no_const;
32222
32223 extern struct xpc_interface xpc_interface;
32224
32225diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32226index b94d5f7..7f494c5 100644
32227--- a/drivers/misc/sgi-xp/xpc.h
32228+++ b/drivers/misc/sgi-xp/xpc.h
32229@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32230 void (*received_payload) (struct xpc_channel *, void *);
32231 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32232 };
32233+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32234
32235 /* struct xpc_partition act_state values (for XPC HB) */
32236
32237@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32238 /* found in xpc_main.c */
32239 extern struct device *xpc_part;
32240 extern struct device *xpc_chan;
32241-extern struct xpc_arch_operations xpc_arch_ops;
32242+extern xpc_arch_operations_no_const xpc_arch_ops;
32243 extern int xpc_disengage_timelimit;
32244 extern int xpc_disengage_timedout;
32245 extern int xpc_activate_IRQ_rcvd;
32246diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32247index 8d082b4..aa749ae 100644
32248--- a/drivers/misc/sgi-xp/xpc_main.c
32249+++ b/drivers/misc/sgi-xp/xpc_main.c
32250@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32251 .notifier_call = xpc_system_die,
32252 };
32253
32254-struct xpc_arch_operations xpc_arch_ops;
32255+xpc_arch_operations_no_const xpc_arch_ops;
32256
32257 /*
32258 * Timer function to enforce the timelimit on the partition disengage.
32259diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32260index 6878a94..fe5c5f1 100644
32261--- a/drivers/mmc/host/sdhci-pci.c
32262+++ b/drivers/mmc/host/sdhci-pci.c
32263@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32264 .probe = via_probe,
32265 };
32266
32267-static const struct pci_device_id pci_ids[] __devinitdata = {
32268+static const struct pci_device_id pci_ids[] __devinitconst = {
32269 {
32270 .vendor = PCI_VENDOR_ID_RICOH,
32271 .device = PCI_DEVICE_ID_RICOH_R5C822,
32272diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32273index e9fad91..0a7a16a 100644
32274--- a/drivers/mtd/devices/doc2000.c
32275+++ b/drivers/mtd/devices/doc2000.c
32276@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32277
32278 /* The ECC will not be calculated correctly if less than 512 is written */
32279 /* DBB-
32280- if (len != 0x200 && eccbuf)
32281+ if (len != 0x200)
32282 printk(KERN_WARNING
32283 "ECC needs a full sector write (adr: %lx size %lx)\n",
32284 (long) to, (long) len);
32285diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32286index a3f7a27..234016e 100644
32287--- a/drivers/mtd/devices/doc2001.c
32288+++ b/drivers/mtd/devices/doc2001.c
32289@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32290 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32291
32292 /* Don't allow read past end of device */
32293- if (from >= this->totlen)
32294+ if (from >= this->totlen || !len)
32295 return -EINVAL;
32296
32297 /* Don't allow a single read to cross a 512-byte block boundary */
32298diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32299index 3984d48..28aa897 100644
32300--- a/drivers/mtd/nand/denali.c
32301+++ b/drivers/mtd/nand/denali.c
32302@@ -26,6 +26,7 @@
32303 #include <linux/pci.h>
32304 #include <linux/mtd/mtd.h>
32305 #include <linux/module.h>
32306+#include <linux/slab.h>
32307
32308 #include "denali.h"
32309
32310diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32311index ac40925..483b753 100644
32312--- a/drivers/mtd/nftlmount.c
32313+++ b/drivers/mtd/nftlmount.c
32314@@ -24,6 +24,7 @@
32315 #include <asm/errno.h>
32316 #include <linux/delay.h>
32317 #include <linux/slab.h>
32318+#include <linux/sched.h>
32319 #include <linux/mtd/mtd.h>
32320 #include <linux/mtd/nand.h>
32321 #include <linux/mtd/nftl.h>
32322diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32323index 6c3fb5a..c542a81 100644
32324--- a/drivers/mtd/ubi/build.c
32325+++ b/drivers/mtd/ubi/build.c
32326@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32327 static int __init bytes_str_to_int(const char *str)
32328 {
32329 char *endp;
32330- unsigned long result;
32331+ unsigned long result, scale = 1;
32332
32333 result = simple_strtoul(str, &endp, 0);
32334 if (str == endp || result >= INT_MAX) {
32335@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32336
32337 switch (*endp) {
32338 case 'G':
32339- result *= 1024;
32340+ scale *= 1024;
32341 case 'M':
32342- result *= 1024;
32343+ scale *= 1024;
32344 case 'K':
32345- result *= 1024;
32346+ scale *= 1024;
32347 if (endp[1] == 'i' && endp[2] == 'B')
32348 endp += 2;
32349 case '\0':
32350@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32351 return -EINVAL;
32352 }
32353
32354- return result;
32355+ if ((intoverflow_t)result*scale >= INT_MAX) {
32356+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32357+ str);
32358+ return -EINVAL;
32359+ }
32360+
32361+ return result*scale;
32362 }
32363
32364 /**
32365diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32366index 1feae59..c2a61d2 100644
32367--- a/drivers/net/ethernet/atheros/atlx/atl2.c
32368+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32369@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32370 */
32371
32372 #define ATL2_PARAM(X, desc) \
32373- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32374+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32375 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32376 MODULE_PARM_DESC(X, desc);
32377 #else
32378diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32379index 9a517c2..a50cfcb 100644
32380--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32381+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32382@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32383
32384 int (*wait_comp)(struct bnx2x *bp,
32385 struct bnx2x_rx_mode_ramrod_params *p);
32386-};
32387+} __no_const;
32388
32389 /********************** Set multicast group ***********************************/
32390
32391diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32392index 94b4bd0..73c02de 100644
32393--- a/drivers/net/ethernet/broadcom/tg3.h
32394+++ b/drivers/net/ethernet/broadcom/tg3.h
32395@@ -134,6 +134,7 @@
32396 #define CHIPREV_ID_5750_A0 0x4000
32397 #define CHIPREV_ID_5750_A1 0x4001
32398 #define CHIPREV_ID_5750_A3 0x4003
32399+#define CHIPREV_ID_5750_C1 0x4201
32400 #define CHIPREV_ID_5750_C2 0x4202
32401 #define CHIPREV_ID_5752_A0_HW 0x5000
32402 #define CHIPREV_ID_5752_A0 0x6000
32403diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32404index c5f5479..2e8c260 100644
32405--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32406+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32407@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32408 */
32409 struct l2t_skb_cb {
32410 arp_failure_handler_func arp_failure_handler;
32411-};
32412+} __no_const;
32413
32414 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32415
32416diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32417index 871bcaa..4043505 100644
32418--- a/drivers/net/ethernet/dec/tulip/de4x5.c
32419+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32420@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32421 for (i=0; i<ETH_ALEN; i++) {
32422 tmp.addr[i] = dev->dev_addr[i];
32423 }
32424- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32425+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32426 break;
32427
32428 case DE4X5_SET_HWADDR: /* Set the hardware address */
32429@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32430 spin_lock_irqsave(&lp->lock, flags);
32431 memcpy(&statbuf, &lp->pktStats, ioc->len);
32432 spin_unlock_irqrestore(&lp->lock, flags);
32433- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32434+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32435 return -EFAULT;
32436 break;
32437 }
32438diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32439index 14d5b61..1398636 100644
32440--- a/drivers/net/ethernet/dec/tulip/eeprom.c
32441+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32442@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32443 {NULL}};
32444
32445
32446-static const char *block_name[] __devinitdata = {
32447+static const char *block_name[] __devinitconst = {
32448 "21140 non-MII",
32449 "21140 MII PHY",
32450 "21142 Serial PHY",
32451diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32452index 4d01219..b58d26d 100644
32453--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32454+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32455@@ -236,7 +236,7 @@ struct pci_id_info {
32456 int drv_flags; /* Driver use, intended as capability flags. */
32457 };
32458
32459-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32460+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32461 { /* Sometime a Level-One switch card. */
32462 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32463 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32464diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32465index dcd7f7a..ecb7fb3 100644
32466--- a/drivers/net/ethernet/dlink/sundance.c
32467+++ b/drivers/net/ethernet/dlink/sundance.c
32468@@ -218,7 +218,7 @@ enum {
32469 struct pci_id_info {
32470 const char *name;
32471 };
32472-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32473+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32474 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32475 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32476 {"D-Link DFE-580TX 4 port Server Adapter"},
32477diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32478index bf266a0..e024af7 100644
32479--- a/drivers/net/ethernet/emulex/benet/be_main.c
32480+++ b/drivers/net/ethernet/emulex/benet/be_main.c
32481@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32482
32483 if (wrapped)
32484 newacc += 65536;
32485- ACCESS_ONCE(*acc) = newacc;
32486+ ACCESS_ONCE_RW(*acc) = newacc;
32487 }
32488
32489 void be_parse_stats(struct be_adapter *adapter)
32490diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32491index 61d2bdd..7f1154a 100644
32492--- a/drivers/net/ethernet/fealnx.c
32493+++ b/drivers/net/ethernet/fealnx.c
32494@@ -150,7 +150,7 @@ struct chip_info {
32495 int flags;
32496 };
32497
32498-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32499+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32500 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32501 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32502 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32503diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32504index e1159e5..e18684d 100644
32505--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32506+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32507@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32508 {
32509 struct e1000_hw *hw = &adapter->hw;
32510 struct e1000_mac_info *mac = &hw->mac;
32511- struct e1000_mac_operations *func = &mac->ops;
32512+ e1000_mac_operations_no_const *func = &mac->ops;
32513
32514 /* Set media type */
32515 switch (adapter->pdev->device) {
32516diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32517index a3e65fd..f451444 100644
32518--- a/drivers/net/ethernet/intel/e1000e/82571.c
32519+++ b/drivers/net/ethernet/intel/e1000e/82571.c
32520@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32521 {
32522 struct e1000_hw *hw = &adapter->hw;
32523 struct e1000_mac_info *mac = &hw->mac;
32524- struct e1000_mac_operations *func = &mac->ops;
32525+ e1000_mac_operations_no_const *func = &mac->ops;
32526 u32 swsm = 0;
32527 u32 swsm2 = 0;
32528 bool force_clear_smbi = false;
32529diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32530index 2967039..ca8c40c 100644
32531--- a/drivers/net/ethernet/intel/e1000e/hw.h
32532+++ b/drivers/net/ethernet/intel/e1000e/hw.h
32533@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32534 void (*write_vfta)(struct e1000_hw *, u32, u32);
32535 s32 (*read_mac_addr)(struct e1000_hw *);
32536 };
32537+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32538
32539 /*
32540 * When to use various PHY register access functions:
32541@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32542 void (*power_up)(struct e1000_hw *);
32543 void (*power_down)(struct e1000_hw *);
32544 };
32545+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32546
32547 /* Function pointers for the NVM. */
32548 struct e1000_nvm_operations {
32549@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32550 s32 (*validate)(struct e1000_hw *);
32551 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32552 };
32553+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32554
32555 struct e1000_mac_info {
32556- struct e1000_mac_operations ops;
32557+ e1000_mac_operations_no_const ops;
32558 u8 addr[ETH_ALEN];
32559 u8 perm_addr[ETH_ALEN];
32560
32561@@ -872,7 +875,7 @@ struct e1000_mac_info {
32562 };
32563
32564 struct e1000_phy_info {
32565- struct e1000_phy_operations ops;
32566+ e1000_phy_operations_no_const ops;
32567
32568 enum e1000_phy_type type;
32569
32570@@ -906,7 +909,7 @@ struct e1000_phy_info {
32571 };
32572
32573 struct e1000_nvm_info {
32574- struct e1000_nvm_operations ops;
32575+ e1000_nvm_operations_no_const ops;
32576
32577 enum e1000_nvm_type type;
32578 enum e1000_nvm_override override;
32579diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32580index 4519a13..f97fcd0 100644
32581--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32582+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32583@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32584 s32 (*read_mac_addr)(struct e1000_hw *);
32585 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32586 };
32587+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32588
32589 struct e1000_phy_operations {
32590 s32 (*acquire)(struct e1000_hw *);
32591@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32592 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32593 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32594 };
32595+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32596
32597 struct e1000_nvm_operations {
32598 s32 (*acquire)(struct e1000_hw *);
32599@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32600 s32 (*update)(struct e1000_hw *);
32601 s32 (*validate)(struct e1000_hw *);
32602 };
32603+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32604
32605 struct e1000_info {
32606 s32 (*get_invariants)(struct e1000_hw *);
32607@@ -350,7 +353,7 @@ struct e1000_info {
32608 extern const struct e1000_info e1000_82575_info;
32609
32610 struct e1000_mac_info {
32611- struct e1000_mac_operations ops;
32612+ e1000_mac_operations_no_const ops;
32613
32614 u8 addr[6];
32615 u8 perm_addr[6];
32616@@ -388,7 +391,7 @@ struct e1000_mac_info {
32617 };
32618
32619 struct e1000_phy_info {
32620- struct e1000_phy_operations ops;
32621+ e1000_phy_operations_no_const ops;
32622
32623 enum e1000_phy_type type;
32624
32625@@ -423,7 +426,7 @@ struct e1000_phy_info {
32626 };
32627
32628 struct e1000_nvm_info {
32629- struct e1000_nvm_operations ops;
32630+ e1000_nvm_operations_no_const ops;
32631 enum e1000_nvm_type type;
32632 enum e1000_nvm_override override;
32633
32634@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32635 s32 (*check_for_ack)(struct e1000_hw *, u16);
32636 s32 (*check_for_rst)(struct e1000_hw *, u16);
32637 };
32638+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32639
32640 struct e1000_mbx_stats {
32641 u32 msgs_tx;
32642@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32643 };
32644
32645 struct e1000_mbx_info {
32646- struct e1000_mbx_operations ops;
32647+ e1000_mbx_operations_no_const ops;
32648 struct e1000_mbx_stats stats;
32649 u32 timeout;
32650 u32 usec_delay;
32651diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32652index d7ed58f..64cde36 100644
32653--- a/drivers/net/ethernet/intel/igbvf/vf.h
32654+++ b/drivers/net/ethernet/intel/igbvf/vf.h
32655@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32656 s32 (*read_mac_addr)(struct e1000_hw *);
32657 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32658 };
32659+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32660
32661 struct e1000_mac_info {
32662- struct e1000_mac_operations ops;
32663+ e1000_mac_operations_no_const ops;
32664 u8 addr[6];
32665 u8 perm_addr[6];
32666
32667@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32668 s32 (*check_for_ack)(struct e1000_hw *);
32669 s32 (*check_for_rst)(struct e1000_hw *);
32670 };
32671+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32672
32673 struct e1000_mbx_stats {
32674 u32 msgs_tx;
32675@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32676 };
32677
32678 struct e1000_mbx_info {
32679- struct e1000_mbx_operations ops;
32680+ e1000_mbx_operations_no_const ops;
32681 struct e1000_mbx_stats stats;
32682 u32 timeout;
32683 u32 usec_delay;
32684diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32685index 6c5cca8..de8ef63 100644
32686--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32687+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32688@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32689 s32 (*update_checksum)(struct ixgbe_hw *);
32690 u16 (*calc_checksum)(struct ixgbe_hw *);
32691 };
32692+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32693
32694 struct ixgbe_mac_operations {
32695 s32 (*init_hw)(struct ixgbe_hw *);
32696@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32697 /* Manageability interface */
32698 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32699 };
32700+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32701
32702 struct ixgbe_phy_operations {
32703 s32 (*identify)(struct ixgbe_hw *);
32704@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32705 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32706 s32 (*check_overtemp)(struct ixgbe_hw *);
32707 };
32708+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32709
32710 struct ixgbe_eeprom_info {
32711- struct ixgbe_eeprom_operations ops;
32712+ ixgbe_eeprom_operations_no_const ops;
32713 enum ixgbe_eeprom_type type;
32714 u32 semaphore_delay;
32715 u16 word_size;
32716@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32717
32718 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32719 struct ixgbe_mac_info {
32720- struct ixgbe_mac_operations ops;
32721+ ixgbe_mac_operations_no_const ops;
32722 enum ixgbe_mac_type type;
32723 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32724 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32725@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32726 };
32727
32728 struct ixgbe_phy_info {
32729- struct ixgbe_phy_operations ops;
32730+ ixgbe_phy_operations_no_const ops;
32731 struct mdio_if_info mdio;
32732 enum ixgbe_phy_type type;
32733 u32 id;
32734@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32735 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32736 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32737 };
32738+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32739
32740 struct ixgbe_mbx_stats {
32741 u32 msgs_tx;
32742@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32743 };
32744
32745 struct ixgbe_mbx_info {
32746- struct ixgbe_mbx_operations ops;
32747+ ixgbe_mbx_operations_no_const ops;
32748 struct ixgbe_mbx_stats stats;
32749 u32 timeout;
32750 u32 usec_delay;
32751diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32752index 10306b4..28df758 100644
32753--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32754+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32755@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32756 s32 (*clear_vfta)(struct ixgbe_hw *);
32757 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32758 };
32759+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32760
32761 enum ixgbe_mac_type {
32762 ixgbe_mac_unknown = 0,
32763@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32764 };
32765
32766 struct ixgbe_mac_info {
32767- struct ixgbe_mac_operations ops;
32768+ ixgbe_mac_operations_no_const ops;
32769 u8 addr[6];
32770 u8 perm_addr[6];
32771
32772@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32773 s32 (*check_for_ack)(struct ixgbe_hw *);
32774 s32 (*check_for_rst)(struct ixgbe_hw *);
32775 };
32776+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32777
32778 struct ixgbe_mbx_stats {
32779 u32 msgs_tx;
32780@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32781 };
32782
32783 struct ixgbe_mbx_info {
32784- struct ixgbe_mbx_operations ops;
32785+ ixgbe_mbx_operations_no_const ops;
32786 struct ixgbe_mbx_stats stats;
32787 u32 timeout;
32788 u32 udelay;
32789diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32790index 94bbc85..78c12e6 100644
32791--- a/drivers/net/ethernet/mellanox/mlx4/main.c
32792+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32793@@ -40,6 +40,7 @@
32794 #include <linux/dma-mapping.h>
32795 #include <linux/slab.h>
32796 #include <linux/io-mapping.h>
32797+#include <linux/sched.h>
32798
32799 #include <linux/mlx4/device.h>
32800 #include <linux/mlx4/doorbell.h>
32801diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32802index 5046a64..71ca936 100644
32803--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32804+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32805@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32806 void (*link_down)(struct __vxge_hw_device *devh);
32807 void (*crit_err)(struct __vxge_hw_device *devh,
32808 enum vxge_hw_event type, u64 ext_data);
32809-};
32810+} __no_const;
32811
32812 /*
32813 * struct __vxge_hw_blockpool_entry - Block private data structure
32814diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32815index 4a518a3..936b334 100644
32816--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32817+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32818@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32819 struct vxge_hw_mempool_dma *dma_object,
32820 u32 index,
32821 u32 is_last);
32822-};
32823+} __no_const;
32824
32825 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32826 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32827diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32828index c8f47f1..5da9840 100644
32829--- a/drivers/net/ethernet/realtek/r8169.c
32830+++ b/drivers/net/ethernet/realtek/r8169.c
32831@@ -698,17 +698,17 @@ struct rtl8169_private {
32832 struct mdio_ops {
32833 void (*write)(void __iomem *, int, int);
32834 int (*read)(void __iomem *, int);
32835- } mdio_ops;
32836+ } __no_const mdio_ops;
32837
32838 struct pll_power_ops {
32839 void (*down)(struct rtl8169_private *);
32840 void (*up)(struct rtl8169_private *);
32841- } pll_power_ops;
32842+ } __no_const pll_power_ops;
32843
32844 struct jumbo_ops {
32845 void (*enable)(struct rtl8169_private *);
32846 void (*disable)(struct rtl8169_private *);
32847- } jumbo_ops;
32848+ } __no_const jumbo_ops;
32849
32850 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32851 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32852diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32853index 1b4658c..a30dabb 100644
32854--- a/drivers/net/ethernet/sis/sis190.c
32855+++ b/drivers/net/ethernet/sis/sis190.c
32856@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32857 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32858 struct net_device *dev)
32859 {
32860- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32861+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32862 struct sis190_private *tp = netdev_priv(dev);
32863 struct pci_dev *isa_bridge;
32864 u8 reg, tmp8;
32865diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32866index edfa15d..002bfa9 100644
32867--- a/drivers/net/ppp/ppp_generic.c
32868+++ b/drivers/net/ppp/ppp_generic.c
32869@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32870 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32871 struct ppp_stats stats;
32872 struct ppp_comp_stats cstats;
32873- char *vers;
32874
32875 switch (cmd) {
32876 case SIOCGPPPSTATS:
32877@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32878 break;
32879
32880 case SIOCGPPPVER:
32881- vers = PPP_VERSION;
32882- if (copy_to_user(addr, vers, strlen(vers) + 1))
32883+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32884 break;
32885 err = 0;
32886 break;
32887diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32888index 515f122..41dd273 100644
32889--- a/drivers/net/tokenring/abyss.c
32890+++ b/drivers/net/tokenring/abyss.c
32891@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32892
32893 static int __init abyss_init (void)
32894 {
32895- abyss_netdev_ops = tms380tr_netdev_ops;
32896+ pax_open_kernel();
32897+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32898
32899- abyss_netdev_ops.ndo_open = abyss_open;
32900- abyss_netdev_ops.ndo_stop = abyss_close;
32901+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32902+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32903+ pax_close_kernel();
32904
32905 return pci_register_driver(&abyss_driver);
32906 }
32907diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32908index 6153cfd..cf69c1c 100644
32909--- a/drivers/net/tokenring/madgemc.c
32910+++ b/drivers/net/tokenring/madgemc.c
32911@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32912
32913 static int __init madgemc_init (void)
32914 {
32915- madgemc_netdev_ops = tms380tr_netdev_ops;
32916- madgemc_netdev_ops.ndo_open = madgemc_open;
32917- madgemc_netdev_ops.ndo_stop = madgemc_close;
32918+ pax_open_kernel();
32919+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32920+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32921+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32922+ pax_close_kernel();
32923
32924 return mca_register_driver (&madgemc_driver);
32925 }
32926diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32927index 8d362e6..f91cc52 100644
32928--- a/drivers/net/tokenring/proteon.c
32929+++ b/drivers/net/tokenring/proteon.c
32930@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32931 struct platform_device *pdev;
32932 int i, num = 0, err = 0;
32933
32934- proteon_netdev_ops = tms380tr_netdev_ops;
32935- proteon_netdev_ops.ndo_open = proteon_open;
32936- proteon_netdev_ops.ndo_stop = tms380tr_close;
32937+ pax_open_kernel();
32938+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32939+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32940+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32941+ pax_close_kernel();
32942
32943 err = platform_driver_register(&proteon_driver);
32944 if (err)
32945diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32946index 46db5c5..37c1536 100644
32947--- a/drivers/net/tokenring/skisa.c
32948+++ b/drivers/net/tokenring/skisa.c
32949@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32950 struct platform_device *pdev;
32951 int i, num = 0, err = 0;
32952
32953- sk_isa_netdev_ops = tms380tr_netdev_ops;
32954- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32955- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32956+ pax_open_kernel();
32957+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32958+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32959+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32960+ pax_close_kernel();
32961
32962 err = platform_driver_register(&sk_isa_driver);
32963 if (err)
32964diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
32965index 304fe78..db112fa 100644
32966--- a/drivers/net/usb/hso.c
32967+++ b/drivers/net/usb/hso.c
32968@@ -71,7 +71,7 @@
32969 #include <asm/byteorder.h>
32970 #include <linux/serial_core.h>
32971 #include <linux/serial.h>
32972-
32973+#include <asm/local.h>
32974
32975 #define MOD_AUTHOR "Option Wireless"
32976 #define MOD_DESCRIPTION "USB High Speed Option driver"
32977@@ -257,7 +257,7 @@ struct hso_serial {
32978
32979 /* from usb_serial_port */
32980 struct tty_struct *tty;
32981- int open_count;
32982+ local_t open_count;
32983 spinlock_t serial_lock;
32984
32985 int (*write_data) (struct hso_serial *serial);
32986@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
32987 struct urb *urb;
32988
32989 urb = serial->rx_urb[0];
32990- if (serial->open_count > 0) {
32991+ if (local_read(&serial->open_count) > 0) {
32992 count = put_rxbuf_data(urb, serial);
32993 if (count == -1)
32994 return;
32995@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
32996 DUMP1(urb->transfer_buffer, urb->actual_length);
32997
32998 /* Anyone listening? */
32999- if (serial->open_count == 0)
33000+ if (local_read(&serial->open_count) == 0)
33001 return;
33002
33003 if (status == 0) {
33004@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33005 spin_unlock_irq(&serial->serial_lock);
33006
33007 /* check for port already opened, if not set the termios */
33008- serial->open_count++;
33009- if (serial->open_count == 1) {
33010+ if (local_inc_return(&serial->open_count) == 1) {
33011 serial->rx_state = RX_IDLE;
33012 /* Force default termio settings */
33013 _hso_serial_set_termios(tty, NULL);
33014@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33015 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33016 if (result) {
33017 hso_stop_serial_device(serial->parent);
33018- serial->open_count--;
33019+ local_dec(&serial->open_count);
33020 kref_put(&serial->parent->ref, hso_serial_ref_free);
33021 }
33022 } else {
33023@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33024
33025 /* reset the rts and dtr */
33026 /* do the actual close */
33027- serial->open_count--;
33028+ local_dec(&serial->open_count);
33029
33030- if (serial->open_count <= 0) {
33031- serial->open_count = 0;
33032+ if (local_read(&serial->open_count) <= 0) {
33033+ local_set(&serial->open_count, 0);
33034 spin_lock_irq(&serial->serial_lock);
33035 if (serial->tty == tty) {
33036 serial->tty->driver_data = NULL;
33037@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33038
33039 /* the actual setup */
33040 spin_lock_irqsave(&serial->serial_lock, flags);
33041- if (serial->open_count)
33042+ if (local_read(&serial->open_count))
33043 _hso_serial_set_termios(tty, old);
33044 else
33045 tty->termios = old;
33046@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33047 D1("Pending read interrupt on port %d\n", i);
33048 spin_lock(&serial->serial_lock);
33049 if (serial->rx_state == RX_IDLE &&
33050- serial->open_count > 0) {
33051+ local_read(&serial->open_count) > 0) {
33052 /* Setup and send a ctrl req read on
33053 * port i */
33054 if (!serial->rx_urb_filled[0]) {
33055@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33056 /* Start all serial ports */
33057 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33058 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33059- if (dev2ser(serial_table[i])->open_count) {
33060+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33061 result =
33062 hso_start_serial_device(serial_table[i], GFP_NOIO);
33063 hso_kick_transmit(dev2ser(serial_table[i]));
33064diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33065index e662cbc..8d4a102 100644
33066--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33067+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33068@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33069 * Return with error code if any of the queue indices
33070 * is out of range
33071 */
33072- if (p->ring_index[i] < 0 ||
33073- p->ring_index[i] >= adapter->num_rx_queues)
33074+ if (p->ring_index[i] >= adapter->num_rx_queues)
33075 return -EINVAL;
33076 }
33077
33078diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33079index 0f9ee46..e2d6e65 100644
33080--- a/drivers/net/wireless/ath/ath.h
33081+++ b/drivers/net/wireless/ath/ath.h
33082@@ -119,6 +119,7 @@ struct ath_ops {
33083 void (*write_flush) (void *);
33084 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33085 };
33086+typedef struct ath_ops __no_const ath_ops_no_const;
33087
33088 struct ath_common;
33089 struct ath_bus_ops;
33090diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33091index b592016..fe47870 100644
33092--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33093+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33094@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33095 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33096 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33097
33098- ACCESS_ONCE(ads->ds_link) = i->link;
33099- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33100+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
33101+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33102
33103 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33104 ctl6 = SM(i->keytype, AR_EncrType);
33105@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33106
33107 if ((i->is_first || i->is_last) &&
33108 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33109- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33110+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33111 | set11nTries(i->rates, 1)
33112 | set11nTries(i->rates, 2)
33113 | set11nTries(i->rates, 3)
33114 | (i->dur_update ? AR_DurUpdateEna : 0)
33115 | SM(0, AR_BurstDur);
33116
33117- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33118+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33119 | set11nRate(i->rates, 1)
33120 | set11nRate(i->rates, 2)
33121 | set11nRate(i->rates, 3);
33122 } else {
33123- ACCESS_ONCE(ads->ds_ctl2) = 0;
33124- ACCESS_ONCE(ads->ds_ctl3) = 0;
33125+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33126+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33127 }
33128
33129 if (!i->is_first) {
33130- ACCESS_ONCE(ads->ds_ctl0) = 0;
33131- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33132- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33133+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33134+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33135+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33136 return;
33137 }
33138
33139@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33140 break;
33141 }
33142
33143- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33144+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33145 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33146 | SM(i->txpower, AR_XmitPower)
33147 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33148@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33149 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33150 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33151
33152- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33153- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33154+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33155+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33156
33157 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33158 return;
33159
33160- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33161+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33162 | set11nPktDurRTSCTS(i->rates, 1);
33163
33164- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33165+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33166 | set11nPktDurRTSCTS(i->rates, 3);
33167
33168- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33169+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33170 | set11nRateFlags(i->rates, 1)
33171 | set11nRateFlags(i->rates, 2)
33172 | set11nRateFlags(i->rates, 3)
33173diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33174index f5ae3c6..7936af3 100644
33175--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33176+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33177@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33178 (i->qcu << AR_TxQcuNum_S) | 0x17;
33179
33180 checksum += val;
33181- ACCESS_ONCE(ads->info) = val;
33182+ ACCESS_ONCE_RW(ads->info) = val;
33183
33184 checksum += i->link;
33185- ACCESS_ONCE(ads->link) = i->link;
33186+ ACCESS_ONCE_RW(ads->link) = i->link;
33187
33188 checksum += i->buf_addr[0];
33189- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33190+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33191 checksum += i->buf_addr[1];
33192- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33193+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33194 checksum += i->buf_addr[2];
33195- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33196+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33197 checksum += i->buf_addr[3];
33198- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33199+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33200
33201 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33202- ACCESS_ONCE(ads->ctl3) = val;
33203+ ACCESS_ONCE_RW(ads->ctl3) = val;
33204 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33205- ACCESS_ONCE(ads->ctl5) = val;
33206+ ACCESS_ONCE_RW(ads->ctl5) = val;
33207 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33208- ACCESS_ONCE(ads->ctl7) = val;
33209+ ACCESS_ONCE_RW(ads->ctl7) = val;
33210 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33211- ACCESS_ONCE(ads->ctl9) = val;
33212+ ACCESS_ONCE_RW(ads->ctl9) = val;
33213
33214 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33215- ACCESS_ONCE(ads->ctl10) = checksum;
33216+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
33217
33218 if (i->is_first || i->is_last) {
33219- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33220+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33221 | set11nTries(i->rates, 1)
33222 | set11nTries(i->rates, 2)
33223 | set11nTries(i->rates, 3)
33224 | (i->dur_update ? AR_DurUpdateEna : 0)
33225 | SM(0, AR_BurstDur);
33226
33227- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33228+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33229 | set11nRate(i->rates, 1)
33230 | set11nRate(i->rates, 2)
33231 | set11nRate(i->rates, 3);
33232 } else {
33233- ACCESS_ONCE(ads->ctl13) = 0;
33234- ACCESS_ONCE(ads->ctl14) = 0;
33235+ ACCESS_ONCE_RW(ads->ctl13) = 0;
33236+ ACCESS_ONCE_RW(ads->ctl14) = 0;
33237 }
33238
33239 ads->ctl20 = 0;
33240@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33241
33242 ctl17 = SM(i->keytype, AR_EncrType);
33243 if (!i->is_first) {
33244- ACCESS_ONCE(ads->ctl11) = 0;
33245- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33246- ACCESS_ONCE(ads->ctl15) = 0;
33247- ACCESS_ONCE(ads->ctl16) = 0;
33248- ACCESS_ONCE(ads->ctl17) = ctl17;
33249- ACCESS_ONCE(ads->ctl18) = 0;
33250- ACCESS_ONCE(ads->ctl19) = 0;
33251+ ACCESS_ONCE_RW(ads->ctl11) = 0;
33252+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33253+ ACCESS_ONCE_RW(ads->ctl15) = 0;
33254+ ACCESS_ONCE_RW(ads->ctl16) = 0;
33255+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33256+ ACCESS_ONCE_RW(ads->ctl18) = 0;
33257+ ACCESS_ONCE_RW(ads->ctl19) = 0;
33258 return;
33259 }
33260
33261- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33262+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33263 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33264 | SM(i->txpower, AR_XmitPower)
33265 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33266@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33267 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33268 ctl12 |= SM(val, AR_PAPRDChainMask);
33269
33270- ACCESS_ONCE(ads->ctl12) = ctl12;
33271- ACCESS_ONCE(ads->ctl17) = ctl17;
33272+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33273+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33274
33275- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33276+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33277 | set11nPktDurRTSCTS(i->rates, 1);
33278
33279- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33280+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33281 | set11nPktDurRTSCTS(i->rates, 3);
33282
33283- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33284+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33285 | set11nRateFlags(i->rates, 1)
33286 | set11nRateFlags(i->rates, 2)
33287 | set11nRateFlags(i->rates, 3)
33288 | SM(i->rtscts_rate, AR_RTSCTSRate);
33289
33290- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33291+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33292 }
33293
33294 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33295diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33296index f389b3c..7359e18 100644
33297--- a/drivers/net/wireless/ath/ath9k/hw.h
33298+++ b/drivers/net/wireless/ath/ath9k/hw.h
33299@@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33300
33301 /* ANI */
33302 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33303-};
33304+} __no_const;
33305
33306 /**
33307 * struct ath_hw_ops - callbacks used by hardware code and driver code
33308@@ -635,7 +635,7 @@ struct ath_hw_ops {
33309 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33310 struct ath_hw_antcomb_conf *antconf);
33311
33312-};
33313+} __no_const;
33314
33315 struct ath_nf_limits {
33316 s16 max;
33317@@ -655,7 +655,7 @@ enum ath_cal_list {
33318 #define AH_FASTCC 0x4
33319
33320 struct ath_hw {
33321- struct ath_ops reg_ops;
33322+ ath_ops_no_const reg_ops;
33323
33324 struct ieee80211_hw *hw;
33325 struct ath_common common;
33326diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33327index bea8524..c677c06 100644
33328--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33329+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33330@@ -547,7 +547,7 @@ struct phy_func_ptr {
33331 void (*carrsuppr)(struct brcms_phy *);
33332 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33333 void (*detach)(struct brcms_phy *);
33334-};
33335+} __no_const;
33336
33337 struct brcms_phy {
33338 struct brcms_phy_pub pubpi_ro;
33339diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33340index 05f2ad1..ae00eea 100644
33341--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33342+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33343@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33344 */
33345 if (iwl3945_mod_params.disable_hw_scan) {
33346 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33347- iwl3945_hw_ops.hw_scan = NULL;
33348+ pax_open_kernel();
33349+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33350+ pax_close_kernel();
33351 }
33352
33353 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33354diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33355index 69a77e2..552b42c 100644
33356--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33357+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33358@@ -71,8 +71,8 @@ do { \
33359 } while (0)
33360
33361 #else
33362-#define IWL_DEBUG(m, level, fmt, args...)
33363-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33364+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33365+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33366 #define iwl_print_hex_dump(m, level, p, len)
33367 #endif /* CONFIG_IWLWIFI_DEBUG */
33368
33369diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33370index 523ad55..f8c5dc5 100644
33371--- a/drivers/net/wireless/mac80211_hwsim.c
33372+++ b/drivers/net/wireless/mac80211_hwsim.c
33373@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33374 return -EINVAL;
33375
33376 if (fake_hw_scan) {
33377- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33378- mac80211_hwsim_ops.sw_scan_start = NULL;
33379- mac80211_hwsim_ops.sw_scan_complete = NULL;
33380+ pax_open_kernel();
33381+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33382+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33383+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33384+ pax_close_kernel();
33385 }
33386
33387 spin_lock_init(&hwsim_radio_lock);
33388diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33389index 30f138b..c904585 100644
33390--- a/drivers/net/wireless/mwifiex/main.h
33391+++ b/drivers/net/wireless/mwifiex/main.h
33392@@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33393 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33394 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33395 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33396-};
33397+} __no_const;
33398
33399 struct mwifiex_adapter {
33400 u8 iface_type;
33401diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33402index 0c13840..a5c3ed6 100644
33403--- a/drivers/net/wireless/rndis_wlan.c
33404+++ b/drivers/net/wireless/rndis_wlan.c
33405@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33406
33407 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33408
33409- if (rts_threshold < 0 || rts_threshold > 2347)
33410+ if (rts_threshold > 2347)
33411 rts_threshold = 2347;
33412
33413 tmp = cpu_to_le32(rts_threshold);
33414diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33415index a77f1bb..c608b2b 100644
33416--- a/drivers/net/wireless/wl1251/wl1251.h
33417+++ b/drivers/net/wireless/wl1251/wl1251.h
33418@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33419 void (*reset)(struct wl1251 *wl);
33420 void (*enable_irq)(struct wl1251 *wl);
33421 void (*disable_irq)(struct wl1251 *wl);
33422-};
33423+} __no_const;
33424
33425 struct wl1251 {
33426 struct ieee80211_hw *hw;
33427diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33428index f34b5b2..b5abb9f 100644
33429--- a/drivers/oprofile/buffer_sync.c
33430+++ b/drivers/oprofile/buffer_sync.c
33431@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33432 if (cookie == NO_COOKIE)
33433 offset = pc;
33434 if (cookie == INVALID_COOKIE) {
33435- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33436+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33437 offset = pc;
33438 }
33439 if (cookie != last_cookie) {
33440@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33441 /* add userspace sample */
33442
33443 if (!mm) {
33444- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33445+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33446 return 0;
33447 }
33448
33449 cookie = lookup_dcookie(mm, s->eip, &offset);
33450
33451 if (cookie == INVALID_COOKIE) {
33452- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33453+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33454 return 0;
33455 }
33456
33457@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33458 /* ignore backtraces if failed to add a sample */
33459 if (state == sb_bt_start) {
33460 state = sb_bt_ignore;
33461- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33462+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33463 }
33464 }
33465 release_mm(mm);
33466diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33467index c0cc4e7..44d4e54 100644
33468--- a/drivers/oprofile/event_buffer.c
33469+++ b/drivers/oprofile/event_buffer.c
33470@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33471 }
33472
33473 if (buffer_pos == buffer_size) {
33474- atomic_inc(&oprofile_stats.event_lost_overflow);
33475+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33476 return;
33477 }
33478
33479diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33480index f8c752e..28bf4fc 100644
33481--- a/drivers/oprofile/oprof.c
33482+++ b/drivers/oprofile/oprof.c
33483@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33484 if (oprofile_ops.switch_events())
33485 return;
33486
33487- atomic_inc(&oprofile_stats.multiplex_counter);
33488+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33489 start_switch_worker();
33490 }
33491
33492diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33493index 917d28e..d62d981 100644
33494--- a/drivers/oprofile/oprofile_stats.c
33495+++ b/drivers/oprofile/oprofile_stats.c
33496@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33497 cpu_buf->sample_invalid_eip = 0;
33498 }
33499
33500- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33501- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33502- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33503- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33504- atomic_set(&oprofile_stats.multiplex_counter, 0);
33505+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33506+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33507+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33508+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33509+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33510 }
33511
33512
33513diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33514index 38b6fc0..b5cbfce 100644
33515--- a/drivers/oprofile/oprofile_stats.h
33516+++ b/drivers/oprofile/oprofile_stats.h
33517@@ -13,11 +13,11 @@
33518 #include <linux/atomic.h>
33519
33520 struct oprofile_stat_struct {
33521- atomic_t sample_lost_no_mm;
33522- atomic_t sample_lost_no_mapping;
33523- atomic_t bt_lost_no_mapping;
33524- atomic_t event_lost_overflow;
33525- atomic_t multiplex_counter;
33526+ atomic_unchecked_t sample_lost_no_mm;
33527+ atomic_unchecked_t sample_lost_no_mapping;
33528+ atomic_unchecked_t bt_lost_no_mapping;
33529+ atomic_unchecked_t event_lost_overflow;
33530+ atomic_unchecked_t multiplex_counter;
33531 };
33532
33533 extern struct oprofile_stat_struct oprofile_stats;
33534diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33535index 2f0aa0f..90fab02 100644
33536--- a/drivers/oprofile/oprofilefs.c
33537+++ b/drivers/oprofile/oprofilefs.c
33538@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33539
33540
33541 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33542- char const *name, atomic_t *val)
33543+ char const *name, atomic_unchecked_t *val)
33544 {
33545 return __oprofilefs_create_file(sb, root, name,
33546 &atomic_ro_fops, 0444, val);
33547diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33548index 3f56bc0..707d642 100644
33549--- a/drivers/parport/procfs.c
33550+++ b/drivers/parport/procfs.c
33551@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33552
33553 *ppos += len;
33554
33555- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33556+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33557 }
33558
33559 #ifdef CONFIG_PARPORT_1284
33560@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33561
33562 *ppos += len;
33563
33564- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33565+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33566 }
33567 #endif /* IEEE1284.3 support. */
33568
33569diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33570index 9fff878..ad0ad53 100644
33571--- a/drivers/pci/hotplug/cpci_hotplug.h
33572+++ b/drivers/pci/hotplug/cpci_hotplug.h
33573@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33574 int (*hardware_test) (struct slot* slot, u32 value);
33575 u8 (*get_power) (struct slot* slot);
33576 int (*set_power) (struct slot* slot, int value);
33577-};
33578+} __no_const;
33579
33580 struct cpci_hp_controller {
33581 unsigned int irq;
33582diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33583index 76ba8a1..20ca857 100644
33584--- a/drivers/pci/hotplug/cpqphp_nvram.c
33585+++ b/drivers/pci/hotplug/cpqphp_nvram.c
33586@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33587
33588 void compaq_nvram_init (void __iomem *rom_start)
33589 {
33590+
33591+#ifndef CONFIG_PAX_KERNEXEC
33592 if (rom_start) {
33593 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33594 }
33595+#endif
33596+
33597 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33598
33599 /* initialize our int15 lock */
33600diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33601index 1cfbf22..be96487 100644
33602--- a/drivers/pci/pcie/aspm.c
33603+++ b/drivers/pci/pcie/aspm.c
33604@@ -27,9 +27,9 @@
33605 #define MODULE_PARAM_PREFIX "pcie_aspm."
33606
33607 /* Note: those are not register definitions */
33608-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33609-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33610-#define ASPM_STATE_L1 (4) /* L1 state */
33611+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33612+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33613+#define ASPM_STATE_L1 (4U) /* L1 state */
33614 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33615 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33616
33617diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33618index 04e74f4..a960176 100644
33619--- a/drivers/pci/probe.c
33620+++ b/drivers/pci/probe.c
33621@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33622 u32 l, sz, mask;
33623 u16 orig_cmd;
33624
33625- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33626+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33627
33628 if (!dev->mmio_always_on) {
33629 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33630diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33631index 27911b5..5b6db88 100644
33632--- a/drivers/pci/proc.c
33633+++ b/drivers/pci/proc.c
33634@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33635 static int __init pci_proc_init(void)
33636 {
33637 struct pci_dev *dev = NULL;
33638+
33639+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33640+#ifdef CONFIG_GRKERNSEC_PROC_USER
33641+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33642+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33643+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33644+#endif
33645+#else
33646 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33647+#endif
33648 proc_create("devices", 0, proc_bus_pci_dir,
33649 &proc_bus_pci_dev_operations);
33650 proc_initialized = 1;
33651diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33652index 7b82868..b9344c9 100644
33653--- a/drivers/platform/x86/thinkpad_acpi.c
33654+++ b/drivers/platform/x86/thinkpad_acpi.c
33655@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33656 return 0;
33657 }
33658
33659-void static hotkey_mask_warn_incomplete_mask(void)
33660+static void hotkey_mask_warn_incomplete_mask(void)
33661 {
33662 /* log only what the user can fix... */
33663 const u32 wantedmask = hotkey_driver_mask &
33664@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33665 }
33666 }
33667
33668-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33669- struct tp_nvram_state *newn,
33670- const u32 event_mask)
33671-{
33672-
33673 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33674 do { \
33675 if ((event_mask & (1 << __scancode)) && \
33676@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33677 tpacpi_hotkey_send_key(__scancode); \
33678 } while (0)
33679
33680- void issue_volchange(const unsigned int oldvol,
33681- const unsigned int newvol)
33682- {
33683- unsigned int i = oldvol;
33684+static void issue_volchange(const unsigned int oldvol,
33685+ const unsigned int newvol,
33686+ const u32 event_mask)
33687+{
33688+ unsigned int i = oldvol;
33689
33690- while (i > newvol) {
33691- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33692- i--;
33693- }
33694- while (i < newvol) {
33695- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33696- i++;
33697- }
33698+ while (i > newvol) {
33699+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33700+ i--;
33701 }
33702+ while (i < newvol) {
33703+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33704+ i++;
33705+ }
33706+}
33707
33708- void issue_brightnesschange(const unsigned int oldbrt,
33709- const unsigned int newbrt)
33710- {
33711- unsigned int i = oldbrt;
33712+static void issue_brightnesschange(const unsigned int oldbrt,
33713+ const unsigned int newbrt,
33714+ const u32 event_mask)
33715+{
33716+ unsigned int i = oldbrt;
33717
33718- while (i > newbrt) {
33719- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33720- i--;
33721- }
33722- while (i < newbrt) {
33723- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33724- i++;
33725- }
33726+ while (i > newbrt) {
33727+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33728+ i--;
33729+ }
33730+ while (i < newbrt) {
33731+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33732+ i++;
33733 }
33734+}
33735
33736+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33737+ struct tp_nvram_state *newn,
33738+ const u32 event_mask)
33739+{
33740 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33741 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33742 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33743@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33744 oldn->volume_level != newn->volume_level) {
33745 /* recently muted, or repeated mute keypress, or
33746 * multiple presses ending in mute */
33747- issue_volchange(oldn->volume_level, newn->volume_level);
33748+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33749 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33750 }
33751 } else {
33752@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33753 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33754 }
33755 if (oldn->volume_level != newn->volume_level) {
33756- issue_volchange(oldn->volume_level, newn->volume_level);
33757+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33758 } else if (oldn->volume_toggle != newn->volume_toggle) {
33759 /* repeated vol up/down keypress at end of scale ? */
33760 if (newn->volume_level == 0)
33761@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33762 /* handle brightness */
33763 if (oldn->brightness_level != newn->brightness_level) {
33764 issue_brightnesschange(oldn->brightness_level,
33765- newn->brightness_level);
33766+ newn->brightness_level,
33767+ event_mask);
33768 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33769 /* repeated key presses that didn't change state */
33770 if (newn->brightness_level == 0)
33771@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33772 && !tp_features.bright_unkfw)
33773 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33774 }
33775+}
33776
33777 #undef TPACPI_COMPARE_KEY
33778 #undef TPACPI_MAY_SEND_KEY
33779-}
33780
33781 /*
33782 * Polling driver
33783diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33784index b859d16..5cc6b1a 100644
33785--- a/drivers/pnp/pnpbios/bioscalls.c
33786+++ b/drivers/pnp/pnpbios/bioscalls.c
33787@@ -59,7 +59,7 @@ do { \
33788 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33789 } while(0)
33790
33791-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33792+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33793 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33794
33795 /*
33796@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33797
33798 cpu = get_cpu();
33799 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33800+
33801+ pax_open_kernel();
33802 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33803+ pax_close_kernel();
33804
33805 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33806 spin_lock_irqsave(&pnp_bios_lock, flags);
33807@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33808 :"memory");
33809 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33810
33811+ pax_open_kernel();
33812 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33813+ pax_close_kernel();
33814+
33815 put_cpu();
33816
33817 /* If we get here and this is set then the PnP BIOS faulted on us. */
33818@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33819 return status;
33820 }
33821
33822-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33823+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33824 {
33825 int i;
33826
33827@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33828 pnp_bios_callpoint.offset = header->fields.pm16offset;
33829 pnp_bios_callpoint.segment = PNP_CS16;
33830
33831+ pax_open_kernel();
33832+
33833 for_each_possible_cpu(i) {
33834 struct desc_struct *gdt = get_cpu_gdt_table(i);
33835 if (!gdt)
33836@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33837 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33838 (unsigned long)__va(header->fields.pm16dseg));
33839 }
33840+
33841+ pax_close_kernel();
33842 }
33843diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33844index b0ecacb..7c9da2e 100644
33845--- a/drivers/pnp/resource.c
33846+++ b/drivers/pnp/resource.c
33847@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33848 return 1;
33849
33850 /* check if the resource is valid */
33851- if (*irq < 0 || *irq > 15)
33852+ if (*irq > 15)
33853 return 0;
33854
33855 /* check if the resource is reserved */
33856@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33857 return 1;
33858
33859 /* check if the resource is valid */
33860- if (*dma < 0 || *dma == 4 || *dma > 7)
33861+ if (*dma == 4 || *dma > 7)
33862 return 0;
33863
33864 /* check if the resource is reserved */
33865diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33866index bb16f5b..c751eef 100644
33867--- a/drivers/power/bq27x00_battery.c
33868+++ b/drivers/power/bq27x00_battery.c
33869@@ -67,7 +67,7 @@
33870 struct bq27x00_device_info;
33871 struct bq27x00_access_methods {
33872 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33873-};
33874+} __no_const;
33875
33876 enum bq27x00_chip { BQ27000, BQ27500 };
33877
33878diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33879index 33f5d9a..d957d3f 100644
33880--- a/drivers/regulator/max8660.c
33881+++ b/drivers/regulator/max8660.c
33882@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33883 max8660->shadow_regs[MAX8660_OVER1] = 5;
33884 } else {
33885 /* Otherwise devices can be toggled via software */
33886- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33887- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33888+ pax_open_kernel();
33889+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33890+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33891+ pax_close_kernel();
33892 }
33893
33894 /*
33895diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33896index 023d17d..74ef35b 100644
33897--- a/drivers/regulator/mc13892-regulator.c
33898+++ b/drivers/regulator/mc13892-regulator.c
33899@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33900 }
33901 mc13xxx_unlock(mc13892);
33902
33903- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33904+ pax_open_kernel();
33905+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33906 = mc13892_vcam_set_mode;
33907- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33908+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33909 = mc13892_vcam_get_mode;
33910+ pax_close_kernel();
33911 for (i = 0; i < pdata->num_regulators; i++) {
33912 init_data = &pdata->regulators[i];
33913 priv->regulators[i] = regulator_register(
33914diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33915index cace6d3..f623fda 100644
33916--- a/drivers/rtc/rtc-dev.c
33917+++ b/drivers/rtc/rtc-dev.c
33918@@ -14,6 +14,7 @@
33919 #include <linux/module.h>
33920 #include <linux/rtc.h>
33921 #include <linux/sched.h>
33922+#include <linux/grsecurity.h>
33923 #include "rtc-core.h"
33924
33925 static dev_t rtc_devt;
33926@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33927 if (copy_from_user(&tm, uarg, sizeof(tm)))
33928 return -EFAULT;
33929
33930+ gr_log_timechange();
33931+
33932 return rtc_set_time(rtc, &tm);
33933
33934 case RTC_PIE_ON:
33935diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33936index ffb5878..e6d785c 100644
33937--- a/drivers/scsi/aacraid/aacraid.h
33938+++ b/drivers/scsi/aacraid/aacraid.h
33939@@ -492,7 +492,7 @@ struct adapter_ops
33940 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33941 /* Administrative operations */
33942 int (*adapter_comm)(struct aac_dev * dev, int comm);
33943-};
33944+} __no_const;
33945
33946 /*
33947 * Define which interrupt handler needs to be installed
33948diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33949index 705e13e..91c873c 100644
33950--- a/drivers/scsi/aacraid/linit.c
33951+++ b/drivers/scsi/aacraid/linit.c
33952@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33953 #elif defined(__devinitconst)
33954 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33955 #else
33956-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33957+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33958 #endif
33959 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33960 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33961diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
33962index d5ff142..49c0ebb 100644
33963--- a/drivers/scsi/aic94xx/aic94xx_init.c
33964+++ b/drivers/scsi/aic94xx/aic94xx_init.c
33965@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
33966 .lldd_control_phy = asd_control_phy,
33967 };
33968
33969-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33970+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33971 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33972 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33973 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33974diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
33975index a796de9..1ef20e1 100644
33976--- a/drivers/scsi/bfa/bfa.h
33977+++ b/drivers/scsi/bfa/bfa.h
33978@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33979 u32 *end);
33980 int cpe_vec_q0;
33981 int rme_vec_q0;
33982-};
33983+} __no_const;
33984 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33985
33986 struct bfa_faa_cbfn_s {
33987diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
33988index e07bd47..cd1bbbb 100644
33989--- a/drivers/scsi/bfa/bfa_fcpim.c
33990+++ b/drivers/scsi/bfa/bfa_fcpim.c
33991@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
33992
33993 bfa_iotag_attach(fcp);
33994
33995- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
33996+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
33997 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
33998 (fcp->num_itns * sizeof(struct bfa_itn_s));
33999 memset(fcp->itn_arr, 0,
34000@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34001 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34002 {
34003 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34004- struct bfa_itn_s *itn;
34005+ bfa_itn_s_no_const *itn;
34006
34007 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34008 itn->isr = isr;
34009diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34010index 1080bcb..a3b39e3 100644
34011--- a/drivers/scsi/bfa/bfa_fcpim.h
34012+++ b/drivers/scsi/bfa/bfa_fcpim.h
34013@@ -37,6 +37,7 @@ struct bfa_iotag_s {
34014 struct bfa_itn_s {
34015 bfa_isr_func_t isr;
34016 };
34017+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34018
34019 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34020 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34021@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34022 struct list_head iotag_tio_free_q; /* free IO resources */
34023 struct list_head iotag_unused_q; /* unused IO resources*/
34024 struct bfa_iotag_s *iotag_arr;
34025- struct bfa_itn_s *itn_arr;
34026+ bfa_itn_s_no_const *itn_arr;
34027 int num_ioim_reqs;
34028 int num_fwtio_reqs;
34029 int num_itns;
34030diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34031index 546d46b..642fa5b 100644
34032--- a/drivers/scsi/bfa/bfa_ioc.h
34033+++ b/drivers/scsi/bfa/bfa_ioc.h
34034@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34035 bfa_ioc_disable_cbfn_t disable_cbfn;
34036 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34037 bfa_ioc_reset_cbfn_t reset_cbfn;
34038-};
34039+} __no_const;
34040
34041 /*
34042 * IOC event notification mechanism.
34043@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34044 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34045 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34046 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34047-};
34048+} __no_const;
34049
34050 /*
34051 * Queue element to wait for room in request queue. FIFO order is
34052diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34053index 351dc0b..951dc32 100644
34054--- a/drivers/scsi/hosts.c
34055+++ b/drivers/scsi/hosts.c
34056@@ -42,7 +42,7 @@
34057 #include "scsi_logging.h"
34058
34059
34060-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34061+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34062
34063
34064 static void scsi_host_cls_release(struct device *dev)
34065@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34066 * subtract one because we increment first then return, but we need to
34067 * know what the next host number was before increment
34068 */
34069- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34070+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34071 shost->dma_channel = 0xff;
34072
34073 /* These three are default values which can be overridden */
34074diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34075index 865d452..e9b7fa7 100644
34076--- a/drivers/scsi/hpsa.c
34077+++ b/drivers/scsi/hpsa.c
34078@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34079 u32 a;
34080
34081 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34082- return h->access.command_completed(h);
34083+ return h->access->command_completed(h);
34084
34085 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34086 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34087@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34088 while (!list_empty(&h->reqQ)) {
34089 c = list_entry(h->reqQ.next, struct CommandList, list);
34090 /* can't do anything if fifo is full */
34091- if ((h->access.fifo_full(h))) {
34092+ if ((h->access->fifo_full(h))) {
34093 dev_warn(&h->pdev->dev, "fifo full\n");
34094 break;
34095 }
34096@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34097 h->Qdepth--;
34098
34099 /* Tell the controller execute command */
34100- h->access.submit_command(h, c);
34101+ h->access->submit_command(h, c);
34102
34103 /* Put job onto the completed Q */
34104 addQ(&h->cmpQ, c);
34105@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34106
34107 static inline unsigned long get_next_completion(struct ctlr_info *h)
34108 {
34109- return h->access.command_completed(h);
34110+ return h->access->command_completed(h);
34111 }
34112
34113 static inline bool interrupt_pending(struct ctlr_info *h)
34114 {
34115- return h->access.intr_pending(h);
34116+ return h->access->intr_pending(h);
34117 }
34118
34119 static inline long interrupt_not_for_us(struct ctlr_info *h)
34120 {
34121- return (h->access.intr_pending(h) == 0) ||
34122+ return (h->access->intr_pending(h) == 0) ||
34123 (h->interrupts_enabled == 0);
34124 }
34125
34126@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34127 if (prod_index < 0)
34128 return -ENODEV;
34129 h->product_name = products[prod_index].product_name;
34130- h->access = *(products[prod_index].access);
34131+ h->access = products[prod_index].access;
34132
34133 if (hpsa_board_disabled(h->pdev)) {
34134 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34135@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34136
34137 assert_spin_locked(&lockup_detector_lock);
34138 remove_ctlr_from_lockup_detector_list(h);
34139- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34140+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34141 spin_lock_irqsave(&h->lock, flags);
34142 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34143 spin_unlock_irqrestore(&h->lock, flags);
34144@@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34145 }
34146
34147 /* make sure the board interrupts are off */
34148- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34149+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34150
34151 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34152 goto clean2;
34153@@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34154 * fake ones to scoop up any residual completions.
34155 */
34156 spin_lock_irqsave(&h->lock, flags);
34157- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34158+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34159 spin_unlock_irqrestore(&h->lock, flags);
34160 free_irq(h->intr[h->intr_mode], h);
34161 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34162@@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34163 dev_info(&h->pdev->dev, "Board READY.\n");
34164 dev_info(&h->pdev->dev,
34165 "Waiting for stale completions to drain.\n");
34166- h->access.set_intr_mask(h, HPSA_INTR_ON);
34167+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34168 msleep(10000);
34169- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34170+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34171
34172 rc = controller_reset_failed(h->cfgtable);
34173 if (rc)
34174@@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34175 }
34176
34177 /* Turn the interrupts on so we can service requests */
34178- h->access.set_intr_mask(h, HPSA_INTR_ON);
34179+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34180
34181 hpsa_hba_inquiry(h);
34182 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34183@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34184 * To write all data in the battery backed cache to disks
34185 */
34186 hpsa_flush_cache(h);
34187- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34188+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34189 free_irq(h->intr[h->intr_mode], h);
34190 #ifdef CONFIG_PCI_MSI
34191 if (h->msix_vector)
34192@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34193 return;
34194 }
34195 /* Change the access methods to the performant access methods */
34196- h->access = SA5_performant_access;
34197+ h->access = &SA5_performant_access;
34198 h->transMethod = CFGTBL_Trans_Performant;
34199 }
34200
34201diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34202index 91edafb..a9b88ec 100644
34203--- a/drivers/scsi/hpsa.h
34204+++ b/drivers/scsi/hpsa.h
34205@@ -73,7 +73,7 @@ struct ctlr_info {
34206 unsigned int msix_vector;
34207 unsigned int msi_vector;
34208 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34209- struct access_method access;
34210+ struct access_method *access;
34211
34212 /* queue and queue Info */
34213 struct list_head reqQ;
34214diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34215index f2df059..a3a9930 100644
34216--- a/drivers/scsi/ips.h
34217+++ b/drivers/scsi/ips.h
34218@@ -1027,7 +1027,7 @@ typedef struct {
34219 int (*intr)(struct ips_ha *);
34220 void (*enableint)(struct ips_ha *);
34221 uint32_t (*statupd)(struct ips_ha *);
34222-} ips_hw_func_t;
34223+} __no_const ips_hw_func_t;
34224
34225 typedef struct ips_ha {
34226 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34227diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34228index 9de9db2..1e09660 100644
34229--- a/drivers/scsi/libfc/fc_exch.c
34230+++ b/drivers/scsi/libfc/fc_exch.c
34231@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34232 * all together if not used XXX
34233 */
34234 struct {
34235- atomic_t no_free_exch;
34236- atomic_t no_free_exch_xid;
34237- atomic_t xid_not_found;
34238- atomic_t xid_busy;
34239- atomic_t seq_not_found;
34240- atomic_t non_bls_resp;
34241+ atomic_unchecked_t no_free_exch;
34242+ atomic_unchecked_t no_free_exch_xid;
34243+ atomic_unchecked_t xid_not_found;
34244+ atomic_unchecked_t xid_busy;
34245+ atomic_unchecked_t seq_not_found;
34246+ atomic_unchecked_t non_bls_resp;
34247 } stats;
34248 };
34249
34250@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34251 /* allocate memory for exchange */
34252 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34253 if (!ep) {
34254- atomic_inc(&mp->stats.no_free_exch);
34255+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34256 goto out;
34257 }
34258 memset(ep, 0, sizeof(*ep));
34259@@ -780,7 +780,7 @@ out:
34260 return ep;
34261 err:
34262 spin_unlock_bh(&pool->lock);
34263- atomic_inc(&mp->stats.no_free_exch_xid);
34264+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34265 mempool_free(ep, mp->ep_pool);
34266 return NULL;
34267 }
34268@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34269 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34270 ep = fc_exch_find(mp, xid);
34271 if (!ep) {
34272- atomic_inc(&mp->stats.xid_not_found);
34273+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34274 reject = FC_RJT_OX_ID;
34275 goto out;
34276 }
34277@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34278 ep = fc_exch_find(mp, xid);
34279 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34280 if (ep) {
34281- atomic_inc(&mp->stats.xid_busy);
34282+ atomic_inc_unchecked(&mp->stats.xid_busy);
34283 reject = FC_RJT_RX_ID;
34284 goto rel;
34285 }
34286@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34287 }
34288 xid = ep->xid; /* get our XID */
34289 } else if (!ep) {
34290- atomic_inc(&mp->stats.xid_not_found);
34291+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34292 reject = FC_RJT_RX_ID; /* XID not found */
34293 goto out;
34294 }
34295@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34296 } else {
34297 sp = &ep->seq;
34298 if (sp->id != fh->fh_seq_id) {
34299- atomic_inc(&mp->stats.seq_not_found);
34300+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34301 if (f_ctl & FC_FC_END_SEQ) {
34302 /*
34303 * Update sequence_id based on incoming last
34304@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34305
34306 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34307 if (!ep) {
34308- atomic_inc(&mp->stats.xid_not_found);
34309+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34310 goto out;
34311 }
34312 if (ep->esb_stat & ESB_ST_COMPLETE) {
34313- atomic_inc(&mp->stats.xid_not_found);
34314+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34315 goto rel;
34316 }
34317 if (ep->rxid == FC_XID_UNKNOWN)
34318 ep->rxid = ntohs(fh->fh_rx_id);
34319 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34320- atomic_inc(&mp->stats.xid_not_found);
34321+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34322 goto rel;
34323 }
34324 if (ep->did != ntoh24(fh->fh_s_id) &&
34325 ep->did != FC_FID_FLOGI) {
34326- atomic_inc(&mp->stats.xid_not_found);
34327+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34328 goto rel;
34329 }
34330 sof = fr_sof(fp);
34331@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34332 sp->ssb_stat |= SSB_ST_RESP;
34333 sp->id = fh->fh_seq_id;
34334 } else if (sp->id != fh->fh_seq_id) {
34335- atomic_inc(&mp->stats.seq_not_found);
34336+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34337 goto rel;
34338 }
34339
34340@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34341 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34342
34343 if (!sp)
34344- atomic_inc(&mp->stats.xid_not_found);
34345+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34346 else
34347- atomic_inc(&mp->stats.non_bls_resp);
34348+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34349
34350 fc_frame_free(fp);
34351 }
34352diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34353index db9238f..4378ed2 100644
34354--- a/drivers/scsi/libsas/sas_ata.c
34355+++ b/drivers/scsi/libsas/sas_ata.c
34356@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34357 .postreset = ata_std_postreset,
34358 .error_handler = ata_std_error_handler,
34359 .post_internal_cmd = sas_ata_post_internal,
34360- .qc_defer = ata_std_qc_defer,
34361+ .qc_defer = ata_std_qc_defer,
34362 .qc_prep = ata_noop_qc_prep,
34363 .qc_issue = sas_ata_qc_issue,
34364 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34365diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34366index bb4c8e0..f33d849 100644
34367--- a/drivers/scsi/lpfc/lpfc.h
34368+++ b/drivers/scsi/lpfc/lpfc.h
34369@@ -425,7 +425,7 @@ struct lpfc_vport {
34370 struct dentry *debug_nodelist;
34371 struct dentry *vport_debugfs_root;
34372 struct lpfc_debugfs_trc *disc_trc;
34373- atomic_t disc_trc_cnt;
34374+ atomic_unchecked_t disc_trc_cnt;
34375 #endif
34376 uint8_t stat_data_enabled;
34377 uint8_t stat_data_blocked;
34378@@ -835,8 +835,8 @@ struct lpfc_hba {
34379 struct timer_list fabric_block_timer;
34380 unsigned long bit_flags;
34381 #define FABRIC_COMANDS_BLOCKED 0
34382- atomic_t num_rsrc_err;
34383- atomic_t num_cmd_success;
34384+ atomic_unchecked_t num_rsrc_err;
34385+ atomic_unchecked_t num_cmd_success;
34386 unsigned long last_rsrc_error_time;
34387 unsigned long last_ramp_down_time;
34388 unsigned long last_ramp_up_time;
34389@@ -866,7 +866,7 @@ struct lpfc_hba {
34390
34391 struct dentry *debug_slow_ring_trc;
34392 struct lpfc_debugfs_trc *slow_ring_trc;
34393- atomic_t slow_ring_trc_cnt;
34394+ atomic_unchecked_t slow_ring_trc_cnt;
34395 /* iDiag debugfs sub-directory */
34396 struct dentry *idiag_root;
34397 struct dentry *idiag_pci_cfg;
34398diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34399index 2838259..a07cfb5 100644
34400--- a/drivers/scsi/lpfc/lpfc_debugfs.c
34401+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34402@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34403
34404 #include <linux/debugfs.h>
34405
34406-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34407+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34408 static unsigned long lpfc_debugfs_start_time = 0L;
34409
34410 /* iDiag */
34411@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34412 lpfc_debugfs_enable = 0;
34413
34414 len = 0;
34415- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34416+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34417 (lpfc_debugfs_max_disc_trc - 1);
34418 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34419 dtp = vport->disc_trc + i;
34420@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34421 lpfc_debugfs_enable = 0;
34422
34423 len = 0;
34424- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34425+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34426 (lpfc_debugfs_max_slow_ring_trc - 1);
34427 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34428 dtp = phba->slow_ring_trc + i;
34429@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34430 !vport || !vport->disc_trc)
34431 return;
34432
34433- index = atomic_inc_return(&vport->disc_trc_cnt) &
34434+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34435 (lpfc_debugfs_max_disc_trc - 1);
34436 dtp = vport->disc_trc + index;
34437 dtp->fmt = fmt;
34438 dtp->data1 = data1;
34439 dtp->data2 = data2;
34440 dtp->data3 = data3;
34441- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34442+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34443 dtp->jif = jiffies;
34444 #endif
34445 return;
34446@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34447 !phba || !phba->slow_ring_trc)
34448 return;
34449
34450- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34451+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34452 (lpfc_debugfs_max_slow_ring_trc - 1);
34453 dtp = phba->slow_ring_trc + index;
34454 dtp->fmt = fmt;
34455 dtp->data1 = data1;
34456 dtp->data2 = data2;
34457 dtp->data3 = data3;
34458- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34459+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34460 dtp->jif = jiffies;
34461 #endif
34462 return;
34463@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34464 "slow_ring buffer\n");
34465 goto debug_failed;
34466 }
34467- atomic_set(&phba->slow_ring_trc_cnt, 0);
34468+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34469 memset(phba->slow_ring_trc, 0,
34470 (sizeof(struct lpfc_debugfs_trc) *
34471 lpfc_debugfs_max_slow_ring_trc));
34472@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34473 "buffer\n");
34474 goto debug_failed;
34475 }
34476- atomic_set(&vport->disc_trc_cnt, 0);
34477+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34478
34479 snprintf(name, sizeof(name), "discovery_trace");
34480 vport->debug_disc_trc =
34481diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34482index 55bc4fc..a2a109c 100644
34483--- a/drivers/scsi/lpfc/lpfc_init.c
34484+++ b/drivers/scsi/lpfc/lpfc_init.c
34485@@ -10027,8 +10027,10 @@ lpfc_init(void)
34486 printk(LPFC_COPYRIGHT "\n");
34487
34488 if (lpfc_enable_npiv) {
34489- lpfc_transport_functions.vport_create = lpfc_vport_create;
34490- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34491+ pax_open_kernel();
34492+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34493+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34494+ pax_close_kernel();
34495 }
34496 lpfc_transport_template =
34497 fc_attach_transport(&lpfc_transport_functions);
34498diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34499index 2e1e54e..1af0a0d 100644
34500--- a/drivers/scsi/lpfc/lpfc_scsi.c
34501+++ b/drivers/scsi/lpfc/lpfc_scsi.c
34502@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34503 uint32_t evt_posted;
34504
34505 spin_lock_irqsave(&phba->hbalock, flags);
34506- atomic_inc(&phba->num_rsrc_err);
34507+ atomic_inc_unchecked(&phba->num_rsrc_err);
34508 phba->last_rsrc_error_time = jiffies;
34509
34510 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34511@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34512 unsigned long flags;
34513 struct lpfc_hba *phba = vport->phba;
34514 uint32_t evt_posted;
34515- atomic_inc(&phba->num_cmd_success);
34516+ atomic_inc_unchecked(&phba->num_cmd_success);
34517
34518 if (vport->cfg_lun_queue_depth <= queue_depth)
34519 return;
34520@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34521 unsigned long num_rsrc_err, num_cmd_success;
34522 int i;
34523
34524- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34525- num_cmd_success = atomic_read(&phba->num_cmd_success);
34526+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34527+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34528
34529 vports = lpfc_create_vport_work_array(phba);
34530 if (vports != NULL)
34531@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34532 }
34533 }
34534 lpfc_destroy_vport_work_array(phba, vports);
34535- atomic_set(&phba->num_rsrc_err, 0);
34536- atomic_set(&phba->num_cmd_success, 0);
34537+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34538+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34539 }
34540
34541 /**
34542@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34543 }
34544 }
34545 lpfc_destroy_vport_work_array(phba, vports);
34546- atomic_set(&phba->num_rsrc_err, 0);
34547- atomic_set(&phba->num_cmd_success, 0);
34548+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34549+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34550 }
34551
34552 /**
34553diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34554index 5163edb..7b142bc 100644
34555--- a/drivers/scsi/pmcraid.c
34556+++ b/drivers/scsi/pmcraid.c
34557@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34558 res->scsi_dev = scsi_dev;
34559 scsi_dev->hostdata = res;
34560 res->change_detected = 0;
34561- atomic_set(&res->read_failures, 0);
34562- atomic_set(&res->write_failures, 0);
34563+ atomic_set_unchecked(&res->read_failures, 0);
34564+ atomic_set_unchecked(&res->write_failures, 0);
34565 rc = 0;
34566 }
34567 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34568@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34569
34570 /* If this was a SCSI read/write command keep count of errors */
34571 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34572- atomic_inc(&res->read_failures);
34573+ atomic_inc_unchecked(&res->read_failures);
34574 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34575- atomic_inc(&res->write_failures);
34576+ atomic_inc_unchecked(&res->write_failures);
34577
34578 if (!RES_IS_GSCSI(res->cfg_entry) &&
34579 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34580@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34581 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34582 * hrrq_id assigned here in queuecommand
34583 */
34584- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34585+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34586 pinstance->num_hrrq;
34587 cmd->cmd_done = pmcraid_io_done;
34588
34589@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34590 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34591 * hrrq_id assigned here in queuecommand
34592 */
34593- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34594+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34595 pinstance->num_hrrq;
34596
34597 if (request_size) {
34598@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34599
34600 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34601 /* add resources only after host is added into system */
34602- if (!atomic_read(&pinstance->expose_resources))
34603+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34604 return;
34605
34606 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34607@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34608 init_waitqueue_head(&pinstance->reset_wait_q);
34609
34610 atomic_set(&pinstance->outstanding_cmds, 0);
34611- atomic_set(&pinstance->last_message_id, 0);
34612- atomic_set(&pinstance->expose_resources, 0);
34613+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34614+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34615
34616 INIT_LIST_HEAD(&pinstance->free_res_q);
34617 INIT_LIST_HEAD(&pinstance->used_res_q);
34618@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34619 /* Schedule worker thread to handle CCN and take care of adding and
34620 * removing devices to OS
34621 */
34622- atomic_set(&pinstance->expose_resources, 1);
34623+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34624 schedule_work(&pinstance->worker_q);
34625 return rc;
34626
34627diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34628index ca496c7..9c791d5 100644
34629--- a/drivers/scsi/pmcraid.h
34630+++ b/drivers/scsi/pmcraid.h
34631@@ -748,7 +748,7 @@ struct pmcraid_instance {
34632 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34633
34634 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34635- atomic_t last_message_id;
34636+ atomic_unchecked_t last_message_id;
34637
34638 /* configuration table */
34639 struct pmcraid_config_table *cfg_table;
34640@@ -777,7 +777,7 @@ struct pmcraid_instance {
34641 atomic_t outstanding_cmds;
34642
34643 /* should add/delete resources to mid-layer now ?*/
34644- atomic_t expose_resources;
34645+ atomic_unchecked_t expose_resources;
34646
34647
34648
34649@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34650 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34651 };
34652 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34653- atomic_t read_failures; /* count of failed READ commands */
34654- atomic_t write_failures; /* count of failed WRITE commands */
34655+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34656+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34657
34658 /* To indicate add/delete/modify during CCN */
34659 u8 change_detected;
34660diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34661index fcf052c..a8025a4 100644
34662--- a/drivers/scsi/qla2xxx/qla_def.h
34663+++ b/drivers/scsi/qla2xxx/qla_def.h
34664@@ -2244,7 +2244,7 @@ struct isp_operations {
34665 int (*get_flash_version) (struct scsi_qla_host *, void *);
34666 int (*start_scsi) (srb_t *);
34667 int (*abort_isp) (struct scsi_qla_host *);
34668-};
34669+} __no_const;
34670
34671 /* MSI-X Support *************************************************************/
34672
34673diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34674index fd5edc6..4906148 100644
34675--- a/drivers/scsi/qla4xxx/ql4_def.h
34676+++ b/drivers/scsi/qla4xxx/ql4_def.h
34677@@ -258,7 +258,7 @@ struct ddb_entry {
34678 * (4000 only) */
34679 atomic_t relogin_timer; /* Max Time to wait for
34680 * relogin to complete */
34681- atomic_t relogin_retry_count; /* Num of times relogin has been
34682+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34683 * retried */
34684 uint32_t default_time2wait; /* Default Min time between
34685 * relogins (+aens) */
34686diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34687index 4169c8b..a8b896b 100644
34688--- a/drivers/scsi/qla4xxx/ql4_os.c
34689+++ b/drivers/scsi/qla4xxx/ql4_os.c
34690@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34691 */
34692 if (!iscsi_is_session_online(cls_sess)) {
34693 /* Reset retry relogin timer */
34694- atomic_inc(&ddb_entry->relogin_retry_count);
34695+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34696 DEBUG2(ql4_printk(KERN_INFO, ha,
34697 "%s: index[%d] relogin timed out-retrying"
34698 " relogin (%d), retry (%d)\n", __func__,
34699 ddb_entry->fw_ddb_index,
34700- atomic_read(&ddb_entry->relogin_retry_count),
34701+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34702 ddb_entry->default_time2wait + 4));
34703 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34704 atomic_set(&ddb_entry->retry_relogin_timer,
34705@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34706
34707 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34708 atomic_set(&ddb_entry->relogin_timer, 0);
34709- atomic_set(&ddb_entry->relogin_retry_count, 0);
34710+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34711
34712 ddb_entry->default_relogin_timeout =
34713 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34714diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34715index 2aeb2e9..46e3925 100644
34716--- a/drivers/scsi/scsi.c
34717+++ b/drivers/scsi/scsi.c
34718@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34719 unsigned long timeout;
34720 int rtn = 0;
34721
34722- atomic_inc(&cmd->device->iorequest_cnt);
34723+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34724
34725 /* check if the device is still usable */
34726 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34727diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34728index f85cfa6..a57c9e8 100644
34729--- a/drivers/scsi/scsi_lib.c
34730+++ b/drivers/scsi/scsi_lib.c
34731@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34732 shost = sdev->host;
34733 scsi_init_cmd_errh(cmd);
34734 cmd->result = DID_NO_CONNECT << 16;
34735- atomic_inc(&cmd->device->iorequest_cnt);
34736+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34737
34738 /*
34739 * SCSI request completion path will do scsi_device_unbusy(),
34740@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34741
34742 INIT_LIST_HEAD(&cmd->eh_entry);
34743
34744- atomic_inc(&cmd->device->iodone_cnt);
34745+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34746 if (cmd->result)
34747- atomic_inc(&cmd->device->ioerr_cnt);
34748+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34749
34750 disposition = scsi_decide_disposition(cmd);
34751 if (disposition != SUCCESS &&
34752diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34753index 04c2a27..9d8bd66 100644
34754--- a/drivers/scsi/scsi_sysfs.c
34755+++ b/drivers/scsi/scsi_sysfs.c
34756@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34757 char *buf) \
34758 { \
34759 struct scsi_device *sdev = to_scsi_device(dev); \
34760- unsigned long long count = atomic_read(&sdev->field); \
34761+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34762 return snprintf(buf, 20, "0x%llx\n", count); \
34763 } \
34764 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34765diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34766index 84a1fdf..693b0d6 100644
34767--- a/drivers/scsi/scsi_tgt_lib.c
34768+++ b/drivers/scsi/scsi_tgt_lib.c
34769@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34770 int err;
34771
34772 dprintk("%lx %u\n", uaddr, len);
34773- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34774+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34775 if (err) {
34776 /*
34777 * TODO: need to fixup sg_tablesize, max_segment_size,
34778diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34779index 1b21491..1b7f60e 100644
34780--- a/drivers/scsi/scsi_transport_fc.c
34781+++ b/drivers/scsi/scsi_transport_fc.c
34782@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34783 * Netlink Infrastructure
34784 */
34785
34786-static atomic_t fc_event_seq;
34787+static atomic_unchecked_t fc_event_seq;
34788
34789 /**
34790 * fc_get_event_number - Obtain the next sequential FC event number
34791@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34792 u32
34793 fc_get_event_number(void)
34794 {
34795- return atomic_add_return(1, &fc_event_seq);
34796+ return atomic_add_return_unchecked(1, &fc_event_seq);
34797 }
34798 EXPORT_SYMBOL(fc_get_event_number);
34799
34800@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34801 {
34802 int error;
34803
34804- atomic_set(&fc_event_seq, 0);
34805+ atomic_set_unchecked(&fc_event_seq, 0);
34806
34807 error = transport_class_register(&fc_host_class);
34808 if (error)
34809@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34810 char *cp;
34811
34812 *val = simple_strtoul(buf, &cp, 0);
34813- if ((*cp && (*cp != '\n')) || (*val < 0))
34814+ if (*cp && (*cp != '\n'))
34815 return -EINVAL;
34816 /*
34817 * Check for overflow; dev_loss_tmo is u32
34818diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34819index 96029e6..4d77fa0 100644
34820--- a/drivers/scsi/scsi_transport_iscsi.c
34821+++ b/drivers/scsi/scsi_transport_iscsi.c
34822@@ -79,7 +79,7 @@ struct iscsi_internal {
34823 struct transport_container session_cont;
34824 };
34825
34826-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34827+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34828 static struct workqueue_struct *iscsi_eh_timer_workq;
34829
34830 static DEFINE_IDA(iscsi_sess_ida);
34831@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34832 int err;
34833
34834 ihost = shost->shost_data;
34835- session->sid = atomic_add_return(1, &iscsi_session_nr);
34836+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34837
34838 if (target_id == ISCSI_MAX_TARGET) {
34839 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34840@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34841 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34842 ISCSI_TRANSPORT_VERSION);
34843
34844- atomic_set(&iscsi_session_nr, 0);
34845+ atomic_set_unchecked(&iscsi_session_nr, 0);
34846
34847 err = class_register(&iscsi_transport_class);
34848 if (err)
34849diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34850index 21a045e..ec89e03 100644
34851--- a/drivers/scsi/scsi_transport_srp.c
34852+++ b/drivers/scsi/scsi_transport_srp.c
34853@@ -33,7 +33,7 @@
34854 #include "scsi_transport_srp_internal.h"
34855
34856 struct srp_host_attrs {
34857- atomic_t next_port_id;
34858+ atomic_unchecked_t next_port_id;
34859 };
34860 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34861
34862@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34863 struct Scsi_Host *shost = dev_to_shost(dev);
34864 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34865
34866- atomic_set(&srp_host->next_port_id, 0);
34867+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34868 return 0;
34869 }
34870
34871@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34872 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34873 rport->roles = ids->roles;
34874
34875- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34876+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34877 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34878
34879 transport_setup_device(&rport->dev);
34880diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34881index 441a1c5..07cece7 100644
34882--- a/drivers/scsi/sg.c
34883+++ b/drivers/scsi/sg.c
34884@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34885 sdp->disk->disk_name,
34886 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34887 NULL,
34888- (char *)arg);
34889+ (char __user *)arg);
34890 case BLKTRACESTART:
34891 return blk_trace_startstop(sdp->device->request_queue, 1);
34892 case BLKTRACESTOP:
34893@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34894 const struct file_operations * fops;
34895 };
34896
34897-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34898+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34899 {"allow_dio", &adio_fops},
34900 {"debug", &debug_fops},
34901 {"def_reserved_size", &dressz_fops},
34902@@ -2327,7 +2327,7 @@ sg_proc_init(void)
34903 {
34904 int k, mask;
34905 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34906- struct sg_proc_leaf * leaf;
34907+ const struct sg_proc_leaf * leaf;
34908
34909 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34910 if (!sg_proc_sgp)
34911diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34912index f64250e..1ee3049 100644
34913--- a/drivers/spi/spi-dw-pci.c
34914+++ b/drivers/spi/spi-dw-pci.c
34915@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34916 #define spi_resume NULL
34917 #endif
34918
34919-static const struct pci_device_id pci_ids[] __devinitdata = {
34920+static const struct pci_device_id pci_ids[] __devinitconst = {
34921 /* Intel MID platform SPI controller 0 */
34922 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34923 {},
34924diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34925index 77eae99..b7cdcc9 100644
34926--- a/drivers/spi/spi.c
34927+++ b/drivers/spi/spi.c
34928@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34929 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34930
34931 /* portable code must never pass more than 32 bytes */
34932-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34933+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34934
34935 static u8 *buf;
34936
34937diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34938index 436fe97..4082570 100644
34939--- a/drivers/staging/gma500/power.c
34940+++ b/drivers/staging/gma500/power.c
34941@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34942 ret = gma_resume_pci(dev->pdev);
34943 if (ret == 0) {
34944 /* FIXME: we want to defer this for Medfield/Oaktrail */
34945- gma_resume_display(dev);
34946+ gma_resume_display(dev->pdev);
34947 psb_irq_preinstall(dev);
34948 psb_irq_postinstall(dev);
34949 pm_runtime_get(&dev->pdev->dev);
34950diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34951index bafccb3..e3ac78d 100644
34952--- a/drivers/staging/hv/rndis_filter.c
34953+++ b/drivers/staging/hv/rndis_filter.c
34954@@ -42,7 +42,7 @@ struct rndis_device {
34955
34956 enum rndis_device_state state;
34957 bool link_state;
34958- atomic_t new_req_id;
34959+ atomic_unchecked_t new_req_id;
34960
34961 spinlock_t request_lock;
34962 struct list_head req_list;
34963@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34964 * template
34965 */
34966 set = &rndis_msg->msg.set_req;
34967- set->req_id = atomic_inc_return(&dev->new_req_id);
34968+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34969
34970 /* Add to the request list */
34971 spin_lock_irqsave(&dev->request_lock, flags);
34972@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34973
34974 /* Setup the rndis set */
34975 halt = &request->request_msg.msg.halt_req;
34976- halt->req_id = atomic_inc_return(&dev->new_req_id);
34977+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34978
34979 /* Ignore return since this msg is optional. */
34980 rndis_filter_send_request(dev, request);
34981diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
34982index 9e8f010..af9efb5 100644
34983--- a/drivers/staging/iio/buffer_generic.h
34984+++ b/drivers/staging/iio/buffer_generic.h
34985@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
34986
34987 int (*is_enabled)(struct iio_buffer *buffer);
34988 int (*enable)(struct iio_buffer *buffer);
34989-};
34990+} __no_const;
34991
34992 /**
34993 * struct iio_buffer_setup_ops - buffer setup related callbacks
34994diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
34995index 8b307b4..a97ac91 100644
34996--- a/drivers/staging/octeon/ethernet-rx.c
34997+++ b/drivers/staging/octeon/ethernet-rx.c
34998@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34999 /* Increment RX stats for virtual ports */
35000 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35001 #ifdef CONFIG_64BIT
35002- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35003- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35004+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35005+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35006 #else
35007- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35008- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35009+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35010+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35011 #endif
35012 }
35013 netif_receive_skb(skb);
35014@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35015 dev->name);
35016 */
35017 #ifdef CONFIG_64BIT
35018- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35019+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35020 #else
35021- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35022+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35023 #endif
35024 dev_kfree_skb_irq(skb);
35025 }
35026diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35027index 076f866..2308070 100644
35028--- a/drivers/staging/octeon/ethernet.c
35029+++ b/drivers/staging/octeon/ethernet.c
35030@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35031 * since the RX tasklet also increments it.
35032 */
35033 #ifdef CONFIG_64BIT
35034- atomic64_add(rx_status.dropped_packets,
35035- (atomic64_t *)&priv->stats.rx_dropped);
35036+ atomic64_add_unchecked(rx_status.dropped_packets,
35037+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35038 #else
35039- atomic_add(rx_status.dropped_packets,
35040- (atomic_t *)&priv->stats.rx_dropped);
35041+ atomic_add_unchecked(rx_status.dropped_packets,
35042+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35043 #endif
35044 }
35045
35046diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35047index 7a19555..466456d 100644
35048--- a/drivers/staging/pohmelfs/inode.c
35049+++ b/drivers/staging/pohmelfs/inode.c
35050@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35051 mutex_init(&psb->mcache_lock);
35052 psb->mcache_root = RB_ROOT;
35053 psb->mcache_timeout = msecs_to_jiffies(5000);
35054- atomic_long_set(&psb->mcache_gen, 0);
35055+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35056
35057 psb->trans_max_pages = 100;
35058
35059@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35060 INIT_LIST_HEAD(&psb->crypto_ready_list);
35061 INIT_LIST_HEAD(&psb->crypto_active_list);
35062
35063- atomic_set(&psb->trans_gen, 1);
35064+ atomic_set_unchecked(&psb->trans_gen, 1);
35065 atomic_long_set(&psb->total_inodes, 0);
35066
35067 mutex_init(&psb->state_lock);
35068diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35069index e22665c..a2a9390 100644
35070--- a/drivers/staging/pohmelfs/mcache.c
35071+++ b/drivers/staging/pohmelfs/mcache.c
35072@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35073 m->data = data;
35074 m->start = start;
35075 m->size = size;
35076- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35077+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35078
35079 mutex_lock(&psb->mcache_lock);
35080 err = pohmelfs_mcache_insert(psb, m);
35081diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35082index 985b6b7..7699e05 100644
35083--- a/drivers/staging/pohmelfs/netfs.h
35084+++ b/drivers/staging/pohmelfs/netfs.h
35085@@ -571,14 +571,14 @@ struct pohmelfs_config;
35086 struct pohmelfs_sb {
35087 struct rb_root mcache_root;
35088 struct mutex mcache_lock;
35089- atomic_long_t mcache_gen;
35090+ atomic_long_unchecked_t mcache_gen;
35091 unsigned long mcache_timeout;
35092
35093 unsigned int idx;
35094
35095 unsigned int trans_retries;
35096
35097- atomic_t trans_gen;
35098+ atomic_unchecked_t trans_gen;
35099
35100 unsigned int crypto_attached_size;
35101 unsigned int crypto_align_size;
35102diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35103index 06c1a74..866eebc 100644
35104--- a/drivers/staging/pohmelfs/trans.c
35105+++ b/drivers/staging/pohmelfs/trans.c
35106@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35107 int err;
35108 struct netfs_cmd *cmd = t->iovec.iov_base;
35109
35110- t->gen = atomic_inc_return(&psb->trans_gen);
35111+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35112
35113 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35114 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35115diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35116index 86308a0..feaa925 100644
35117--- a/drivers/staging/rtl8712/rtl871x_io.h
35118+++ b/drivers/staging/rtl8712/rtl871x_io.h
35119@@ -108,7 +108,7 @@ struct _io_ops {
35120 u8 *pmem);
35121 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35122 u8 *pmem);
35123-};
35124+} __no_const;
35125
35126 struct io_req {
35127 struct list_head list;
35128diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35129index c7b5e8b..783d6cb 100644
35130--- a/drivers/staging/sbe-2t3e3/netdev.c
35131+++ b/drivers/staging/sbe-2t3e3/netdev.c
35132@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35133 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35134
35135 if (rlen)
35136- if (copy_to_user(data, &resp, rlen))
35137+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35138 return -EFAULT;
35139
35140 return 0;
35141diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35142index be21617..0954e45 100644
35143--- a/drivers/staging/usbip/usbip_common.h
35144+++ b/drivers/staging/usbip/usbip_common.h
35145@@ -289,7 +289,7 @@ struct usbip_device {
35146 void (*shutdown)(struct usbip_device *);
35147 void (*reset)(struct usbip_device *);
35148 void (*unusable)(struct usbip_device *);
35149- } eh_ops;
35150+ } __no_const eh_ops;
35151 };
35152
35153 #if 0
35154diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35155index 88b3298..3783eee 100644
35156--- a/drivers/staging/usbip/vhci.h
35157+++ b/drivers/staging/usbip/vhci.h
35158@@ -88,7 +88,7 @@ struct vhci_hcd {
35159 unsigned resuming:1;
35160 unsigned long re_timeout;
35161
35162- atomic_t seqnum;
35163+ atomic_unchecked_t seqnum;
35164
35165 /*
35166 * NOTE:
35167diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35168index 2ee97e2..0420b86 100644
35169--- a/drivers/staging/usbip/vhci_hcd.c
35170+++ b/drivers/staging/usbip/vhci_hcd.c
35171@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35172 return;
35173 }
35174
35175- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35176+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35177 if (priv->seqnum == 0xffff)
35178 dev_info(&urb->dev->dev, "seqnum max\n");
35179
35180@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35181 return -ENOMEM;
35182 }
35183
35184- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35185+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35186 if (unlink->seqnum == 0xffff)
35187 pr_info("seqnum max\n");
35188
35189@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35190 vdev->rhport = rhport;
35191 }
35192
35193- atomic_set(&vhci->seqnum, 0);
35194+ atomic_set_unchecked(&vhci->seqnum, 0);
35195 spin_lock_init(&vhci->lock);
35196
35197 hcd->power_budget = 0; /* no limit */
35198diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35199index 3872b8c..fe6d2f4 100644
35200--- a/drivers/staging/usbip/vhci_rx.c
35201+++ b/drivers/staging/usbip/vhci_rx.c
35202@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35203 if (!urb) {
35204 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35205 pr_info("max seqnum %d\n",
35206- atomic_read(&the_controller->seqnum));
35207+ atomic_read_unchecked(&the_controller->seqnum));
35208 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35209 return;
35210 }
35211diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35212index 7735027..30eed13 100644
35213--- a/drivers/staging/vt6655/hostap.c
35214+++ b/drivers/staging/vt6655/hostap.c
35215@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35216 *
35217 */
35218
35219+static net_device_ops_no_const apdev_netdev_ops;
35220+
35221 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35222 {
35223 PSDevice apdev_priv;
35224 struct net_device *dev = pDevice->dev;
35225 int ret;
35226- const struct net_device_ops apdev_netdev_ops = {
35227- .ndo_start_xmit = pDevice->tx_80211,
35228- };
35229
35230 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35231
35232@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35233 *apdev_priv = *pDevice;
35234 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35235
35236+ /* only half broken now */
35237+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35238 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35239
35240 pDevice->apdev->type = ARPHRD_IEEE80211;
35241diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35242index 51b5adf..098e320 100644
35243--- a/drivers/staging/vt6656/hostap.c
35244+++ b/drivers/staging/vt6656/hostap.c
35245@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35246 *
35247 */
35248
35249+static net_device_ops_no_const apdev_netdev_ops;
35250+
35251 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35252 {
35253 PSDevice apdev_priv;
35254 struct net_device *dev = pDevice->dev;
35255 int ret;
35256- const struct net_device_ops apdev_netdev_ops = {
35257- .ndo_start_xmit = pDevice->tx_80211,
35258- };
35259
35260 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35261
35262@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35263 *apdev_priv = *pDevice;
35264 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35265
35266+ /* only half broken now */
35267+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35268 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35269
35270 pDevice->apdev->type = ARPHRD_IEEE80211;
35271diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35272index 7843dfd..3db105f 100644
35273--- a/drivers/staging/wlan-ng/hfa384x_usb.c
35274+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35275@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35276
35277 struct usbctlx_completor {
35278 int (*complete) (struct usbctlx_completor *);
35279-};
35280+} __no_const;
35281
35282 static int
35283 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35284diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35285index 1ca66ea..76f1343 100644
35286--- a/drivers/staging/zcache/tmem.c
35287+++ b/drivers/staging/zcache/tmem.c
35288@@ -39,7 +39,7 @@
35289 * A tmem host implementation must use this function to register callbacks
35290 * for memory allocation.
35291 */
35292-static struct tmem_hostops tmem_hostops;
35293+static tmem_hostops_no_const tmem_hostops;
35294
35295 static void tmem_objnode_tree_init(void);
35296
35297@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35298 * A tmem host implementation must use this function to register
35299 * callbacks for a page-accessible memory (PAM) implementation
35300 */
35301-static struct tmem_pamops tmem_pamops;
35302+static tmem_pamops_no_const tmem_pamops;
35303
35304 void tmem_register_pamops(struct tmem_pamops *m)
35305 {
35306diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35307index ed147c4..94fc3c6 100644
35308--- a/drivers/staging/zcache/tmem.h
35309+++ b/drivers/staging/zcache/tmem.h
35310@@ -180,6 +180,7 @@ struct tmem_pamops {
35311 void (*new_obj)(struct tmem_obj *);
35312 int (*replace_in_obj)(void *, struct tmem_obj *);
35313 };
35314+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35315 extern void tmem_register_pamops(struct tmem_pamops *m);
35316
35317 /* memory allocation methods provided by the host implementation */
35318@@ -189,6 +190,7 @@ struct tmem_hostops {
35319 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35320 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35321 };
35322+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35323 extern void tmem_register_hostops(struct tmem_hostops *m);
35324
35325 /* core tmem accessor functions */
35326diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35327index 0c1d5c73..88e90a8 100644
35328--- a/drivers/target/iscsi/iscsi_target.c
35329+++ b/drivers/target/iscsi/iscsi_target.c
35330@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35331 * outstanding_r2ts reaches zero, go ahead and send the delayed
35332 * TASK_ABORTED status.
35333 */
35334- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35335+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35336 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35337 if (--cmd->outstanding_r2ts < 1) {
35338 iscsit_stop_dataout_timer(cmd);
35339diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35340index 6845228..df77141 100644
35341--- a/drivers/target/target_core_tmr.c
35342+++ b/drivers/target/target_core_tmr.c
35343@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35344 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35345 cmd->t_task_list_num,
35346 atomic_read(&cmd->t_task_cdbs_left),
35347- atomic_read(&cmd->t_task_cdbs_sent),
35348+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35349 atomic_read(&cmd->t_transport_active),
35350 atomic_read(&cmd->t_transport_stop),
35351 atomic_read(&cmd->t_transport_sent));
35352@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35353 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35354 " task: %p, t_fe_count: %d dev: %p\n", task,
35355 fe_count, dev);
35356- atomic_set(&cmd->t_transport_aborted, 1);
35357+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35358 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35359
35360 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35361@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35362 }
35363 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35364 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35365- atomic_set(&cmd->t_transport_aborted, 1);
35366+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35367 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35368
35369 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35370diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35371index 861628e..659ae80 100644
35372--- a/drivers/target/target_core_transport.c
35373+++ b/drivers/target/target_core_transport.c
35374@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35375
35376 dev->queue_depth = dev_limits->queue_depth;
35377 atomic_set(&dev->depth_left, dev->queue_depth);
35378- atomic_set(&dev->dev_ordered_id, 0);
35379+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35380
35381 se_dev_set_default_attribs(dev, dev_limits);
35382
35383@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35384 * Used to determine when ORDERED commands should go from
35385 * Dormant to Active status.
35386 */
35387- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35388+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35389 smp_mb__after_atomic_inc();
35390 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35391 cmd->se_ordered_id, cmd->sam_task_attr,
35392@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35393 " t_transport_active: %d t_transport_stop: %d"
35394 " t_transport_sent: %d\n", cmd->t_task_list_num,
35395 atomic_read(&cmd->t_task_cdbs_left),
35396- atomic_read(&cmd->t_task_cdbs_sent),
35397+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35398 atomic_read(&cmd->t_task_cdbs_ex_left),
35399 atomic_read(&cmd->t_transport_active),
35400 atomic_read(&cmd->t_transport_stop),
35401@@ -2089,9 +2089,9 @@ check_depth:
35402
35403 spin_lock_irqsave(&cmd->t_state_lock, flags);
35404 task->task_flags |= (TF_ACTIVE | TF_SENT);
35405- atomic_inc(&cmd->t_task_cdbs_sent);
35406+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35407
35408- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35409+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35410 cmd->t_task_list_num)
35411 atomic_set(&cmd->t_transport_sent, 1);
35412
35413@@ -4273,7 +4273,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35414 atomic_set(&cmd->transport_lun_stop, 0);
35415 }
35416 if (!atomic_read(&cmd->t_transport_active) ||
35417- atomic_read(&cmd->t_transport_aborted)) {
35418+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
35419 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35420 return false;
35421 }
35422@@ -4522,7 +4522,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35423 {
35424 int ret = 0;
35425
35426- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35427+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35428 if (!send_status ||
35429 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35430 return 1;
35431@@ -4559,7 +4559,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35432 */
35433 if (cmd->data_direction == DMA_TO_DEVICE) {
35434 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35435- atomic_inc(&cmd->t_transport_aborted);
35436+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35437 smp_mb__after_atomic_inc();
35438 }
35439 }
35440diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35441index b9040be..e3f5aab 100644
35442--- a/drivers/tty/hvc/hvcs.c
35443+++ b/drivers/tty/hvc/hvcs.c
35444@@ -83,6 +83,7 @@
35445 #include <asm/hvcserver.h>
35446 #include <asm/uaccess.h>
35447 #include <asm/vio.h>
35448+#include <asm/local.h>
35449
35450 /*
35451 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35452@@ -270,7 +271,7 @@ struct hvcs_struct {
35453 unsigned int index;
35454
35455 struct tty_struct *tty;
35456- int open_count;
35457+ local_t open_count;
35458
35459 /*
35460 * Used to tell the driver kernel_thread what operations need to take
35461@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35462
35463 spin_lock_irqsave(&hvcsd->lock, flags);
35464
35465- if (hvcsd->open_count > 0) {
35466+ if (local_read(&hvcsd->open_count) > 0) {
35467 spin_unlock_irqrestore(&hvcsd->lock, flags);
35468 printk(KERN_INFO "HVCS: vterm state unchanged. "
35469 "The hvcs device node is still in use.\n");
35470@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35471 if ((retval = hvcs_partner_connect(hvcsd)))
35472 goto error_release;
35473
35474- hvcsd->open_count = 1;
35475+ local_set(&hvcsd->open_count, 1);
35476 hvcsd->tty = tty;
35477 tty->driver_data = hvcsd;
35478
35479@@ -1179,7 +1180,7 @@ fast_open:
35480
35481 spin_lock_irqsave(&hvcsd->lock, flags);
35482 kref_get(&hvcsd->kref);
35483- hvcsd->open_count++;
35484+ local_inc(&hvcsd->open_count);
35485 hvcsd->todo_mask |= HVCS_SCHED_READ;
35486 spin_unlock_irqrestore(&hvcsd->lock, flags);
35487
35488@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35489 hvcsd = tty->driver_data;
35490
35491 spin_lock_irqsave(&hvcsd->lock, flags);
35492- if (--hvcsd->open_count == 0) {
35493+ if (local_dec_and_test(&hvcsd->open_count)) {
35494
35495 vio_disable_interrupts(hvcsd->vdev);
35496
35497@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35498 free_irq(irq, hvcsd);
35499 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35500 return;
35501- } else if (hvcsd->open_count < 0) {
35502+ } else if (local_read(&hvcsd->open_count) < 0) {
35503 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35504 " is missmanaged.\n",
35505- hvcsd->vdev->unit_address, hvcsd->open_count);
35506+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35507 }
35508
35509 spin_unlock_irqrestore(&hvcsd->lock, flags);
35510@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35511
35512 spin_lock_irqsave(&hvcsd->lock, flags);
35513 /* Preserve this so that we know how many kref refs to put */
35514- temp_open_count = hvcsd->open_count;
35515+ temp_open_count = local_read(&hvcsd->open_count);
35516
35517 /*
35518 * Don't kref put inside the spinlock because the destruction
35519@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35520 hvcsd->tty->driver_data = NULL;
35521 hvcsd->tty = NULL;
35522
35523- hvcsd->open_count = 0;
35524+ local_set(&hvcsd->open_count, 0);
35525
35526 /* This will drop any buffered data on the floor which is OK in a hangup
35527 * scenario. */
35528@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35529 * the middle of a write operation? This is a crummy place to do this
35530 * but we want to keep it all in the spinlock.
35531 */
35532- if (hvcsd->open_count <= 0) {
35533+ if (local_read(&hvcsd->open_count) <= 0) {
35534 spin_unlock_irqrestore(&hvcsd->lock, flags);
35535 return -ENODEV;
35536 }
35537@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35538 {
35539 struct hvcs_struct *hvcsd = tty->driver_data;
35540
35541- if (!hvcsd || hvcsd->open_count <= 0)
35542+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35543 return 0;
35544
35545 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35546diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35547index ef92869..f4ebd88 100644
35548--- a/drivers/tty/ipwireless/tty.c
35549+++ b/drivers/tty/ipwireless/tty.c
35550@@ -29,6 +29,7 @@
35551 #include <linux/tty_driver.h>
35552 #include <linux/tty_flip.h>
35553 #include <linux/uaccess.h>
35554+#include <asm/local.h>
35555
35556 #include "tty.h"
35557 #include "network.h"
35558@@ -51,7 +52,7 @@ struct ipw_tty {
35559 int tty_type;
35560 struct ipw_network *network;
35561 struct tty_struct *linux_tty;
35562- int open_count;
35563+ local_t open_count;
35564 unsigned int control_lines;
35565 struct mutex ipw_tty_mutex;
35566 int tx_bytes_queued;
35567@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35568 mutex_unlock(&tty->ipw_tty_mutex);
35569 return -ENODEV;
35570 }
35571- if (tty->open_count == 0)
35572+ if (local_read(&tty->open_count) == 0)
35573 tty->tx_bytes_queued = 0;
35574
35575- tty->open_count++;
35576+ local_inc(&tty->open_count);
35577
35578 tty->linux_tty = linux_tty;
35579 linux_tty->driver_data = tty;
35580@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35581
35582 static void do_ipw_close(struct ipw_tty *tty)
35583 {
35584- tty->open_count--;
35585-
35586- if (tty->open_count == 0) {
35587+ if (local_dec_return(&tty->open_count) == 0) {
35588 struct tty_struct *linux_tty = tty->linux_tty;
35589
35590 if (linux_tty != NULL) {
35591@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35592 return;
35593
35594 mutex_lock(&tty->ipw_tty_mutex);
35595- if (tty->open_count == 0) {
35596+ if (local_read(&tty->open_count) == 0) {
35597 mutex_unlock(&tty->ipw_tty_mutex);
35598 return;
35599 }
35600@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35601 return;
35602 }
35603
35604- if (!tty->open_count) {
35605+ if (!local_read(&tty->open_count)) {
35606 mutex_unlock(&tty->ipw_tty_mutex);
35607 return;
35608 }
35609@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35610 return -ENODEV;
35611
35612 mutex_lock(&tty->ipw_tty_mutex);
35613- if (!tty->open_count) {
35614+ if (!local_read(&tty->open_count)) {
35615 mutex_unlock(&tty->ipw_tty_mutex);
35616 return -EINVAL;
35617 }
35618@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35619 if (!tty)
35620 return -ENODEV;
35621
35622- if (!tty->open_count)
35623+ if (!local_read(&tty->open_count))
35624 return -EINVAL;
35625
35626 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35627@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35628 if (!tty)
35629 return 0;
35630
35631- if (!tty->open_count)
35632+ if (!local_read(&tty->open_count))
35633 return 0;
35634
35635 return tty->tx_bytes_queued;
35636@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35637 if (!tty)
35638 return -ENODEV;
35639
35640- if (!tty->open_count)
35641+ if (!local_read(&tty->open_count))
35642 return -EINVAL;
35643
35644 return get_control_lines(tty);
35645@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35646 if (!tty)
35647 return -ENODEV;
35648
35649- if (!tty->open_count)
35650+ if (!local_read(&tty->open_count))
35651 return -EINVAL;
35652
35653 return set_control_lines(tty, set, clear);
35654@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35655 if (!tty)
35656 return -ENODEV;
35657
35658- if (!tty->open_count)
35659+ if (!local_read(&tty->open_count))
35660 return -EINVAL;
35661
35662 /* FIXME: Exactly how is the tty object locked here .. */
35663@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35664 against a parallel ioctl etc */
35665 mutex_lock(&ttyj->ipw_tty_mutex);
35666 }
35667- while (ttyj->open_count)
35668+ while (local_read(&ttyj->open_count))
35669 do_ipw_close(ttyj);
35670 ipwireless_disassociate_network_ttys(network,
35671 ttyj->channel_idx);
35672diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35673index fc7bbba..9527e93 100644
35674--- a/drivers/tty/n_gsm.c
35675+++ b/drivers/tty/n_gsm.c
35676@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35677 kref_init(&dlci->ref);
35678 mutex_init(&dlci->mutex);
35679 dlci->fifo = &dlci->_fifo;
35680- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35681+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35682 kfree(dlci);
35683 return NULL;
35684 }
35685diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35686index 39d6ab6..eb97f41 100644
35687--- a/drivers/tty/n_tty.c
35688+++ b/drivers/tty/n_tty.c
35689@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35690 {
35691 *ops = tty_ldisc_N_TTY;
35692 ops->owner = NULL;
35693- ops->refcount = ops->flags = 0;
35694+ atomic_set(&ops->refcount, 0);
35695+ ops->flags = 0;
35696 }
35697 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35698diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35699index e18604b..a7d5a11 100644
35700--- a/drivers/tty/pty.c
35701+++ b/drivers/tty/pty.c
35702@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35703 register_sysctl_table(pty_root_table);
35704
35705 /* Now create the /dev/ptmx special device */
35706+ pax_open_kernel();
35707 tty_default_fops(&ptmx_fops);
35708- ptmx_fops.open = ptmx_open;
35709+ *(void **)&ptmx_fops.open = ptmx_open;
35710+ pax_close_kernel();
35711
35712 cdev_init(&ptmx_cdev, &ptmx_fops);
35713 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35714diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35715index 2b42a01..32a2ed3 100644
35716--- a/drivers/tty/serial/kgdboc.c
35717+++ b/drivers/tty/serial/kgdboc.c
35718@@ -24,8 +24,9 @@
35719 #define MAX_CONFIG_LEN 40
35720
35721 static struct kgdb_io kgdboc_io_ops;
35722+static struct kgdb_io kgdboc_io_ops_console;
35723
35724-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35725+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35726 static int configured = -1;
35727
35728 static char config[MAX_CONFIG_LEN];
35729@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35730 kgdboc_unregister_kbd();
35731 if (configured == 1)
35732 kgdb_unregister_io_module(&kgdboc_io_ops);
35733+ else if (configured == 2)
35734+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35735 }
35736
35737 static int configure_kgdboc(void)
35738@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35739 int err;
35740 char *cptr = config;
35741 struct console *cons;
35742+ int is_console = 0;
35743
35744 err = kgdboc_option_setup(config);
35745 if (err || !strlen(config) || isspace(config[0]))
35746 goto noconfig;
35747
35748 err = -ENODEV;
35749- kgdboc_io_ops.is_console = 0;
35750 kgdb_tty_driver = NULL;
35751
35752 kgdboc_use_kms = 0;
35753@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35754 int idx;
35755 if (cons->device && cons->device(cons, &idx) == p &&
35756 idx == tty_line) {
35757- kgdboc_io_ops.is_console = 1;
35758+ is_console = 1;
35759 break;
35760 }
35761 cons = cons->next;
35762@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35763 kgdb_tty_line = tty_line;
35764
35765 do_register:
35766- err = kgdb_register_io_module(&kgdboc_io_ops);
35767+ if (is_console) {
35768+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35769+ configured = 2;
35770+ } else {
35771+ err = kgdb_register_io_module(&kgdboc_io_ops);
35772+ configured = 1;
35773+ }
35774 if (err)
35775 goto noconfig;
35776
35777- configured = 1;
35778-
35779 return 0;
35780
35781 noconfig:
35782@@ -213,7 +220,7 @@ noconfig:
35783 static int __init init_kgdboc(void)
35784 {
35785 /* Already configured? */
35786- if (configured == 1)
35787+ if (configured >= 1)
35788 return 0;
35789
35790 return configure_kgdboc();
35791@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35792 if (config[len - 1] == '\n')
35793 config[len - 1] = '\0';
35794
35795- if (configured == 1)
35796+ if (configured >= 1)
35797 cleanup_kgdboc();
35798
35799 /* Go and configure with the new params. */
35800@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35801 .post_exception = kgdboc_post_exp_handler,
35802 };
35803
35804+static struct kgdb_io kgdboc_io_ops_console = {
35805+ .name = "kgdboc",
35806+ .read_char = kgdboc_get_char,
35807+ .write_char = kgdboc_put_char,
35808+ .pre_exception = kgdboc_pre_exp_handler,
35809+ .post_exception = kgdboc_post_exp_handler,
35810+ .is_console = 1
35811+};
35812+
35813 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35814 /* This is only available if kgdboc is a built in for early debugging */
35815 static int __init kgdboc_early_init(char *opt)
35816diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35817index 05085be..67eadb0 100644
35818--- a/drivers/tty/tty_io.c
35819+++ b/drivers/tty/tty_io.c
35820@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35821
35822 void tty_default_fops(struct file_operations *fops)
35823 {
35824- *fops = tty_fops;
35825+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35826 }
35827
35828 /*
35829diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35830index 8e0924f..4204eb4 100644
35831--- a/drivers/tty/tty_ldisc.c
35832+++ b/drivers/tty/tty_ldisc.c
35833@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35834 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35835 struct tty_ldisc_ops *ldo = ld->ops;
35836
35837- ldo->refcount--;
35838+ atomic_dec(&ldo->refcount);
35839 module_put(ldo->owner);
35840 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35841
35842@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35843 spin_lock_irqsave(&tty_ldisc_lock, flags);
35844 tty_ldiscs[disc] = new_ldisc;
35845 new_ldisc->num = disc;
35846- new_ldisc->refcount = 0;
35847+ atomic_set(&new_ldisc->refcount, 0);
35848 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35849
35850 return ret;
35851@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35852 return -EINVAL;
35853
35854 spin_lock_irqsave(&tty_ldisc_lock, flags);
35855- if (tty_ldiscs[disc]->refcount)
35856+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35857 ret = -EBUSY;
35858 else
35859 tty_ldiscs[disc] = NULL;
35860@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35861 if (ldops) {
35862 ret = ERR_PTR(-EAGAIN);
35863 if (try_module_get(ldops->owner)) {
35864- ldops->refcount++;
35865+ atomic_inc(&ldops->refcount);
35866 ret = ldops;
35867 }
35868 }
35869@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35870 unsigned long flags;
35871
35872 spin_lock_irqsave(&tty_ldisc_lock, flags);
35873- ldops->refcount--;
35874+ atomic_dec(&ldops->refcount);
35875 module_put(ldops->owner);
35876 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35877 }
35878diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35879index a605549..6bd3c96 100644
35880--- a/drivers/tty/vt/keyboard.c
35881+++ b/drivers/tty/vt/keyboard.c
35882@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35883 kbd->kbdmode == VC_OFF) &&
35884 value != KVAL(K_SAK))
35885 return; /* SAK is allowed even in raw mode */
35886+
35887+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35888+ {
35889+ void *func = fn_handler[value];
35890+ if (func == fn_show_state || func == fn_show_ptregs ||
35891+ func == fn_show_mem)
35892+ return;
35893+ }
35894+#endif
35895+
35896 fn_handler[value](vc);
35897 }
35898
35899diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35900index 65447c5..0526f0a 100644
35901--- a/drivers/tty/vt/vt_ioctl.c
35902+++ b/drivers/tty/vt/vt_ioctl.c
35903@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35904 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35905 return -EFAULT;
35906
35907- if (!capable(CAP_SYS_TTY_CONFIG))
35908- perm = 0;
35909-
35910 switch (cmd) {
35911 case KDGKBENT:
35912 key_map = key_maps[s];
35913@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35914 val = (i ? K_HOLE : K_NOSUCHMAP);
35915 return put_user(val, &user_kbe->kb_value);
35916 case KDSKBENT:
35917+ if (!capable(CAP_SYS_TTY_CONFIG))
35918+ perm = 0;
35919+
35920 if (!perm)
35921 return -EPERM;
35922 if (!i && v == K_NOSUCHMAP) {
35923@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35924 int i, j, k;
35925 int ret;
35926
35927- if (!capable(CAP_SYS_TTY_CONFIG))
35928- perm = 0;
35929-
35930 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35931 if (!kbs) {
35932 ret = -ENOMEM;
35933@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35934 kfree(kbs);
35935 return ((p && *p) ? -EOVERFLOW : 0);
35936 case KDSKBSENT:
35937+ if (!capable(CAP_SYS_TTY_CONFIG))
35938+ perm = 0;
35939+
35940 if (!perm) {
35941 ret = -EPERM;
35942 goto reterr;
35943diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35944index a783d53..cb30d94 100644
35945--- a/drivers/uio/uio.c
35946+++ b/drivers/uio/uio.c
35947@@ -25,6 +25,7 @@
35948 #include <linux/kobject.h>
35949 #include <linux/cdev.h>
35950 #include <linux/uio_driver.h>
35951+#include <asm/local.h>
35952
35953 #define UIO_MAX_DEVICES (1U << MINORBITS)
35954
35955@@ -32,10 +33,10 @@ struct uio_device {
35956 struct module *owner;
35957 struct device *dev;
35958 int minor;
35959- atomic_t event;
35960+ atomic_unchecked_t event;
35961 struct fasync_struct *async_queue;
35962 wait_queue_head_t wait;
35963- int vma_count;
35964+ local_t vma_count;
35965 struct uio_info *info;
35966 struct kobject *map_dir;
35967 struct kobject *portio_dir;
35968@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
35969 struct device_attribute *attr, char *buf)
35970 {
35971 struct uio_device *idev = dev_get_drvdata(dev);
35972- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35973+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35974 }
35975
35976 static struct device_attribute uio_class_attributes[] = {
35977@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
35978 {
35979 struct uio_device *idev = info->uio_dev;
35980
35981- atomic_inc(&idev->event);
35982+ atomic_inc_unchecked(&idev->event);
35983 wake_up_interruptible(&idev->wait);
35984 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35985 }
35986@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
35987 }
35988
35989 listener->dev = idev;
35990- listener->event_count = atomic_read(&idev->event);
35991+ listener->event_count = atomic_read_unchecked(&idev->event);
35992 filep->private_data = listener;
35993
35994 if (idev->info->open) {
35995@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
35996 return -EIO;
35997
35998 poll_wait(filep, &idev->wait, wait);
35999- if (listener->event_count != atomic_read(&idev->event))
36000+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36001 return POLLIN | POLLRDNORM;
36002 return 0;
36003 }
36004@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36005 do {
36006 set_current_state(TASK_INTERRUPTIBLE);
36007
36008- event_count = atomic_read(&idev->event);
36009+ event_count = atomic_read_unchecked(&idev->event);
36010 if (event_count != listener->event_count) {
36011 if (copy_to_user(buf, &event_count, count))
36012 retval = -EFAULT;
36013@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36014 static void uio_vma_open(struct vm_area_struct *vma)
36015 {
36016 struct uio_device *idev = vma->vm_private_data;
36017- idev->vma_count++;
36018+ local_inc(&idev->vma_count);
36019 }
36020
36021 static void uio_vma_close(struct vm_area_struct *vma)
36022 {
36023 struct uio_device *idev = vma->vm_private_data;
36024- idev->vma_count--;
36025+ local_dec(&idev->vma_count);
36026 }
36027
36028 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36029@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36030 idev->owner = owner;
36031 idev->info = info;
36032 init_waitqueue_head(&idev->wait);
36033- atomic_set(&idev->event, 0);
36034+ atomic_set_unchecked(&idev->event, 0);
36035
36036 ret = uio_get_minor(idev);
36037 if (ret)
36038diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36039index a845f8b..4f54072 100644
36040--- a/drivers/usb/atm/cxacru.c
36041+++ b/drivers/usb/atm/cxacru.c
36042@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36043 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36044 if (ret < 2)
36045 return -EINVAL;
36046- if (index < 0 || index > 0x7f)
36047+ if (index > 0x7f)
36048 return -EINVAL;
36049 pos += tmp;
36050
36051diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36052index d3448ca..d2864ca 100644
36053--- a/drivers/usb/atm/usbatm.c
36054+++ b/drivers/usb/atm/usbatm.c
36055@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36056 if (printk_ratelimit())
36057 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36058 __func__, vpi, vci);
36059- atomic_inc(&vcc->stats->rx_err);
36060+ atomic_inc_unchecked(&vcc->stats->rx_err);
36061 return;
36062 }
36063
36064@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36065 if (length > ATM_MAX_AAL5_PDU) {
36066 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36067 __func__, length, vcc);
36068- atomic_inc(&vcc->stats->rx_err);
36069+ atomic_inc_unchecked(&vcc->stats->rx_err);
36070 goto out;
36071 }
36072
36073@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36074 if (sarb->len < pdu_length) {
36075 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36076 __func__, pdu_length, sarb->len, vcc);
36077- atomic_inc(&vcc->stats->rx_err);
36078+ atomic_inc_unchecked(&vcc->stats->rx_err);
36079 goto out;
36080 }
36081
36082 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36083 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36084 __func__, vcc);
36085- atomic_inc(&vcc->stats->rx_err);
36086+ atomic_inc_unchecked(&vcc->stats->rx_err);
36087 goto out;
36088 }
36089
36090@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36091 if (printk_ratelimit())
36092 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36093 __func__, length);
36094- atomic_inc(&vcc->stats->rx_drop);
36095+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36096 goto out;
36097 }
36098
36099@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36100
36101 vcc->push(vcc, skb);
36102
36103- atomic_inc(&vcc->stats->rx);
36104+ atomic_inc_unchecked(&vcc->stats->rx);
36105 out:
36106 skb_trim(sarb, 0);
36107 }
36108@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36109 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36110
36111 usbatm_pop(vcc, skb);
36112- atomic_inc(&vcc->stats->tx);
36113+ atomic_inc_unchecked(&vcc->stats->tx);
36114
36115 skb = skb_dequeue(&instance->sndqueue);
36116 }
36117@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36118 if (!left--)
36119 return sprintf(page,
36120 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36121- atomic_read(&atm_dev->stats.aal5.tx),
36122- atomic_read(&atm_dev->stats.aal5.tx_err),
36123- atomic_read(&atm_dev->stats.aal5.rx),
36124- atomic_read(&atm_dev->stats.aal5.rx_err),
36125- atomic_read(&atm_dev->stats.aal5.rx_drop));
36126+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36127+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36128+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36129+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36130+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36131
36132 if (!left--) {
36133 if (instance->disconnected)
36134diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36135index d956965..4179a77 100644
36136--- a/drivers/usb/core/devices.c
36137+++ b/drivers/usb/core/devices.c
36138@@ -126,7 +126,7 @@ static const char format_endpt[] =
36139 * time it gets called.
36140 */
36141 static struct device_connect_event {
36142- atomic_t count;
36143+ atomic_unchecked_t count;
36144 wait_queue_head_t wait;
36145 } device_event = {
36146 .count = ATOMIC_INIT(1),
36147@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36148
36149 void usbfs_conn_disc_event(void)
36150 {
36151- atomic_add(2, &device_event.count);
36152+ atomic_add_unchecked(2, &device_event.count);
36153 wake_up(&device_event.wait);
36154 }
36155
36156@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36157
36158 poll_wait(file, &device_event.wait, wait);
36159
36160- event_count = atomic_read(&device_event.count);
36161+ event_count = atomic_read_unchecked(&device_event.count);
36162 if (file->f_version != event_count) {
36163 file->f_version = event_count;
36164 return POLLIN | POLLRDNORM;
36165diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36166index b3bdfed..a9460e0 100644
36167--- a/drivers/usb/core/message.c
36168+++ b/drivers/usb/core/message.c
36169@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36170 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36171 if (buf) {
36172 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36173- if (len > 0) {
36174- smallbuf = kmalloc(++len, GFP_NOIO);
36175+ if (len++ > 0) {
36176+ smallbuf = kmalloc(len, GFP_NOIO);
36177 if (!smallbuf)
36178 return buf;
36179 memcpy(smallbuf, buf, len);
36180diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36181index 1fc8f12..20647c1 100644
36182--- a/drivers/usb/early/ehci-dbgp.c
36183+++ b/drivers/usb/early/ehci-dbgp.c
36184@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36185
36186 #ifdef CONFIG_KGDB
36187 static struct kgdb_io kgdbdbgp_io_ops;
36188-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36189+static struct kgdb_io kgdbdbgp_io_ops_console;
36190+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36191 #else
36192 #define dbgp_kgdb_mode (0)
36193 #endif
36194@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36195 .write_char = kgdbdbgp_write_char,
36196 };
36197
36198+static struct kgdb_io kgdbdbgp_io_ops_console = {
36199+ .name = "kgdbdbgp",
36200+ .read_char = kgdbdbgp_read_char,
36201+ .write_char = kgdbdbgp_write_char,
36202+ .is_console = 1
36203+};
36204+
36205 static int kgdbdbgp_wait_time;
36206
36207 static int __init kgdbdbgp_parse_config(char *str)
36208@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36209 ptr++;
36210 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36211 }
36212- kgdb_register_io_module(&kgdbdbgp_io_ops);
36213- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36214+ if (early_dbgp_console.index != -1)
36215+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36216+ else
36217+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36218
36219 return 0;
36220 }
36221diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36222index d6bea3e..60b250e 100644
36223--- a/drivers/usb/wusbcore/wa-hc.h
36224+++ b/drivers/usb/wusbcore/wa-hc.h
36225@@ -192,7 +192,7 @@ struct wahc {
36226 struct list_head xfer_delayed_list;
36227 spinlock_t xfer_list_lock;
36228 struct work_struct xfer_work;
36229- atomic_t xfer_id_count;
36230+ atomic_unchecked_t xfer_id_count;
36231 };
36232
36233
36234@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36235 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36236 spin_lock_init(&wa->xfer_list_lock);
36237 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36238- atomic_set(&wa->xfer_id_count, 1);
36239+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36240 }
36241
36242 /**
36243diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36244index 57c01ab..8a05959 100644
36245--- a/drivers/usb/wusbcore/wa-xfer.c
36246+++ b/drivers/usb/wusbcore/wa-xfer.c
36247@@ -296,7 +296,7 @@ out:
36248 */
36249 static void wa_xfer_id_init(struct wa_xfer *xfer)
36250 {
36251- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36252+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36253 }
36254
36255 /*
36256diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36257index c14c42b..f955cc2 100644
36258--- a/drivers/vhost/vhost.c
36259+++ b/drivers/vhost/vhost.c
36260@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36261 return 0;
36262 }
36263
36264-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36265+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36266 {
36267 struct file *eventfp, *filep = NULL,
36268 *pollstart = NULL, *pollstop = NULL;
36269diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36270index b0b2ac3..89a4399 100644
36271--- a/drivers/video/aty/aty128fb.c
36272+++ b/drivers/video/aty/aty128fb.c
36273@@ -148,7 +148,7 @@ enum {
36274 };
36275
36276 /* Must match above enum */
36277-static const char *r128_family[] __devinitdata = {
36278+static const char *r128_family[] __devinitconst = {
36279 "AGP",
36280 "PCI",
36281 "PRO AGP",
36282diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36283index 5c3960d..15cf8fc 100644
36284--- a/drivers/video/fbcmap.c
36285+++ b/drivers/video/fbcmap.c
36286@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36287 rc = -ENODEV;
36288 goto out;
36289 }
36290- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36291- !info->fbops->fb_setcmap)) {
36292+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36293 rc = -EINVAL;
36294 goto out1;
36295 }
36296diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36297index ad93629..e020fc3 100644
36298--- a/drivers/video/fbmem.c
36299+++ b/drivers/video/fbmem.c
36300@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36301 image->dx += image->width + 8;
36302 }
36303 } else if (rotate == FB_ROTATE_UD) {
36304- for (x = 0; x < num && image->dx >= 0; x++) {
36305+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36306 info->fbops->fb_imageblit(info, image);
36307 image->dx -= image->width + 8;
36308 }
36309@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36310 image->dy += image->height + 8;
36311 }
36312 } else if (rotate == FB_ROTATE_CCW) {
36313- for (x = 0; x < num && image->dy >= 0; x++) {
36314+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36315 info->fbops->fb_imageblit(info, image);
36316 image->dy -= image->height + 8;
36317 }
36318@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36319 return -EFAULT;
36320 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36321 return -EINVAL;
36322- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36323+ if (con2fb.framebuffer >= FB_MAX)
36324 return -EINVAL;
36325 if (!registered_fb[con2fb.framebuffer])
36326 request_module("fb%d", con2fb.framebuffer);
36327diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36328index 5a5d092..265c5ed 100644
36329--- a/drivers/video/geode/gx1fb_core.c
36330+++ b/drivers/video/geode/gx1fb_core.c
36331@@ -29,7 +29,7 @@ static int crt_option = 1;
36332 static char panel_option[32] = "";
36333
36334 /* Modes relevant to the GX1 (taken from modedb.c) */
36335-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36336+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36337 /* 640x480-60 VESA */
36338 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36339 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36340diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36341index 0fad23f..0e9afa4 100644
36342--- a/drivers/video/gxt4500.c
36343+++ b/drivers/video/gxt4500.c
36344@@ -156,7 +156,7 @@ struct gxt4500_par {
36345 static char *mode_option;
36346
36347 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36348-static const struct fb_videomode defaultmode __devinitdata = {
36349+static const struct fb_videomode defaultmode __devinitconst = {
36350 .refresh = 60,
36351 .xres = 1280,
36352 .yres = 1024,
36353@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36354 return 0;
36355 }
36356
36357-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36358+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36359 .id = "IBM GXT4500P",
36360 .type = FB_TYPE_PACKED_PIXELS,
36361 .visual = FB_VISUAL_PSEUDOCOLOR,
36362diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36363index 7672d2e..b56437f 100644
36364--- a/drivers/video/i810/i810_accel.c
36365+++ b/drivers/video/i810/i810_accel.c
36366@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36367 }
36368 }
36369 printk("ringbuffer lockup!!!\n");
36370+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36371 i810_report_error(mmio);
36372 par->dev_flags |= LOCKUP;
36373 info->pixmap.scan_align = 1;
36374diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36375index 318f6fb..9a389c1 100644
36376--- a/drivers/video/i810/i810_main.c
36377+++ b/drivers/video/i810/i810_main.c
36378@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36379 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36380
36381 /* PCI */
36382-static const char *i810_pci_list[] __devinitdata = {
36383+static const char *i810_pci_list[] __devinitconst = {
36384 "Intel(R) 810 Framebuffer Device" ,
36385 "Intel(R) 810-DC100 Framebuffer Device" ,
36386 "Intel(R) 810E Framebuffer Device" ,
36387diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36388index de36693..3c63fc2 100644
36389--- a/drivers/video/jz4740_fb.c
36390+++ b/drivers/video/jz4740_fb.c
36391@@ -136,7 +136,7 @@ struct jzfb {
36392 uint32_t pseudo_palette[16];
36393 };
36394
36395-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36396+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36397 .id = "JZ4740 FB",
36398 .type = FB_TYPE_PACKED_PIXELS,
36399 .visual = FB_VISUAL_TRUECOLOR,
36400diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36401index 3c14e43..eafa544 100644
36402--- a/drivers/video/logo/logo_linux_clut224.ppm
36403+++ b/drivers/video/logo/logo_linux_clut224.ppm
36404@@ -1,1604 +1,1123 @@
36405 P3
36406-# Standard 224-color Linux logo
36407 80 80
36408 255
36409- 0 0 0 0 0 0 0 0 0 0 0 0
36410- 0 0 0 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 0 0 0 0 0 0 0 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 0 0 0 0 0 0 0 0 0 0
36417- 0 0 0 0 0 0 0 0 0 0 0 0
36418- 6 6 6 6 6 6 10 10 10 10 10 10
36419- 10 10 10 6 6 6 6 6 6 6 6 6
36420- 0 0 0 0 0 0 0 0 0 0 0 0
36421- 0 0 0 0 0 0 0 0 0 0 0 0
36422- 0 0 0 0 0 0 0 0 0 0 0 0
36423- 0 0 0 0 0 0 0 0 0 0 0 0
36424- 0 0 0 0 0 0 0 0 0 0 0 0
36425- 0 0 0 0 0 0 0 0 0 0 0 0
36426- 0 0 0 0 0 0 0 0 0 0 0 0
36427- 0 0 0 0 0 0 0 0 0 0 0 0
36428- 0 0 0 0 0 0 0 0 0 0 0 0
36429- 0 0 0 0 0 0 0 0 0 0 0 0
36430- 0 0 0 0 0 0 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 0 0 0 0 0 0 0 0 0 0
36437- 0 0 0 6 6 6 10 10 10 14 14 14
36438- 22 22 22 26 26 26 30 30 30 34 34 34
36439- 30 30 30 30 30 30 26 26 26 18 18 18
36440- 14 14 14 10 10 10 6 6 6 0 0 0
36441- 0 0 0 0 0 0 0 0 0 0 0 0
36442- 0 0 0 0 0 0 0 0 0 0 0 0
36443- 0 0 0 0 0 0 0 0 0 0 0 0
36444- 0 0 0 0 0 0 0 0 0 0 0 0
36445- 0 0 0 0 0 0 0 0 0 0 0 0
36446- 0 0 0 0 0 0 0 0 0 0 0 0
36447- 0 0 0 0 0 0 0 0 0 0 0 0
36448- 0 0 0 0 0 0 0 0 0 0 0 0
36449- 0 0 0 0 0 0 0 0 0 0 0 0
36450- 0 0 0 0 0 1 0 0 1 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 6 6 6 14 14 14 26 26 26 42 42 42
36458- 54 54 54 66 66 66 78 78 78 78 78 78
36459- 78 78 78 74 74 74 66 66 66 54 54 54
36460- 42 42 42 26 26 26 18 18 18 10 10 10
36461- 6 6 6 0 0 0 0 0 0 0 0 0
36462- 0 0 0 0 0 0 0 0 0 0 0 0
36463- 0 0 0 0 0 0 0 0 0 0 0 0
36464- 0 0 0 0 0 0 0 0 0 0 0 0
36465- 0 0 0 0 0 0 0 0 0 0 0 0
36466- 0 0 0 0 0 0 0 0 0 0 0 0
36467- 0 0 0 0 0 0 0 0 0 0 0 0
36468- 0 0 0 0 0 0 0 0 0 0 0 0
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 1 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 10 10 10
36477- 22 22 22 42 42 42 66 66 66 86 86 86
36478- 66 66 66 38 38 38 38 38 38 22 22 22
36479- 26 26 26 34 34 34 54 54 54 66 66 66
36480- 86 86 86 70 70 70 46 46 46 26 26 26
36481- 14 14 14 6 6 6 0 0 0 0 0 0
36482- 0 0 0 0 0 0 0 0 0 0 0 0
36483- 0 0 0 0 0 0 0 0 0 0 0 0
36484- 0 0 0 0 0 0 0 0 0 0 0 0
36485- 0 0 0 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 0 0 0
36487- 0 0 0 0 0 0 0 0 0 0 0 0
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 1 0 0 1 0 0 1 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 10 10 10 26 26 26
36497- 50 50 50 82 82 82 58 58 58 6 6 6
36498- 2 2 6 2 2 6 2 2 6 2 2 6
36499- 2 2 6 2 2 6 2 2 6 2 2 6
36500- 6 6 6 54 54 54 86 86 86 66 66 66
36501- 38 38 38 18 18 18 6 6 6 0 0 0
36502- 0 0 0 0 0 0 0 0 0 0 0 0
36503- 0 0 0 0 0 0 0 0 0 0 0 0
36504- 0 0 0 0 0 0 0 0 0 0 0 0
36505- 0 0 0 0 0 0 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 6 6 6 22 22 22 50 50 50
36517- 78 78 78 34 34 34 2 2 6 2 2 6
36518- 2 2 6 2 2 6 2 2 6 2 2 6
36519- 2 2 6 2 2 6 2 2 6 2 2 6
36520- 2 2 6 2 2 6 6 6 6 70 70 70
36521- 78 78 78 46 46 46 22 22 22 6 6 6
36522- 0 0 0 0 0 0 0 0 0 0 0 0
36523- 0 0 0 0 0 0 0 0 0 0 0 0
36524- 0 0 0 0 0 0 0 0 0 0 0 0
36525- 0 0 0 0 0 0 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 1 0 0 1 0 0 1 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 6 6 6 18 18 18 42 42 42 82 82 82
36537- 26 26 26 2 2 6 2 2 6 2 2 6
36538- 2 2 6 2 2 6 2 2 6 2 2 6
36539- 2 2 6 2 2 6 2 2 6 14 14 14
36540- 46 46 46 34 34 34 6 6 6 2 2 6
36541- 42 42 42 78 78 78 42 42 42 18 18 18
36542- 6 6 6 0 0 0 0 0 0 0 0 0
36543- 0 0 0 0 0 0 0 0 0 0 0 0
36544- 0 0 0 0 0 0 0 0 0 0 0 0
36545- 0 0 0 0 0 0 0 0 0 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 1 0 0 0 0 0 1 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 10 10 10 30 30 30 66 66 66 58 58 58
36557- 2 2 6 2 2 6 2 2 6 2 2 6
36558- 2 2 6 2 2 6 2 2 6 2 2 6
36559- 2 2 6 2 2 6 2 2 6 26 26 26
36560- 86 86 86 101 101 101 46 46 46 10 10 10
36561- 2 2 6 58 58 58 70 70 70 34 34 34
36562- 10 10 10 0 0 0 0 0 0 0 0 0
36563- 0 0 0 0 0 0 0 0 0 0 0 0
36564- 0 0 0 0 0 0 0 0 0 0 0 0
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 1 0 0 1 0 0 1 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 14 14 14 42 42 42 86 86 86 10 10 10
36577- 2 2 6 2 2 6 2 2 6 2 2 6
36578- 2 2 6 2 2 6 2 2 6 2 2 6
36579- 2 2 6 2 2 6 2 2 6 30 30 30
36580- 94 94 94 94 94 94 58 58 58 26 26 26
36581- 2 2 6 6 6 6 78 78 78 54 54 54
36582- 22 22 22 6 6 6 0 0 0 0 0 0
36583- 0 0 0 0 0 0 0 0 0 0 0 0
36584- 0 0 0 0 0 0 0 0 0 0 0 0
36585- 0 0 0 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 6 6 6
36596- 22 22 22 62 62 62 62 62 62 2 2 6
36597- 2 2 6 2 2 6 2 2 6 2 2 6
36598- 2 2 6 2 2 6 2 2 6 2 2 6
36599- 2 2 6 2 2 6 2 2 6 26 26 26
36600- 54 54 54 38 38 38 18 18 18 10 10 10
36601- 2 2 6 2 2 6 34 34 34 82 82 82
36602- 38 38 38 14 14 14 0 0 0 0 0 0
36603- 0 0 0 0 0 0 0 0 0 0 0 0
36604- 0 0 0 0 0 0 0 0 0 0 0 0
36605- 0 0 0 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 1 0 0 1 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 6 6 6
36616- 30 30 30 78 78 78 30 30 30 2 2 6
36617- 2 2 6 2 2 6 2 2 6 2 2 6
36618- 2 2 6 2 2 6 2 2 6 2 2 6
36619- 2 2 6 2 2 6 2 2 6 10 10 10
36620- 10 10 10 2 2 6 2 2 6 2 2 6
36621- 2 2 6 2 2 6 2 2 6 78 78 78
36622- 50 50 50 18 18 18 6 6 6 0 0 0
36623- 0 0 0 0 0 0 0 0 0 0 0 0
36624- 0 0 0 0 0 0 0 0 0 0 0 0
36625- 0 0 0 0 0 0 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 1 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 10 10 10
36636- 38 38 38 86 86 86 14 14 14 2 2 6
36637- 2 2 6 2 2 6 2 2 6 2 2 6
36638- 2 2 6 2 2 6 2 2 6 2 2 6
36639- 2 2 6 2 2 6 2 2 6 2 2 6
36640- 2 2 6 2 2 6 2 2 6 2 2 6
36641- 2 2 6 2 2 6 2 2 6 54 54 54
36642- 66 66 66 26 26 26 6 6 6 0 0 0
36643- 0 0 0 0 0 0 0 0 0 0 0 0
36644- 0 0 0 0 0 0 0 0 0 0 0 0
36645- 0 0 0 0 0 0 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 1 0 0 1 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 14 14 14
36656- 42 42 42 82 82 82 2 2 6 2 2 6
36657- 2 2 6 6 6 6 10 10 10 2 2 6
36658- 2 2 6 2 2 6 2 2 6 2 2 6
36659- 2 2 6 2 2 6 2 2 6 6 6 6
36660- 14 14 14 10 10 10 2 2 6 2 2 6
36661- 2 2 6 2 2 6 2 2 6 18 18 18
36662- 82 82 82 34 34 34 10 10 10 0 0 0
36663- 0 0 0 0 0 0 0 0 0 0 0 0
36664- 0 0 0 0 0 0 0 0 0 0 0 0
36665- 0 0 0 0 0 0 0 0 0 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 1 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 14 14 14
36676- 46 46 46 86 86 86 2 2 6 2 2 6
36677- 6 6 6 6 6 6 22 22 22 34 34 34
36678- 6 6 6 2 2 6 2 2 6 2 2 6
36679- 2 2 6 2 2 6 18 18 18 34 34 34
36680- 10 10 10 50 50 50 22 22 22 2 2 6
36681- 2 2 6 2 2 6 2 2 6 10 10 10
36682- 86 86 86 42 42 42 14 14 14 0 0 0
36683- 0 0 0 0 0 0 0 0 0 0 0 0
36684- 0 0 0 0 0 0 0 0 0 0 0 0
36685- 0 0 0 0 0 0 0 0 0 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 1 0 0 1 0 0 1 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 14 14 14
36696- 46 46 46 86 86 86 2 2 6 2 2 6
36697- 38 38 38 116 116 116 94 94 94 22 22 22
36698- 22 22 22 2 2 6 2 2 6 2 2 6
36699- 14 14 14 86 86 86 138 138 138 162 162 162
36700-154 154 154 38 38 38 26 26 26 6 6 6
36701- 2 2 6 2 2 6 2 2 6 2 2 6
36702- 86 86 86 46 46 46 14 14 14 0 0 0
36703- 0 0 0 0 0 0 0 0 0 0 0 0
36704- 0 0 0 0 0 0 0 0 0 0 0 0
36705- 0 0 0 0 0 0 0 0 0 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 14 14 14
36716- 46 46 46 86 86 86 2 2 6 14 14 14
36717-134 134 134 198 198 198 195 195 195 116 116 116
36718- 10 10 10 2 2 6 2 2 6 6 6 6
36719-101 98 89 187 187 187 210 210 210 218 218 218
36720-214 214 214 134 134 134 14 14 14 6 6 6
36721- 2 2 6 2 2 6 2 2 6 2 2 6
36722- 86 86 86 50 50 50 18 18 18 6 6 6
36723- 0 0 0 0 0 0 0 0 0 0 0 0
36724- 0 0 0 0 0 0 0 0 0 0 0 0
36725- 0 0 0 0 0 0 0 0 0 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 0 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 1 0 0 0
36730- 0 0 1 0 0 1 0 0 1 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 14 14 14
36736- 46 46 46 86 86 86 2 2 6 54 54 54
36737-218 218 218 195 195 195 226 226 226 246 246 246
36738- 58 58 58 2 2 6 2 2 6 30 30 30
36739-210 210 210 253 253 253 174 174 174 123 123 123
36740-221 221 221 234 234 234 74 74 74 2 2 6
36741- 2 2 6 2 2 6 2 2 6 2 2 6
36742- 70 70 70 58 58 58 22 22 22 6 6 6
36743- 0 0 0 0 0 0 0 0 0 0 0 0
36744- 0 0 0 0 0 0 0 0 0 0 0 0
36745- 0 0 0 0 0 0 0 0 0 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 0 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 14 14 14
36756- 46 46 46 82 82 82 2 2 6 106 106 106
36757-170 170 170 26 26 26 86 86 86 226 226 226
36758-123 123 123 10 10 10 14 14 14 46 46 46
36759-231 231 231 190 190 190 6 6 6 70 70 70
36760- 90 90 90 238 238 238 158 158 158 2 2 6
36761- 2 2 6 2 2 6 2 2 6 2 2 6
36762- 70 70 70 58 58 58 22 22 22 6 6 6
36763- 0 0 0 0 0 0 0 0 0 0 0 0
36764- 0 0 0 0 0 0 0 0 0 0 0 0
36765- 0 0 0 0 0 0 0 0 0 0 0 0
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 0 0 0 0 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 1 0 0 0
36770- 0 0 1 0 0 1 0 0 1 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 14 14 14
36776- 42 42 42 86 86 86 6 6 6 116 116 116
36777-106 106 106 6 6 6 70 70 70 149 149 149
36778-128 128 128 18 18 18 38 38 38 54 54 54
36779-221 221 221 106 106 106 2 2 6 14 14 14
36780- 46 46 46 190 190 190 198 198 198 2 2 6
36781- 2 2 6 2 2 6 2 2 6 2 2 6
36782- 74 74 74 62 62 62 22 22 22 6 6 6
36783- 0 0 0 0 0 0 0 0 0 0 0 0
36784- 0 0 0 0 0 0 0 0 0 0 0 0
36785- 0 0 0 0 0 0 0 0 0 0 0 0
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 0 0 0 0 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 1 0 0 0
36790- 0 0 1 0 0 0 0 0 1 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 14 14 14
36796- 42 42 42 94 94 94 14 14 14 101 101 101
36797-128 128 128 2 2 6 18 18 18 116 116 116
36798-118 98 46 121 92 8 121 92 8 98 78 10
36799-162 162 162 106 106 106 2 2 6 2 2 6
36800- 2 2 6 195 195 195 195 195 195 6 6 6
36801- 2 2 6 2 2 6 2 2 6 2 2 6
36802- 74 74 74 62 62 62 22 22 22 6 6 6
36803- 0 0 0 0 0 0 0 0 0 0 0 0
36804- 0 0 0 0 0 0 0 0 0 0 0 0
36805- 0 0 0 0 0 0 0 0 0 0 0 0
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 1 0 0 1
36810- 0 0 1 0 0 0 0 0 1 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 10 10 10
36816- 38 38 38 90 90 90 14 14 14 58 58 58
36817-210 210 210 26 26 26 54 38 6 154 114 10
36818-226 170 11 236 186 11 225 175 15 184 144 12
36819-215 174 15 175 146 61 37 26 9 2 2 6
36820- 70 70 70 246 246 246 138 138 138 2 2 6
36821- 2 2 6 2 2 6 2 2 6 2 2 6
36822- 70 70 70 66 66 66 26 26 26 6 6 6
36823- 0 0 0 0 0 0 0 0 0 0 0 0
36824- 0 0 0 0 0 0 0 0 0 0 0 0
36825- 0 0 0 0 0 0 0 0 0 0 0 0
36826- 0 0 0 0 0 0 0 0 0 0 0 0
36827- 0 0 0 0 0 0 0 0 0 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 10 10 10
36836- 38 38 38 86 86 86 14 14 14 10 10 10
36837-195 195 195 188 164 115 192 133 9 225 175 15
36838-239 182 13 234 190 10 232 195 16 232 200 30
36839-245 207 45 241 208 19 232 195 16 184 144 12
36840-218 194 134 211 206 186 42 42 42 2 2 6
36841- 2 2 6 2 2 6 2 2 6 2 2 6
36842- 50 50 50 74 74 74 30 30 30 6 6 6
36843- 0 0 0 0 0 0 0 0 0 0 0 0
36844- 0 0 0 0 0 0 0 0 0 0 0 0
36845- 0 0 0 0 0 0 0 0 0 0 0 0
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 10 10 10
36856- 34 34 34 86 86 86 14 14 14 2 2 6
36857-121 87 25 192 133 9 219 162 10 239 182 13
36858-236 186 11 232 195 16 241 208 19 244 214 54
36859-246 218 60 246 218 38 246 215 20 241 208 19
36860-241 208 19 226 184 13 121 87 25 2 2 6
36861- 2 2 6 2 2 6 2 2 6 2 2 6
36862- 50 50 50 82 82 82 34 34 34 10 10 10
36863- 0 0 0 0 0 0 0 0 0 0 0 0
36864- 0 0 0 0 0 0 0 0 0 0 0 0
36865- 0 0 0 0 0 0 0 0 0 0 0 0
36866- 0 0 0 0 0 0 0 0 0 0 0 0
36867- 0 0 0 0 0 0 0 0 0 0 0 0
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 10 10 10
36876- 34 34 34 82 82 82 30 30 30 61 42 6
36877-180 123 7 206 145 10 230 174 11 239 182 13
36878-234 190 10 238 202 15 241 208 19 246 218 74
36879-246 218 38 246 215 20 246 215 20 246 215 20
36880-226 184 13 215 174 15 184 144 12 6 6 6
36881- 2 2 6 2 2 6 2 2 6 2 2 6
36882- 26 26 26 94 94 94 42 42 42 14 14 14
36883- 0 0 0 0 0 0 0 0 0 0 0 0
36884- 0 0 0 0 0 0 0 0 0 0 0 0
36885- 0 0 0 0 0 0 0 0 0 0 0 0
36886- 0 0 0 0 0 0 0 0 0 0 0 0
36887- 0 0 0 0 0 0 0 0 0 0 0 0
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 10 10 10
36896- 30 30 30 78 78 78 50 50 50 104 69 6
36897-192 133 9 216 158 10 236 178 12 236 186 11
36898-232 195 16 241 208 19 244 214 54 245 215 43
36899-246 215 20 246 215 20 241 208 19 198 155 10
36900-200 144 11 216 158 10 156 118 10 2 2 6
36901- 2 2 6 2 2 6 2 2 6 2 2 6
36902- 6 6 6 90 90 90 54 54 54 18 18 18
36903- 6 6 6 0 0 0 0 0 0 0 0 0
36904- 0 0 0 0 0 0 0 0 0 0 0 0
36905- 0 0 0 0 0 0 0 0 0 0 0 0
36906- 0 0 0 0 0 0 0 0 0 0 0 0
36907- 0 0 0 0 0 0 0 0 0 0 0 0
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 10 10 10
36916- 30 30 30 78 78 78 46 46 46 22 22 22
36917-137 92 6 210 162 10 239 182 13 238 190 10
36918-238 202 15 241 208 19 246 215 20 246 215 20
36919-241 208 19 203 166 17 185 133 11 210 150 10
36920-216 158 10 210 150 10 102 78 10 2 2 6
36921- 6 6 6 54 54 54 14 14 14 2 2 6
36922- 2 2 6 62 62 62 74 74 74 30 30 30
36923- 10 10 10 0 0 0 0 0 0 0 0 0
36924- 0 0 0 0 0 0 0 0 0 0 0 0
36925- 0 0 0 0 0 0 0 0 0 0 0 0
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 0 0 0 0
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 10 10 10
36936- 34 34 34 78 78 78 50 50 50 6 6 6
36937- 94 70 30 139 102 15 190 146 13 226 184 13
36938-232 200 30 232 195 16 215 174 15 190 146 13
36939-168 122 10 192 133 9 210 150 10 213 154 11
36940-202 150 34 182 157 106 101 98 89 2 2 6
36941- 2 2 6 78 78 78 116 116 116 58 58 58
36942- 2 2 6 22 22 22 90 90 90 46 46 46
36943- 18 18 18 6 6 6 0 0 0 0 0 0
36944- 0 0 0 0 0 0 0 0 0 0 0 0
36945- 0 0 0 0 0 0 0 0 0 0 0 0
36946- 0 0 0 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 0 0 0 0
36948- 0 0 0 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 10 10 10
36956- 38 38 38 86 86 86 50 50 50 6 6 6
36957-128 128 128 174 154 114 156 107 11 168 122 10
36958-198 155 10 184 144 12 197 138 11 200 144 11
36959-206 145 10 206 145 10 197 138 11 188 164 115
36960-195 195 195 198 198 198 174 174 174 14 14 14
36961- 2 2 6 22 22 22 116 116 116 116 116 116
36962- 22 22 22 2 2 6 74 74 74 70 70 70
36963- 30 30 30 10 10 10 0 0 0 0 0 0
36964- 0 0 0 0 0 0 0 0 0 0 0 0
36965- 0 0 0 0 0 0 0 0 0 0 0 0
36966- 0 0 0 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 6 6 6 18 18 18
36976- 50 50 50 101 101 101 26 26 26 10 10 10
36977-138 138 138 190 190 190 174 154 114 156 107 11
36978-197 138 11 200 144 11 197 138 11 192 133 9
36979-180 123 7 190 142 34 190 178 144 187 187 187
36980-202 202 202 221 221 221 214 214 214 66 66 66
36981- 2 2 6 2 2 6 50 50 50 62 62 62
36982- 6 6 6 2 2 6 10 10 10 90 90 90
36983- 50 50 50 18 18 18 6 6 6 0 0 0
36984- 0 0 0 0 0 0 0 0 0 0 0 0
36985- 0 0 0 0 0 0 0 0 0 0 0 0
36986- 0 0 0 0 0 0 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 10 10 10 34 34 34
36996- 74 74 74 74 74 74 2 2 6 6 6 6
36997-144 144 144 198 198 198 190 190 190 178 166 146
36998-154 121 60 156 107 11 156 107 11 168 124 44
36999-174 154 114 187 187 187 190 190 190 210 210 210
37000-246 246 246 253 253 253 253 253 253 182 182 182
37001- 6 6 6 2 2 6 2 2 6 2 2 6
37002- 2 2 6 2 2 6 2 2 6 62 62 62
37003- 74 74 74 34 34 34 14 14 14 0 0 0
37004- 0 0 0 0 0 0 0 0 0 0 0 0
37005- 0 0 0 0 0 0 0 0 0 0 0 0
37006- 0 0 0 0 0 0 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 10 10 10 22 22 22 54 54 54
37016- 94 94 94 18 18 18 2 2 6 46 46 46
37017-234 234 234 221 221 221 190 190 190 190 190 190
37018-190 190 190 187 187 187 187 187 187 190 190 190
37019-190 190 190 195 195 195 214 214 214 242 242 242
37020-253 253 253 253 253 253 253 253 253 253 253 253
37021- 82 82 82 2 2 6 2 2 6 2 2 6
37022- 2 2 6 2 2 6 2 2 6 14 14 14
37023- 86 86 86 54 54 54 22 22 22 6 6 6
37024- 0 0 0 0 0 0 0 0 0 0 0 0
37025- 0 0 0 0 0 0 0 0 0 0 0 0
37026- 0 0 0 0 0 0 0 0 0 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 6 6 6 18 18 18 46 46 46 90 90 90
37036- 46 46 46 18 18 18 6 6 6 182 182 182
37037-253 253 253 246 246 246 206 206 206 190 190 190
37038-190 190 190 190 190 190 190 190 190 190 190 190
37039-206 206 206 231 231 231 250 250 250 253 253 253
37040-253 253 253 253 253 253 253 253 253 253 253 253
37041-202 202 202 14 14 14 2 2 6 2 2 6
37042- 2 2 6 2 2 6 2 2 6 2 2 6
37043- 42 42 42 86 86 86 42 42 42 18 18 18
37044- 6 6 6 0 0 0 0 0 0 0 0 0
37045- 0 0 0 0 0 0 0 0 0 0 0 0
37046- 0 0 0 0 0 0 0 0 0 0 0 0
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 0 0 0 6 6 6
37055- 14 14 14 38 38 38 74 74 74 66 66 66
37056- 2 2 6 6 6 6 90 90 90 250 250 250
37057-253 253 253 253 253 253 238 238 238 198 198 198
37058-190 190 190 190 190 190 195 195 195 221 221 221
37059-246 246 246 253 253 253 253 253 253 253 253 253
37060-253 253 253 253 253 253 253 253 253 253 253 253
37061-253 253 253 82 82 82 2 2 6 2 2 6
37062- 2 2 6 2 2 6 2 2 6 2 2 6
37063- 2 2 6 78 78 78 70 70 70 34 34 34
37064- 14 14 14 6 6 6 0 0 0 0 0 0
37065- 0 0 0 0 0 0 0 0 0 0 0 0
37066- 0 0 0 0 0 0 0 0 0 0 0 0
37067- 0 0 0 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 0 0 0 14 14 14
37075- 34 34 34 66 66 66 78 78 78 6 6 6
37076- 2 2 6 18 18 18 218 218 218 253 253 253
37077-253 253 253 253 253 253 253 253 253 246 246 246
37078-226 226 226 231 231 231 246 246 246 253 253 253
37079-253 253 253 253 253 253 253 253 253 253 253 253
37080-253 253 253 253 253 253 253 253 253 253 253 253
37081-253 253 253 178 178 178 2 2 6 2 2 6
37082- 2 2 6 2 2 6 2 2 6 2 2 6
37083- 2 2 6 18 18 18 90 90 90 62 62 62
37084- 30 30 30 10 10 10 0 0 0 0 0 0
37085- 0 0 0 0 0 0 0 0 0 0 0 0
37086- 0 0 0 0 0 0 0 0 0 0 0 0
37087- 0 0 0 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 0 0 0 10 10 10 26 26 26
37095- 58 58 58 90 90 90 18 18 18 2 2 6
37096- 2 2 6 110 110 110 253 253 253 253 253 253
37097-253 253 253 253 253 253 253 253 253 253 253 253
37098-250 250 250 253 253 253 253 253 253 253 253 253
37099-253 253 253 253 253 253 253 253 253 253 253 253
37100-253 253 253 253 253 253 253 253 253 253 253 253
37101-253 253 253 231 231 231 18 18 18 2 2 6
37102- 2 2 6 2 2 6 2 2 6 2 2 6
37103- 2 2 6 2 2 6 18 18 18 94 94 94
37104- 54 54 54 26 26 26 10 10 10 0 0 0
37105- 0 0 0 0 0 0 0 0 0 0 0 0
37106- 0 0 0 0 0 0 0 0 0 0 0 0
37107- 0 0 0 0 0 0 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 0 0 0 6 6 6 22 22 22 50 50 50
37115- 90 90 90 26 26 26 2 2 6 2 2 6
37116- 14 14 14 195 195 195 250 250 250 253 253 253
37117-253 253 253 253 253 253 253 253 253 253 253 253
37118-253 253 253 253 253 253 253 253 253 253 253 253
37119-253 253 253 253 253 253 253 253 253 253 253 253
37120-253 253 253 253 253 253 253 253 253 253 253 253
37121-250 250 250 242 242 242 54 54 54 2 2 6
37122- 2 2 6 2 2 6 2 2 6 2 2 6
37123- 2 2 6 2 2 6 2 2 6 38 38 38
37124- 86 86 86 50 50 50 22 22 22 6 6 6
37125- 0 0 0 0 0 0 0 0 0 0 0 0
37126- 0 0 0 0 0 0 0 0 0 0 0 0
37127- 0 0 0 0 0 0 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 0 0 0
37134- 6 6 6 14 14 14 38 38 38 82 82 82
37135- 34 34 34 2 2 6 2 2 6 2 2 6
37136- 42 42 42 195 195 195 246 246 246 253 253 253
37137-253 253 253 253 253 253 253 253 253 250 250 250
37138-242 242 242 242 242 242 250 250 250 253 253 253
37139-253 253 253 253 253 253 253 253 253 253 253 253
37140-253 253 253 250 250 250 246 246 246 238 238 238
37141-226 226 226 231 231 231 101 101 101 6 6 6
37142- 2 2 6 2 2 6 2 2 6 2 2 6
37143- 2 2 6 2 2 6 2 2 6 2 2 6
37144- 38 38 38 82 82 82 42 42 42 14 14 14
37145- 6 6 6 0 0 0 0 0 0 0 0 0
37146- 0 0 0 0 0 0 0 0 0 0 0 0
37147- 0 0 0 0 0 0 0 0 0 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 0 0 0
37154- 10 10 10 26 26 26 62 62 62 66 66 66
37155- 2 2 6 2 2 6 2 2 6 6 6 6
37156- 70 70 70 170 170 170 206 206 206 234 234 234
37157-246 246 246 250 250 250 250 250 250 238 238 238
37158-226 226 226 231 231 231 238 238 238 250 250 250
37159-250 250 250 250 250 250 246 246 246 231 231 231
37160-214 214 214 206 206 206 202 202 202 202 202 202
37161-198 198 198 202 202 202 182 182 182 18 18 18
37162- 2 2 6 2 2 6 2 2 6 2 2 6
37163- 2 2 6 2 2 6 2 2 6 2 2 6
37164- 2 2 6 62 62 62 66 66 66 30 30 30
37165- 10 10 10 0 0 0 0 0 0 0 0 0
37166- 0 0 0 0 0 0 0 0 0 0 0 0
37167- 0 0 0 0 0 0 0 0 0 0 0 0
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 0 0 0 0 0 0
37174- 14 14 14 42 42 42 82 82 82 18 18 18
37175- 2 2 6 2 2 6 2 2 6 10 10 10
37176- 94 94 94 182 182 182 218 218 218 242 242 242
37177-250 250 250 253 253 253 253 253 253 250 250 250
37178-234 234 234 253 253 253 253 253 253 253 253 253
37179-253 253 253 253 253 253 253 253 253 246 246 246
37180-238 238 238 226 226 226 210 210 210 202 202 202
37181-195 195 195 195 195 195 210 210 210 158 158 158
37182- 6 6 6 14 14 14 50 50 50 14 14 14
37183- 2 2 6 2 2 6 2 2 6 2 2 6
37184- 2 2 6 6 6 6 86 86 86 46 46 46
37185- 18 18 18 6 6 6 0 0 0 0 0 0
37186- 0 0 0 0 0 0 0 0 0 0 0 0
37187- 0 0 0 0 0 0 0 0 0 0 0 0
37188- 0 0 0 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 0 0 0
37193- 0 0 0 0 0 0 0 0 0 6 6 6
37194- 22 22 22 54 54 54 70 70 70 2 2 6
37195- 2 2 6 10 10 10 2 2 6 22 22 22
37196-166 166 166 231 231 231 250 250 250 253 253 253
37197-253 253 253 253 253 253 253 253 253 250 250 250
37198-242 242 242 253 253 253 253 253 253 253 253 253
37199-253 253 253 253 253 253 253 253 253 253 253 253
37200-253 253 253 253 253 253 253 253 253 246 246 246
37201-231 231 231 206 206 206 198 198 198 226 226 226
37202- 94 94 94 2 2 6 6 6 6 38 38 38
37203- 30 30 30 2 2 6 2 2 6 2 2 6
37204- 2 2 6 2 2 6 62 62 62 66 66 66
37205- 26 26 26 10 10 10 0 0 0 0 0 0
37206- 0 0 0 0 0 0 0 0 0 0 0 0
37207- 0 0 0 0 0 0 0 0 0 0 0 0
37208- 0 0 0 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 0 0 0
37213- 0 0 0 0 0 0 0 0 0 10 10 10
37214- 30 30 30 74 74 74 50 50 50 2 2 6
37215- 26 26 26 26 26 26 2 2 6 106 106 106
37216-238 238 238 253 253 253 253 253 253 253 253 253
37217-253 253 253 253 253 253 253 253 253 253 253 253
37218-253 253 253 253 253 253 253 253 253 253 253 253
37219-253 253 253 253 253 253 253 253 253 253 253 253
37220-253 253 253 253 253 253 253 253 253 253 253 253
37221-253 253 253 246 246 246 218 218 218 202 202 202
37222-210 210 210 14 14 14 2 2 6 2 2 6
37223- 30 30 30 22 22 22 2 2 6 2 2 6
37224- 2 2 6 2 2 6 18 18 18 86 86 86
37225- 42 42 42 14 14 14 0 0 0 0 0 0
37226- 0 0 0 0 0 0 0 0 0 0 0 0
37227- 0 0 0 0 0 0 0 0 0 0 0 0
37228- 0 0 0 0 0 0 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 0 0 0
37233- 0 0 0 0 0 0 0 0 0 14 14 14
37234- 42 42 42 90 90 90 22 22 22 2 2 6
37235- 42 42 42 2 2 6 18 18 18 218 218 218
37236-253 253 253 253 253 253 253 253 253 253 253 253
37237-253 253 253 253 253 253 253 253 253 253 253 253
37238-253 253 253 253 253 253 253 253 253 253 253 253
37239-253 253 253 253 253 253 253 253 253 253 253 253
37240-253 253 253 253 253 253 253 253 253 253 253 253
37241-253 253 253 253 253 253 250 250 250 221 221 221
37242-218 218 218 101 101 101 2 2 6 14 14 14
37243- 18 18 18 38 38 38 10 10 10 2 2 6
37244- 2 2 6 2 2 6 2 2 6 78 78 78
37245- 58 58 58 22 22 22 6 6 6 0 0 0
37246- 0 0 0 0 0 0 0 0 0 0 0 0
37247- 0 0 0 0 0 0 0 0 0 0 0 0
37248- 0 0 0 0 0 0 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 0 0 0
37253- 0 0 0 0 0 0 6 6 6 18 18 18
37254- 54 54 54 82 82 82 2 2 6 26 26 26
37255- 22 22 22 2 2 6 123 123 123 253 253 253
37256-253 253 253 253 253 253 253 253 253 253 253 253
37257-253 253 253 253 253 253 253 253 253 253 253 253
37258-253 253 253 253 253 253 253 253 253 253 253 253
37259-253 253 253 253 253 253 253 253 253 253 253 253
37260-253 253 253 253 253 253 253 253 253 253 253 253
37261-253 253 253 253 253 253 253 253 253 250 250 250
37262-238 238 238 198 198 198 6 6 6 38 38 38
37263- 58 58 58 26 26 26 38 38 38 2 2 6
37264- 2 2 6 2 2 6 2 2 6 46 46 46
37265- 78 78 78 30 30 30 10 10 10 0 0 0
37266- 0 0 0 0 0 0 0 0 0 0 0 0
37267- 0 0 0 0 0 0 0 0 0 0 0 0
37268- 0 0 0 0 0 0 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 0 0 0
37273- 0 0 0 0 0 0 10 10 10 30 30 30
37274- 74 74 74 58 58 58 2 2 6 42 42 42
37275- 2 2 6 22 22 22 231 231 231 253 253 253
37276-253 253 253 253 253 253 253 253 253 253 253 253
37277-253 253 253 253 253 253 253 253 253 250 250 250
37278-253 253 253 253 253 253 253 253 253 253 253 253
37279-253 253 253 253 253 253 253 253 253 253 253 253
37280-253 253 253 253 253 253 253 253 253 253 253 253
37281-253 253 253 253 253 253 253 253 253 253 253 253
37282-253 253 253 246 246 246 46 46 46 38 38 38
37283- 42 42 42 14 14 14 38 38 38 14 14 14
37284- 2 2 6 2 2 6 2 2 6 6 6 6
37285- 86 86 86 46 46 46 14 14 14 0 0 0
37286- 0 0 0 0 0 0 0 0 0 0 0 0
37287- 0 0 0 0 0 0 0 0 0 0 0 0
37288- 0 0 0 0 0 0 0 0 0 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 0 0 0
37293- 0 0 0 6 6 6 14 14 14 42 42 42
37294- 90 90 90 18 18 18 18 18 18 26 26 26
37295- 2 2 6 116 116 116 253 253 253 253 253 253
37296-253 253 253 253 253 253 253 253 253 253 253 253
37297-253 253 253 253 253 253 250 250 250 238 238 238
37298-253 253 253 253 253 253 253 253 253 253 253 253
37299-253 253 253 253 253 253 253 253 253 253 253 253
37300-253 253 253 253 253 253 253 253 253 253 253 253
37301-253 253 253 253 253 253 253 253 253 253 253 253
37302-253 253 253 253 253 253 94 94 94 6 6 6
37303- 2 2 6 2 2 6 10 10 10 34 34 34
37304- 2 2 6 2 2 6 2 2 6 2 2 6
37305- 74 74 74 58 58 58 22 22 22 6 6 6
37306- 0 0 0 0 0 0 0 0 0 0 0 0
37307- 0 0 0 0 0 0 0 0 0 0 0 0
37308- 0 0 0 0 0 0 0 0 0 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 0 0 0
37313- 0 0 0 10 10 10 26 26 26 66 66 66
37314- 82 82 82 2 2 6 38 38 38 6 6 6
37315- 14 14 14 210 210 210 253 253 253 253 253 253
37316-253 253 253 253 253 253 253 253 253 253 253 253
37317-253 253 253 253 253 253 246 246 246 242 242 242
37318-253 253 253 253 253 253 253 253 253 253 253 253
37319-253 253 253 253 253 253 253 253 253 253 253 253
37320-253 253 253 253 253 253 253 253 253 253 253 253
37321-253 253 253 253 253 253 253 253 253 253 253 253
37322-253 253 253 253 253 253 144 144 144 2 2 6
37323- 2 2 6 2 2 6 2 2 6 46 46 46
37324- 2 2 6 2 2 6 2 2 6 2 2 6
37325- 42 42 42 74 74 74 30 30 30 10 10 10
37326- 0 0 0 0 0 0 0 0 0 0 0 0
37327- 0 0 0 0 0 0 0 0 0 0 0 0
37328- 0 0 0 0 0 0 0 0 0 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 0 0 0 0 0 0
37333- 6 6 6 14 14 14 42 42 42 90 90 90
37334- 26 26 26 6 6 6 42 42 42 2 2 6
37335- 74 74 74 250 250 250 253 253 253 253 253 253
37336-253 253 253 253 253 253 253 253 253 253 253 253
37337-253 253 253 253 253 253 242 242 242 242 242 242
37338-253 253 253 253 253 253 253 253 253 253 253 253
37339-253 253 253 253 253 253 253 253 253 253 253 253
37340-253 253 253 253 253 253 253 253 253 253 253 253
37341-253 253 253 253 253 253 253 253 253 253 253 253
37342-253 253 253 253 253 253 182 182 182 2 2 6
37343- 2 2 6 2 2 6 2 2 6 46 46 46
37344- 2 2 6 2 2 6 2 2 6 2 2 6
37345- 10 10 10 86 86 86 38 38 38 10 10 10
37346- 0 0 0 0 0 0 0 0 0 0 0 0
37347- 0 0 0 0 0 0 0 0 0 0 0 0
37348- 0 0 0 0 0 0 0 0 0 0 0 0
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 0 0 0 0 0 0
37353- 10 10 10 26 26 26 66 66 66 82 82 82
37354- 2 2 6 22 22 22 18 18 18 2 2 6
37355-149 149 149 253 253 253 253 253 253 253 253 253
37356-253 253 253 253 253 253 253 253 253 253 253 253
37357-253 253 253 253 253 253 234 234 234 242 242 242
37358-253 253 253 253 253 253 253 253 253 253 253 253
37359-253 253 253 253 253 253 253 253 253 253 253 253
37360-253 253 253 253 253 253 253 253 253 253 253 253
37361-253 253 253 253 253 253 253 253 253 253 253 253
37362-253 253 253 253 253 253 206 206 206 2 2 6
37363- 2 2 6 2 2 6 2 2 6 38 38 38
37364- 2 2 6 2 2 6 2 2 6 2 2 6
37365- 6 6 6 86 86 86 46 46 46 14 14 14
37366- 0 0 0 0 0 0 0 0 0 0 0 0
37367- 0 0 0 0 0 0 0 0 0 0 0 0
37368- 0 0 0 0 0 0 0 0 0 0 0 0
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 0 0 0 0 0 0 6 6 6
37373- 18 18 18 46 46 46 86 86 86 18 18 18
37374- 2 2 6 34 34 34 10 10 10 6 6 6
37375-210 210 210 253 253 253 253 253 253 253 253 253
37376-253 253 253 253 253 253 253 253 253 253 253 253
37377-253 253 253 253 253 253 234 234 234 242 242 242
37378-253 253 253 253 253 253 253 253 253 253 253 253
37379-253 253 253 253 253 253 253 253 253 253 253 253
37380-253 253 253 253 253 253 253 253 253 253 253 253
37381-253 253 253 253 253 253 253 253 253 253 253 253
37382-253 253 253 253 253 253 221 221 221 6 6 6
37383- 2 2 6 2 2 6 6 6 6 30 30 30
37384- 2 2 6 2 2 6 2 2 6 2 2 6
37385- 2 2 6 82 82 82 54 54 54 18 18 18
37386- 6 6 6 0 0 0 0 0 0 0 0 0
37387- 0 0 0 0 0 0 0 0 0 0 0 0
37388- 0 0 0 0 0 0 0 0 0 0 0 0
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 0 0 0 0 0 0 10 10 10
37393- 26 26 26 66 66 66 62 62 62 2 2 6
37394- 2 2 6 38 38 38 10 10 10 26 26 26
37395-238 238 238 253 253 253 253 253 253 253 253 253
37396-253 253 253 253 253 253 253 253 253 253 253 253
37397-253 253 253 253 253 253 231 231 231 238 238 238
37398-253 253 253 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 253 253 253 253 253 253
37400-253 253 253 253 253 253 253 253 253 253 253 253
37401-253 253 253 253 253 253 253 253 253 253 253 253
37402-253 253 253 253 253 253 231 231 231 6 6 6
37403- 2 2 6 2 2 6 10 10 10 30 30 30
37404- 2 2 6 2 2 6 2 2 6 2 2 6
37405- 2 2 6 66 66 66 58 58 58 22 22 22
37406- 6 6 6 0 0 0 0 0 0 0 0 0
37407- 0 0 0 0 0 0 0 0 0 0 0 0
37408- 0 0 0 0 0 0 0 0 0 0 0 0
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 0 0 0 0 0 0 0 0 0 10 10 10
37413- 38 38 38 78 78 78 6 6 6 2 2 6
37414- 2 2 6 46 46 46 14 14 14 42 42 42
37415-246 246 246 253 253 253 253 253 253 253 253 253
37416-253 253 253 253 253 253 253 253 253 253 253 253
37417-253 253 253 253 253 253 231 231 231 242 242 242
37418-253 253 253 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 253 253 253 253 253 253
37420-253 253 253 253 253 253 253 253 253 253 253 253
37421-253 253 253 253 253 253 253 253 253 253 253 253
37422-253 253 253 253 253 253 234 234 234 10 10 10
37423- 2 2 6 2 2 6 22 22 22 14 14 14
37424- 2 2 6 2 2 6 2 2 6 2 2 6
37425- 2 2 6 66 66 66 62 62 62 22 22 22
37426- 6 6 6 0 0 0 0 0 0 0 0 0
37427- 0 0 0 0 0 0 0 0 0 0 0 0
37428- 0 0 0 0 0 0 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 0 0 0 0 0 0 6 6 6 18 18 18
37433- 50 50 50 74 74 74 2 2 6 2 2 6
37434- 14 14 14 70 70 70 34 34 34 62 62 62
37435-250 250 250 253 253 253 253 253 253 253 253 253
37436-253 253 253 253 253 253 253 253 253 253 253 253
37437-253 253 253 253 253 253 231 231 231 246 246 246
37438-253 253 253 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 253 253 253 253 253 253
37440-253 253 253 253 253 253 253 253 253 253 253 253
37441-253 253 253 253 253 253 253 253 253 253 253 253
37442-253 253 253 253 253 253 234 234 234 14 14 14
37443- 2 2 6 2 2 6 30 30 30 2 2 6
37444- 2 2 6 2 2 6 2 2 6 2 2 6
37445- 2 2 6 66 66 66 62 62 62 22 22 22
37446- 6 6 6 0 0 0 0 0 0 0 0 0
37447- 0 0 0 0 0 0 0 0 0 0 0 0
37448- 0 0 0 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 0 0 0
37452- 0 0 0 0 0 0 6 6 6 18 18 18
37453- 54 54 54 62 62 62 2 2 6 2 2 6
37454- 2 2 6 30 30 30 46 46 46 70 70 70
37455-250 250 250 253 253 253 253 253 253 253 253 253
37456-253 253 253 253 253 253 253 253 253 253 253 253
37457-253 253 253 253 253 253 231 231 231 246 246 246
37458-253 253 253 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 253 253 253 253 253 253
37460-253 253 253 253 253 253 253 253 253 253 253 253
37461-253 253 253 253 253 253 253 253 253 253 253 253
37462-253 253 253 253 253 253 226 226 226 10 10 10
37463- 2 2 6 6 6 6 30 30 30 2 2 6
37464- 2 2 6 2 2 6 2 2 6 2 2 6
37465- 2 2 6 66 66 66 58 58 58 22 22 22
37466- 6 6 6 0 0 0 0 0 0 0 0 0
37467- 0 0 0 0 0 0 0 0 0 0 0 0
37468- 0 0 0 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 0 0 0
37472- 0 0 0 0 0 0 6 6 6 22 22 22
37473- 58 58 58 62 62 62 2 2 6 2 2 6
37474- 2 2 6 2 2 6 30 30 30 78 78 78
37475-250 250 250 253 253 253 253 253 253 253 253 253
37476-253 253 253 253 253 253 253 253 253 253 253 253
37477-253 253 253 253 253 253 231 231 231 246 246 246
37478-253 253 253 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 253 253 253 253 253 253
37480-253 253 253 253 253 253 253 253 253 253 253 253
37481-253 253 253 253 253 253 253 253 253 253 253 253
37482-253 253 253 253 253 253 206 206 206 2 2 6
37483- 22 22 22 34 34 34 18 14 6 22 22 22
37484- 26 26 26 18 18 18 6 6 6 2 2 6
37485- 2 2 6 82 82 82 54 54 54 18 18 18
37486- 6 6 6 0 0 0 0 0 0 0 0 0
37487- 0 0 0 0 0 0 0 0 0 0 0 0
37488- 0 0 0 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 0 0 0
37492- 0 0 0 0 0 0 6 6 6 26 26 26
37493- 62 62 62 106 106 106 74 54 14 185 133 11
37494-210 162 10 121 92 8 6 6 6 62 62 62
37495-238 238 238 253 253 253 253 253 253 253 253 253
37496-253 253 253 253 253 253 253 253 253 253 253 253
37497-253 253 253 253 253 253 231 231 231 246 246 246
37498-253 253 253 253 253 253 253 253 253 253 253 253
37499-253 253 253 253 253 253 253 253 253 253 253 253
37500-253 253 253 253 253 253 253 253 253 253 253 253
37501-253 253 253 253 253 253 253 253 253 253 253 253
37502-253 253 253 253 253 253 158 158 158 18 18 18
37503- 14 14 14 2 2 6 2 2 6 2 2 6
37504- 6 6 6 18 18 18 66 66 66 38 38 38
37505- 6 6 6 94 94 94 50 50 50 18 18 18
37506- 6 6 6 0 0 0 0 0 0 0 0 0
37507- 0 0 0 0 0 0 0 0 0 0 0 0
37508- 0 0 0 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 0 0 0 6 6 6
37512- 10 10 10 10 10 10 18 18 18 38 38 38
37513- 78 78 78 142 134 106 216 158 10 242 186 14
37514-246 190 14 246 190 14 156 118 10 10 10 10
37515- 90 90 90 238 238 238 253 253 253 253 253 253
37516-253 253 253 253 253 253 253 253 253 253 253 253
37517-253 253 253 253 253 253 231 231 231 250 250 250
37518-253 253 253 253 253 253 253 253 253 253 253 253
37519-253 253 253 253 253 253 253 253 253 253 253 253
37520-253 253 253 253 253 253 253 253 253 253 253 253
37521-253 253 253 253 253 253 253 253 253 246 230 190
37522-238 204 91 238 204 91 181 142 44 37 26 9
37523- 2 2 6 2 2 6 2 2 6 2 2 6
37524- 2 2 6 2 2 6 38 38 38 46 46 46
37525- 26 26 26 106 106 106 54 54 54 18 18 18
37526- 6 6 6 0 0 0 0 0 0 0 0 0
37527- 0 0 0 0 0 0 0 0 0 0 0 0
37528- 0 0 0 0 0 0 0 0 0 0 0 0
37529- 0 0 0 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 6 6 6 14 14 14 22 22 22
37532- 30 30 30 38 38 38 50 50 50 70 70 70
37533-106 106 106 190 142 34 226 170 11 242 186 14
37534-246 190 14 246 190 14 246 190 14 154 114 10
37535- 6 6 6 74 74 74 226 226 226 253 253 253
37536-253 253 253 253 253 253 253 253 253 253 253 253
37537-253 253 253 253 253 253 231 231 231 250 250 250
37538-253 253 253 253 253 253 253 253 253 253 253 253
37539-253 253 253 253 253 253 253 253 253 253 253 253
37540-253 253 253 253 253 253 253 253 253 253 253 253
37541-253 253 253 253 253 253 253 253 253 228 184 62
37542-241 196 14 241 208 19 232 195 16 38 30 10
37543- 2 2 6 2 2 6 2 2 6 2 2 6
37544- 2 2 6 6 6 6 30 30 30 26 26 26
37545-203 166 17 154 142 90 66 66 66 26 26 26
37546- 6 6 6 0 0 0 0 0 0 0 0 0
37547- 0 0 0 0 0 0 0 0 0 0 0 0
37548- 0 0 0 0 0 0 0 0 0 0 0 0
37549- 0 0 0 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 6 6 6 18 18 18 38 38 38 58 58 58
37552- 78 78 78 86 86 86 101 101 101 123 123 123
37553-175 146 61 210 150 10 234 174 13 246 186 14
37554-246 190 14 246 190 14 246 190 14 238 190 10
37555-102 78 10 2 2 6 46 46 46 198 198 198
37556-253 253 253 253 253 253 253 253 253 253 253 253
37557-253 253 253 253 253 253 234 234 234 242 242 242
37558-253 253 253 253 253 253 253 253 253 253 253 253
37559-253 253 253 253 253 253 253 253 253 253 253 253
37560-253 253 253 253 253 253 253 253 253 253 253 253
37561-253 253 253 253 253 253 253 253 253 224 178 62
37562-242 186 14 241 196 14 210 166 10 22 18 6
37563- 2 2 6 2 2 6 2 2 6 2 2 6
37564- 2 2 6 2 2 6 6 6 6 121 92 8
37565-238 202 15 232 195 16 82 82 82 34 34 34
37566- 10 10 10 0 0 0 0 0 0 0 0 0
37567- 0 0 0 0 0 0 0 0 0 0 0 0
37568- 0 0 0 0 0 0 0 0 0 0 0 0
37569- 0 0 0 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571- 14 14 14 38 38 38 70 70 70 154 122 46
37572-190 142 34 200 144 11 197 138 11 197 138 11
37573-213 154 11 226 170 11 242 186 14 246 190 14
37574-246 190 14 246 190 14 246 190 14 246 190 14
37575-225 175 15 46 32 6 2 2 6 22 22 22
37576-158 158 158 250 250 250 253 253 253 253 253 253
37577-253 253 253 253 253 253 253 253 253 253 253 253
37578-253 253 253 253 253 253 253 253 253 253 253 253
37579-253 253 253 253 253 253 253 253 253 253 253 253
37580-253 253 253 253 253 253 253 253 253 253 253 253
37581-253 253 253 250 250 250 242 242 242 224 178 62
37582-239 182 13 236 186 11 213 154 11 46 32 6
37583- 2 2 6 2 2 6 2 2 6 2 2 6
37584- 2 2 6 2 2 6 61 42 6 225 175 15
37585-238 190 10 236 186 11 112 100 78 42 42 42
37586- 14 14 14 0 0 0 0 0 0 0 0 0
37587- 0 0 0 0 0 0 0 0 0 0 0 0
37588- 0 0 0 0 0 0 0 0 0 0 0 0
37589- 0 0 0 0 0 0 0 0 0 0 0 0
37590- 0 0 0 0 0 0 0 0 0 6 6 6
37591- 22 22 22 54 54 54 154 122 46 213 154 11
37592-226 170 11 230 174 11 226 170 11 226 170 11
37593-236 178 12 242 186 14 246 190 14 246 190 14
37594-246 190 14 246 190 14 246 190 14 246 190 14
37595-241 196 14 184 144 12 10 10 10 2 2 6
37596- 6 6 6 116 116 116 242 242 242 253 253 253
37597-253 253 253 253 253 253 253 253 253 253 253 253
37598-253 253 253 253 253 253 253 253 253 253 253 253
37599-253 253 253 253 253 253 253 253 253 253 253 253
37600-253 253 253 253 253 253 253 253 253 253 253 253
37601-253 253 253 231 231 231 198 198 198 214 170 54
37602-236 178 12 236 178 12 210 150 10 137 92 6
37603- 18 14 6 2 2 6 2 2 6 2 2 6
37604- 6 6 6 70 47 6 200 144 11 236 178 12
37605-239 182 13 239 182 13 124 112 88 58 58 58
37606- 22 22 22 6 6 6 0 0 0 0 0 0
37607- 0 0 0 0 0 0 0 0 0 0 0 0
37608- 0 0 0 0 0 0 0 0 0 0 0 0
37609- 0 0 0 0 0 0 0 0 0 0 0 0
37610- 0 0 0 0 0 0 0 0 0 10 10 10
37611- 30 30 30 70 70 70 180 133 36 226 170 11
37612-239 182 13 242 186 14 242 186 14 246 186 14
37613-246 190 14 246 190 14 246 190 14 246 190 14
37614-246 190 14 246 190 14 246 190 14 246 190 14
37615-246 190 14 232 195 16 98 70 6 2 2 6
37616- 2 2 6 2 2 6 66 66 66 221 221 221
37617-253 253 253 253 253 253 253 253 253 253 253 253
37618-253 253 253 253 253 253 253 253 253 253 253 253
37619-253 253 253 253 253 253 253 253 253 253 253 253
37620-253 253 253 253 253 253 253 253 253 253 253 253
37621-253 253 253 206 206 206 198 198 198 214 166 58
37622-230 174 11 230 174 11 216 158 10 192 133 9
37623-163 110 8 116 81 8 102 78 10 116 81 8
37624-167 114 7 197 138 11 226 170 11 239 182 13
37625-242 186 14 242 186 14 162 146 94 78 78 78
37626- 34 34 34 14 14 14 6 6 6 0 0 0
37627- 0 0 0 0 0 0 0 0 0 0 0 0
37628- 0 0 0 0 0 0 0 0 0 0 0 0
37629- 0 0 0 0 0 0 0 0 0 0 0 0
37630- 0 0 0 0 0 0 0 0 0 6 6 6
37631- 30 30 30 78 78 78 190 142 34 226 170 11
37632-239 182 13 246 190 14 246 190 14 246 190 14
37633-246 190 14 246 190 14 246 190 14 246 190 14
37634-246 190 14 246 190 14 246 190 14 246 190 14
37635-246 190 14 241 196 14 203 166 17 22 18 6
37636- 2 2 6 2 2 6 2 2 6 38 38 38
37637-218 218 218 253 253 253 253 253 253 253 253 253
37638-253 253 253 253 253 253 253 253 253 253 253 253
37639-253 253 253 253 253 253 253 253 253 253 253 253
37640-253 253 253 253 253 253 253 253 253 253 253 253
37641-250 250 250 206 206 206 198 198 198 202 162 69
37642-226 170 11 236 178 12 224 166 10 210 150 10
37643-200 144 11 197 138 11 192 133 9 197 138 11
37644-210 150 10 226 170 11 242 186 14 246 190 14
37645-246 190 14 246 186 14 225 175 15 124 112 88
37646- 62 62 62 30 30 30 14 14 14 6 6 6
37647- 0 0 0 0 0 0 0 0 0 0 0 0
37648- 0 0 0 0 0 0 0 0 0 0 0 0
37649- 0 0 0 0 0 0 0 0 0 0 0 0
37650- 0 0 0 0 0 0 0 0 0 10 10 10
37651- 30 30 30 78 78 78 174 135 50 224 166 10
37652-239 182 13 246 190 14 246 190 14 246 190 14
37653-246 190 14 246 190 14 246 190 14 246 190 14
37654-246 190 14 246 190 14 246 190 14 246 190 14
37655-246 190 14 246 190 14 241 196 14 139 102 15
37656- 2 2 6 2 2 6 2 2 6 2 2 6
37657- 78 78 78 250 250 250 253 253 253 253 253 253
37658-253 253 253 253 253 253 253 253 253 253 253 253
37659-253 253 253 253 253 253 253 253 253 253 253 253
37660-253 253 253 253 253 253 253 253 253 253 253 253
37661-250 250 250 214 214 214 198 198 198 190 150 46
37662-219 162 10 236 178 12 234 174 13 224 166 10
37663-216 158 10 213 154 11 213 154 11 216 158 10
37664-226 170 11 239 182 13 246 190 14 246 190 14
37665-246 190 14 246 190 14 242 186 14 206 162 42
37666-101 101 101 58 58 58 30 30 30 14 14 14
37667- 6 6 6 0 0 0 0 0 0 0 0 0
37668- 0 0 0 0 0 0 0 0 0 0 0 0
37669- 0 0 0 0 0 0 0 0 0 0 0 0
37670- 0 0 0 0 0 0 0 0 0 10 10 10
37671- 30 30 30 74 74 74 174 135 50 216 158 10
37672-236 178 12 246 190 14 246 190 14 246 190 14
37673-246 190 14 246 190 14 246 190 14 246 190 14
37674-246 190 14 246 190 14 246 190 14 246 190 14
37675-246 190 14 246 190 14 241 196 14 226 184 13
37676- 61 42 6 2 2 6 2 2 6 2 2 6
37677- 22 22 22 238 238 238 253 253 253 253 253 253
37678-253 253 253 253 253 253 253 253 253 253 253 253
37679-253 253 253 253 253 253 253 253 253 253 253 253
37680-253 253 253 253 253 253 253 253 253 253 253 253
37681-253 253 253 226 226 226 187 187 187 180 133 36
37682-216 158 10 236 178 12 239 182 13 236 178 12
37683-230 174 11 226 170 11 226 170 11 230 174 11
37684-236 178 12 242 186 14 246 190 14 246 190 14
37685-246 190 14 246 190 14 246 186 14 239 182 13
37686-206 162 42 106 106 106 66 66 66 34 34 34
37687- 14 14 14 6 6 6 0 0 0 0 0 0
37688- 0 0 0 0 0 0 0 0 0 0 0 0
37689- 0 0 0 0 0 0 0 0 0 0 0 0
37690- 0 0 0 0 0 0 0 0 0 6 6 6
37691- 26 26 26 70 70 70 163 133 67 213 154 11
37692-236 178 12 246 190 14 246 190 14 246 190 14
37693-246 190 14 246 190 14 246 190 14 246 190 14
37694-246 190 14 246 190 14 246 190 14 246 190 14
37695-246 190 14 246 190 14 246 190 14 241 196 14
37696-190 146 13 18 14 6 2 2 6 2 2 6
37697- 46 46 46 246 246 246 253 253 253 253 253 253
37698-253 253 253 253 253 253 253 253 253 253 253 253
37699-253 253 253 253 253 253 253 253 253 253 253 253
37700-253 253 253 253 253 253 253 253 253 253 253 253
37701-253 253 253 221 221 221 86 86 86 156 107 11
37702-216 158 10 236 178 12 242 186 14 246 186 14
37703-242 186 14 239 182 13 239 182 13 242 186 14
37704-242 186 14 246 186 14 246 190 14 246 190 14
37705-246 190 14 246 190 14 246 190 14 246 190 14
37706-242 186 14 225 175 15 142 122 72 66 66 66
37707- 30 30 30 10 10 10 0 0 0 0 0 0
37708- 0 0 0 0 0 0 0 0 0 0 0 0
37709- 0 0 0 0 0 0 0 0 0 0 0 0
37710- 0 0 0 0 0 0 0 0 0 6 6 6
37711- 26 26 26 70 70 70 163 133 67 210 150 10
37712-236 178 12 246 190 14 246 190 14 246 190 14
37713-246 190 14 246 190 14 246 190 14 246 190 14
37714-246 190 14 246 190 14 246 190 14 246 190 14
37715-246 190 14 246 190 14 246 190 14 246 190 14
37716-232 195 16 121 92 8 34 34 34 106 106 106
37717-221 221 221 253 253 253 253 253 253 253 253 253
37718-253 253 253 253 253 253 253 253 253 253 253 253
37719-253 253 253 253 253 253 253 253 253 253 253 253
37720-253 253 253 253 253 253 253 253 253 253 253 253
37721-242 242 242 82 82 82 18 14 6 163 110 8
37722-216 158 10 236 178 12 242 186 14 246 190 14
37723-246 190 14 246 190 14 246 190 14 246 190 14
37724-246 190 14 246 190 14 246 190 14 246 190 14
37725-246 190 14 246 190 14 246 190 14 246 190 14
37726-246 190 14 246 190 14 242 186 14 163 133 67
37727- 46 46 46 18 18 18 6 6 6 0 0 0
37728- 0 0 0 0 0 0 0 0 0 0 0 0
37729- 0 0 0 0 0 0 0 0 0 0 0 0
37730- 0 0 0 0 0 0 0 0 0 10 10 10
37731- 30 30 30 78 78 78 163 133 67 210 150 10
37732-236 178 12 246 186 14 246 190 14 246 190 14
37733-246 190 14 246 190 14 246 190 14 246 190 14
37734-246 190 14 246 190 14 246 190 14 246 190 14
37735-246 190 14 246 190 14 246 190 14 246 190 14
37736-241 196 14 215 174 15 190 178 144 253 253 253
37737-253 253 253 253 253 253 253 253 253 253 253 253
37738-253 253 253 253 253 253 253 253 253 253 253 253
37739-253 253 253 253 253 253 253 253 253 253 253 253
37740-253 253 253 253 253 253 253 253 253 218 218 218
37741- 58 58 58 2 2 6 22 18 6 167 114 7
37742-216 158 10 236 178 12 246 186 14 246 190 14
37743-246 190 14 246 190 14 246 190 14 246 190 14
37744-246 190 14 246 190 14 246 190 14 246 190 14
37745-246 190 14 246 190 14 246 190 14 246 190 14
37746-246 190 14 246 186 14 242 186 14 190 150 46
37747- 54 54 54 22 22 22 6 6 6 0 0 0
37748- 0 0 0 0 0 0 0 0 0 0 0 0
37749- 0 0 0 0 0 0 0 0 0 0 0 0
37750- 0 0 0 0 0 0 0 0 0 14 14 14
37751- 38 38 38 86 86 86 180 133 36 213 154 11
37752-236 178 12 246 186 14 246 190 14 246 190 14
37753-246 190 14 246 190 14 246 190 14 246 190 14
37754-246 190 14 246 190 14 246 190 14 246 190 14
37755-246 190 14 246 190 14 246 190 14 246 190 14
37756-246 190 14 232 195 16 190 146 13 214 214 214
37757-253 253 253 253 253 253 253 253 253 253 253 253
37758-253 253 253 253 253 253 253 253 253 253 253 253
37759-253 253 253 253 253 253 253 253 253 253 253 253
37760-253 253 253 250 250 250 170 170 170 26 26 26
37761- 2 2 6 2 2 6 37 26 9 163 110 8
37762-219 162 10 239 182 13 246 186 14 246 190 14
37763-246 190 14 246 190 14 246 190 14 246 190 14
37764-246 190 14 246 190 14 246 190 14 246 190 14
37765-246 190 14 246 190 14 246 190 14 246 190 14
37766-246 186 14 236 178 12 224 166 10 142 122 72
37767- 46 46 46 18 18 18 6 6 6 0 0 0
37768- 0 0 0 0 0 0 0 0 0 0 0 0
37769- 0 0 0 0 0 0 0 0 0 0 0 0
37770- 0 0 0 0 0 0 6 6 6 18 18 18
37771- 50 50 50 109 106 95 192 133 9 224 166 10
37772-242 186 14 246 190 14 246 190 14 246 190 14
37773-246 190 14 246 190 14 246 190 14 246 190 14
37774-246 190 14 246 190 14 246 190 14 246 190 14
37775-246 190 14 246 190 14 246 190 14 246 190 14
37776-242 186 14 226 184 13 210 162 10 142 110 46
37777-226 226 226 253 253 253 253 253 253 253 253 253
37778-253 253 253 253 253 253 253 253 253 253 253 253
37779-253 253 253 253 253 253 253 253 253 253 253 253
37780-198 198 198 66 66 66 2 2 6 2 2 6
37781- 2 2 6 2 2 6 50 34 6 156 107 11
37782-219 162 10 239 182 13 246 186 14 246 190 14
37783-246 190 14 246 190 14 246 190 14 246 190 14
37784-246 190 14 246 190 14 246 190 14 246 190 14
37785-246 190 14 246 190 14 246 190 14 242 186 14
37786-234 174 13 213 154 11 154 122 46 66 66 66
37787- 30 30 30 10 10 10 0 0 0 0 0 0
37788- 0 0 0 0 0 0 0 0 0 0 0 0
37789- 0 0 0 0 0 0 0 0 0 0 0 0
37790- 0 0 0 0 0 0 6 6 6 22 22 22
37791- 58 58 58 154 121 60 206 145 10 234 174 13
37792-242 186 14 246 186 14 246 190 14 246 190 14
37793-246 190 14 246 190 14 246 190 14 246 190 14
37794-246 190 14 246 190 14 246 190 14 246 190 14
37795-246 190 14 246 190 14 246 190 14 246 190 14
37796-246 186 14 236 178 12 210 162 10 163 110 8
37797- 61 42 6 138 138 138 218 218 218 250 250 250
37798-253 253 253 253 253 253 253 253 253 250 250 250
37799-242 242 242 210 210 210 144 144 144 66 66 66
37800- 6 6 6 2 2 6 2 2 6 2 2 6
37801- 2 2 6 2 2 6 61 42 6 163 110 8
37802-216 158 10 236 178 12 246 190 14 246 190 14
37803-246 190 14 246 190 14 246 190 14 246 190 14
37804-246 190 14 246 190 14 246 190 14 246 190 14
37805-246 190 14 239 182 13 230 174 11 216 158 10
37806-190 142 34 124 112 88 70 70 70 38 38 38
37807- 18 18 18 6 6 6 0 0 0 0 0 0
37808- 0 0 0 0 0 0 0 0 0 0 0 0
37809- 0 0 0 0 0 0 0 0 0 0 0 0
37810- 0 0 0 0 0 0 6 6 6 22 22 22
37811- 62 62 62 168 124 44 206 145 10 224 166 10
37812-236 178 12 239 182 13 242 186 14 242 186 14
37813-246 186 14 246 190 14 246 190 14 246 190 14
37814-246 190 14 246 190 14 246 190 14 246 190 14
37815-246 190 14 246 190 14 246 190 14 246 190 14
37816-246 190 14 236 178 12 216 158 10 175 118 6
37817- 80 54 7 2 2 6 6 6 6 30 30 30
37818- 54 54 54 62 62 62 50 50 50 38 38 38
37819- 14 14 14 2 2 6 2 2 6 2 2 6
37820- 2 2 6 2 2 6 2 2 6 2 2 6
37821- 2 2 6 6 6 6 80 54 7 167 114 7
37822-213 154 11 236 178 12 246 190 14 246 190 14
37823-246 190 14 246 190 14 246 190 14 246 190 14
37824-246 190 14 242 186 14 239 182 13 239 182 13
37825-230 174 11 210 150 10 174 135 50 124 112 88
37826- 82 82 82 54 54 54 34 34 34 18 18 18
37827- 6 6 6 0 0 0 0 0 0 0 0 0
37828- 0 0 0 0 0 0 0 0 0 0 0 0
37829- 0 0 0 0 0 0 0 0 0 0 0 0
37830- 0 0 0 0 0 0 6 6 6 18 18 18
37831- 50 50 50 158 118 36 192 133 9 200 144 11
37832-216 158 10 219 162 10 224 166 10 226 170 11
37833-230 174 11 236 178 12 239 182 13 239 182 13
37834-242 186 14 246 186 14 246 190 14 246 190 14
37835-246 190 14 246 190 14 246 190 14 246 190 14
37836-246 186 14 230 174 11 210 150 10 163 110 8
37837-104 69 6 10 10 10 2 2 6 2 2 6
37838- 2 2 6 2 2 6 2 2 6 2 2 6
37839- 2 2 6 2 2 6 2 2 6 2 2 6
37840- 2 2 6 2 2 6 2 2 6 2 2 6
37841- 2 2 6 6 6 6 91 60 6 167 114 7
37842-206 145 10 230 174 11 242 186 14 246 190 14
37843-246 190 14 246 190 14 246 186 14 242 186 14
37844-239 182 13 230 174 11 224 166 10 213 154 11
37845-180 133 36 124 112 88 86 86 86 58 58 58
37846- 38 38 38 22 22 22 10 10 10 6 6 6
37847- 0 0 0 0 0 0 0 0 0 0 0 0
37848- 0 0 0 0 0 0 0 0 0 0 0 0
37849- 0 0 0 0 0 0 0 0 0 0 0 0
37850- 0 0 0 0 0 0 0 0 0 14 14 14
37851- 34 34 34 70 70 70 138 110 50 158 118 36
37852-167 114 7 180 123 7 192 133 9 197 138 11
37853-200 144 11 206 145 10 213 154 11 219 162 10
37854-224 166 10 230 174 11 239 182 13 242 186 14
37855-246 186 14 246 186 14 246 186 14 246 186 14
37856-239 182 13 216 158 10 185 133 11 152 99 6
37857-104 69 6 18 14 6 2 2 6 2 2 6
37858- 2 2 6 2 2 6 2 2 6 2 2 6
37859- 2 2 6 2 2 6 2 2 6 2 2 6
37860- 2 2 6 2 2 6 2 2 6 2 2 6
37861- 2 2 6 6 6 6 80 54 7 152 99 6
37862-192 133 9 219 162 10 236 178 12 239 182 13
37863-246 186 14 242 186 14 239 182 13 236 178 12
37864-224 166 10 206 145 10 192 133 9 154 121 60
37865- 94 94 94 62 62 62 42 42 42 22 22 22
37866- 14 14 14 6 6 6 0 0 0 0 0 0
37867- 0 0 0 0 0 0 0 0 0 0 0 0
37868- 0 0 0 0 0 0 0 0 0 0 0 0
37869- 0 0 0 0 0 0 0 0 0 0 0 0
37870- 0 0 0 0 0 0 0 0 0 6 6 6
37871- 18 18 18 34 34 34 58 58 58 78 78 78
37872-101 98 89 124 112 88 142 110 46 156 107 11
37873-163 110 8 167 114 7 175 118 6 180 123 7
37874-185 133 11 197 138 11 210 150 10 219 162 10
37875-226 170 11 236 178 12 236 178 12 234 174 13
37876-219 162 10 197 138 11 163 110 8 130 83 6
37877- 91 60 6 10 10 10 2 2 6 2 2 6
37878- 18 18 18 38 38 38 38 38 38 38 38 38
37879- 38 38 38 38 38 38 38 38 38 38 38 38
37880- 38 38 38 38 38 38 26 26 26 2 2 6
37881- 2 2 6 6 6 6 70 47 6 137 92 6
37882-175 118 6 200 144 11 219 162 10 230 174 11
37883-234 174 13 230 174 11 219 162 10 210 150 10
37884-192 133 9 163 110 8 124 112 88 82 82 82
37885- 50 50 50 30 30 30 14 14 14 6 6 6
37886- 0 0 0 0 0 0 0 0 0 0 0 0
37887- 0 0 0 0 0 0 0 0 0 0 0 0
37888- 0 0 0 0 0 0 0 0 0 0 0 0
37889- 0 0 0 0 0 0 0 0 0 0 0 0
37890- 0 0 0 0 0 0 0 0 0 0 0 0
37891- 6 6 6 14 14 14 22 22 22 34 34 34
37892- 42 42 42 58 58 58 74 74 74 86 86 86
37893-101 98 89 122 102 70 130 98 46 121 87 25
37894-137 92 6 152 99 6 163 110 8 180 123 7
37895-185 133 11 197 138 11 206 145 10 200 144 11
37896-180 123 7 156 107 11 130 83 6 104 69 6
37897- 50 34 6 54 54 54 110 110 110 101 98 89
37898- 86 86 86 82 82 82 78 78 78 78 78 78
37899- 78 78 78 78 78 78 78 78 78 78 78 78
37900- 78 78 78 82 82 82 86 86 86 94 94 94
37901-106 106 106 101 101 101 86 66 34 124 80 6
37902-156 107 11 180 123 7 192 133 9 200 144 11
37903-206 145 10 200 144 11 192 133 9 175 118 6
37904-139 102 15 109 106 95 70 70 70 42 42 42
37905- 22 22 22 10 10 10 0 0 0 0 0 0
37906- 0 0 0 0 0 0 0 0 0 0 0 0
37907- 0 0 0 0 0 0 0 0 0 0 0 0
37908- 0 0 0 0 0 0 0 0 0 0 0 0
37909- 0 0 0 0 0 0 0 0 0 0 0 0
37910- 0 0 0 0 0 0 0 0 0 0 0 0
37911- 0 0 0 0 0 0 6 6 6 10 10 10
37912- 14 14 14 22 22 22 30 30 30 38 38 38
37913- 50 50 50 62 62 62 74 74 74 90 90 90
37914-101 98 89 112 100 78 121 87 25 124 80 6
37915-137 92 6 152 99 6 152 99 6 152 99 6
37916-138 86 6 124 80 6 98 70 6 86 66 30
37917-101 98 89 82 82 82 58 58 58 46 46 46
37918- 38 38 38 34 34 34 34 34 34 34 34 34
37919- 34 34 34 34 34 34 34 34 34 34 34 34
37920- 34 34 34 34 34 34 38 38 38 42 42 42
37921- 54 54 54 82 82 82 94 86 76 91 60 6
37922-134 86 6 156 107 11 167 114 7 175 118 6
37923-175 118 6 167 114 7 152 99 6 121 87 25
37924-101 98 89 62 62 62 34 34 34 18 18 18
37925- 6 6 6 0 0 0 0 0 0 0 0 0
37926- 0 0 0 0 0 0 0 0 0 0 0 0
37927- 0 0 0 0 0 0 0 0 0 0 0 0
37928- 0 0 0 0 0 0 0 0 0 0 0 0
37929- 0 0 0 0 0 0 0 0 0 0 0 0
37930- 0 0 0 0 0 0 0 0 0 0 0 0
37931- 0 0 0 0 0 0 0 0 0 0 0 0
37932- 0 0 0 6 6 6 6 6 6 10 10 10
37933- 18 18 18 22 22 22 30 30 30 42 42 42
37934- 50 50 50 66 66 66 86 86 86 101 98 89
37935-106 86 58 98 70 6 104 69 6 104 69 6
37936-104 69 6 91 60 6 82 62 34 90 90 90
37937- 62 62 62 38 38 38 22 22 22 14 14 14
37938- 10 10 10 10 10 10 10 10 10 10 10 10
37939- 10 10 10 10 10 10 6 6 6 10 10 10
37940- 10 10 10 10 10 10 10 10 10 14 14 14
37941- 22 22 22 42 42 42 70 70 70 89 81 66
37942- 80 54 7 104 69 6 124 80 6 137 92 6
37943-134 86 6 116 81 8 100 82 52 86 86 86
37944- 58 58 58 30 30 30 14 14 14 6 6 6
37945- 0 0 0 0 0 0 0 0 0 0 0 0
37946- 0 0 0 0 0 0 0 0 0 0 0 0
37947- 0 0 0 0 0 0 0 0 0 0 0 0
37948- 0 0 0 0 0 0 0 0 0 0 0 0
37949- 0 0 0 0 0 0 0 0 0 0 0 0
37950- 0 0 0 0 0 0 0 0 0 0 0 0
37951- 0 0 0 0 0 0 0 0 0 0 0 0
37952- 0 0 0 0 0 0 0 0 0 0 0 0
37953- 0 0 0 6 6 6 10 10 10 14 14 14
37954- 18 18 18 26 26 26 38 38 38 54 54 54
37955- 70 70 70 86 86 86 94 86 76 89 81 66
37956- 89 81 66 86 86 86 74 74 74 50 50 50
37957- 30 30 30 14 14 14 6 6 6 0 0 0
37958- 0 0 0 0 0 0 0 0 0 0 0 0
37959- 0 0 0 0 0 0 0 0 0 0 0 0
37960- 0 0 0 0 0 0 0 0 0 0 0 0
37961- 6 6 6 18 18 18 34 34 34 58 58 58
37962- 82 82 82 89 81 66 89 81 66 89 81 66
37963- 94 86 66 94 86 76 74 74 74 50 50 50
37964- 26 26 26 14 14 14 6 6 6 0 0 0
37965- 0 0 0 0 0 0 0 0 0 0 0 0
37966- 0 0 0 0 0 0 0 0 0 0 0 0
37967- 0 0 0 0 0 0 0 0 0 0 0 0
37968- 0 0 0 0 0 0 0 0 0 0 0 0
37969- 0 0 0 0 0 0 0 0 0 0 0 0
37970- 0 0 0 0 0 0 0 0 0 0 0 0
37971- 0 0 0 0 0 0 0 0 0 0 0 0
37972- 0 0 0 0 0 0 0 0 0 0 0 0
37973- 0 0 0 0 0 0 0 0 0 0 0 0
37974- 6 6 6 6 6 6 14 14 14 18 18 18
37975- 30 30 30 38 38 38 46 46 46 54 54 54
37976- 50 50 50 42 42 42 30 30 30 18 18 18
37977- 10 10 10 0 0 0 0 0 0 0 0 0
37978- 0 0 0 0 0 0 0 0 0 0 0 0
37979- 0 0 0 0 0 0 0 0 0 0 0 0
37980- 0 0 0 0 0 0 0 0 0 0 0 0
37981- 0 0 0 6 6 6 14 14 14 26 26 26
37982- 38 38 38 50 50 50 58 58 58 58 58 58
37983- 54 54 54 42 42 42 30 30 30 18 18 18
37984- 10 10 10 0 0 0 0 0 0 0 0 0
37985- 0 0 0 0 0 0 0 0 0 0 0 0
37986- 0 0 0 0 0 0 0 0 0 0 0 0
37987- 0 0 0 0 0 0 0 0 0 0 0 0
37988- 0 0 0 0 0 0 0 0 0 0 0 0
37989- 0 0 0 0 0 0 0 0 0 0 0 0
37990- 0 0 0 0 0 0 0 0 0 0 0 0
37991- 0 0 0 0 0 0 0 0 0 0 0 0
37992- 0 0 0 0 0 0 0 0 0 0 0 0
37993- 0 0 0 0 0 0 0 0 0 0 0 0
37994- 0 0 0 0 0 0 0 0 0 6 6 6
37995- 6 6 6 10 10 10 14 14 14 18 18 18
37996- 18 18 18 14 14 14 10 10 10 6 6 6
37997- 0 0 0 0 0 0 0 0 0 0 0 0
37998- 0 0 0 0 0 0 0 0 0 0 0 0
37999- 0 0 0 0 0 0 0 0 0 0 0 0
38000- 0 0 0 0 0 0 0 0 0 0 0 0
38001- 0 0 0 0 0 0 0 0 0 6 6 6
38002- 14 14 14 18 18 18 22 22 22 22 22 22
38003- 18 18 18 14 14 14 10 10 10 6 6 6
38004- 0 0 0 0 0 0 0 0 0 0 0 0
38005- 0 0 0 0 0 0 0 0 0 0 0 0
38006- 0 0 0 0 0 0 0 0 0 0 0 0
38007- 0 0 0 0 0 0 0 0 0 0 0 0
38008- 0 0 0 0 0 0 0 0 0 0 0 0
38009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38022+4 4 4 4 4 4
38023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38036+4 4 4 4 4 4
38037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38050+4 4 4 4 4 4
38051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064+4 4 4 4 4 4
38065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078+4 4 4 4 4 4
38079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092+4 4 4 4 4 4
38093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38097+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38098+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38103+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38104+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38106+4 4 4 4 4 4
38107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38112+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38113+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38117+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38118+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38119+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38120+4 4 4 4 4 4
38121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38126+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38127+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38131+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38132+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38133+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38134+4 4 4 4 4 4
38135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38139+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38140+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38141+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38144+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38145+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38146+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38147+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38148+4 4 4 4 4 4
38149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38153+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38154+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38155+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38156+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38158+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38159+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38160+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38161+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38162+4 4 4 4 4 4
38163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38166+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38167+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38168+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38169+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38170+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38171+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38172+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38173+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38174+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38175+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38176+4 4 4 4 4 4
38177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38180+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38181+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38182+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38183+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38184+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38185+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38186+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38187+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38188+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38189+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38190+4 4 4 4 4 4
38191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38194+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38195+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38196+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38197+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38198+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38199+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38200+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38201+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38202+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38203+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38204+4 4 4 4 4 4
38205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38208+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38209+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38210+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38211+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38212+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38213+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38214+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38215+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38216+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38217+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38218+4 4 4 4 4 4
38219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38222+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38223+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38224+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38225+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38226+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38227+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38228+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38229+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38230+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38231+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38232+4 4 4 4 4 4
38233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38234+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38235+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38236+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38237+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38238+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38239+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38240+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38241+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38242+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38243+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38244+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38245+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38246+4 4 4 4 4 4
38247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38248+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38249+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38250+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38251+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38252+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38253+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38254+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38255+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38256+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38257+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38258+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38259+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38260+0 0 0 4 4 4
38261+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38262+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38263+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38264+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38265+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38266+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38267+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38268+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38269+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38270+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38271+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38272+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38273+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38274+2 0 0 0 0 0
38275+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38276+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38277+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38278+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38279+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38280+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38281+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38282+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38283+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38284+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38285+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38286+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38287+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38288+37 38 37 0 0 0
38289+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38290+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38291+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38292+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38293+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38294+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38295+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38296+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38297+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38298+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38299+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38300+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38301+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38302+85 115 134 4 0 0
38303+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38304+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38305+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38306+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38307+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38308+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38309+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38310+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38311+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38312+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38313+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38314+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38315+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38316+60 73 81 4 0 0
38317+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38318+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38319+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38320+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38321+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38322+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38323+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38324+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38325+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38326+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38327+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38328+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38329+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38330+16 19 21 4 0 0
38331+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38332+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38333+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38334+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38335+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38336+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38337+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38338+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38339+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38340+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38341+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38342+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38343+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38344+4 0 0 4 3 3
38345+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38346+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38347+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38349+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38350+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38351+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38352+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38353+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38354+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38355+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38356+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38357+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38358+3 2 2 4 4 4
38359+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38360+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38361+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38362+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38363+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38364+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38365+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38366+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38367+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38368+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38369+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38370+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38371+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38372+4 4 4 4 4 4
38373+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38374+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38375+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38376+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38377+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38378+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38379+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38380+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38381+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38382+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38383+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38384+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38385+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38386+4 4 4 4 4 4
38387+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38388+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38389+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38390+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38391+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38392+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38393+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38394+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38395+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38396+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38397+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38398+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38399+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38400+5 5 5 5 5 5
38401+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38402+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38403+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38404+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38405+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38406+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38407+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38408+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38409+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38410+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38411+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38412+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38413+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38414+5 5 5 4 4 4
38415+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38416+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38417+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38418+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38419+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38420+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38421+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38422+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38423+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38424+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38425+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38426+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38428+4 4 4 4 4 4
38429+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38430+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38431+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38432+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38433+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38434+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38435+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38436+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38437+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38438+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38439+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38440+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38442+4 4 4 4 4 4
38443+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38444+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38445+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38446+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38447+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38448+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38449+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38450+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38451+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38452+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38453+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38456+4 4 4 4 4 4
38457+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38458+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38459+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38460+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38461+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38462+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38463+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38464+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38465+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38466+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38467+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38470+4 4 4 4 4 4
38471+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38472+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38473+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38474+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38475+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38476+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38477+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38478+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38479+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38480+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38481+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38484+4 4 4 4 4 4
38485+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38486+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38487+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38488+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38489+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38490+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38491+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38492+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38493+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38494+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38495+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38498+4 4 4 4 4 4
38499+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38500+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38501+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38502+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38503+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38504+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38505+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38506+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38507+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38508+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38509+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38512+4 4 4 4 4 4
38513+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38514+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38515+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38516+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38517+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38518+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38519+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38520+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38521+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38522+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38523+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38526+4 4 4 4 4 4
38527+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38528+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38529+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38530+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38531+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38532+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38533+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38534+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38535+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38536+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38537+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38540+4 4 4 4 4 4
38541+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38542+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38543+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38544+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38545+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38546+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38547+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38548+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38549+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38550+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38551+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38554+4 4 4 4 4 4
38555+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38556+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38557+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38558+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38559+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38560+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38561+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38562+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38563+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38564+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38565+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38568+4 4 4 4 4 4
38569+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38570+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38571+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38572+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38573+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38574+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38575+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38576+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38577+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38578+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38579+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582+4 4 4 4 4 4
38583+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38584+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38585+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38586+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38587+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38588+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38589+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38590+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38591+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38592+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38593+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596+4 4 4 4 4 4
38597+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38598+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38599+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38600+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38601+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38602+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38603+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38604+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38605+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38606+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38607+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610+4 4 4 4 4 4
38611+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38612+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38613+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38614+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38615+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38616+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38617+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38618+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38619+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38620+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38621+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624+4 4 4 4 4 4
38625+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38626+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38627+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38628+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38629+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38630+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38631+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38632+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38633+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38634+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38635+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638+4 4 4 4 4 4
38639+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38640+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38641+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38642+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38643+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38644+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38645+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38646+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38647+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38648+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38649+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652+4 4 4 4 4 4
38653+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38654+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38655+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38656+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38657+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38658+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38659+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38660+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38661+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38662+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38663+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666+4 4 4 4 4 4
38667+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38668+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38669+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38670+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38671+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38672+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38673+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38674+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38675+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38676+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38677+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680+4 4 4 4 4 4
38681+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38682+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38683+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38684+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38685+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38686+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38687+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38688+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38689+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38690+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38691+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694+4 4 4 4 4 4
38695+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38696+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38697+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38698+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38699+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38700+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38701+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38702+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38703+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38704+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38705+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708+4 4 4 4 4 4
38709+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38710+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38711+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38712+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38713+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38714+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38715+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38716+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38717+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38718+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38719+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722+4 4 4 4 4 4
38723+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38724+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38725+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38726+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38727+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38728+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38729+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38730+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38731+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38732+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38733+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736+4 4 4 4 4 4
38737+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38738+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38739+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38740+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38741+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38742+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38743+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38744+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38745+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38746+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38747+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750+4 4 4 4 4 4
38751+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38752+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38753+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38754+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38755+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38756+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38757+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38758+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38759+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38760+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38761+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764+4 4 4 4 4 4
38765+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38766+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38767+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38768+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38769+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38770+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38771+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38772+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38773+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38774+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38775+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778+4 4 4 4 4 4
38779+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38780+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38781+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38782+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38783+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38784+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38785+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38786+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38787+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38788+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38789+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792+4 4 4 4 4 4
38793+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38794+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38795+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38796+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38797+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38798+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38799+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38800+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38801+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38802+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38803+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806+4 4 4 4 4 4
38807+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38808+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38809+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38810+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38811+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38812+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38813+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38814+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38815+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38816+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38817+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820+4 4 4 4 4 4
38821+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38822+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38823+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38824+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38825+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38826+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38827+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38828+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38829+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38830+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834+4 4 4 4 4 4
38835+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38836+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38837+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38838+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38839+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38840+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38841+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38842+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38843+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38844+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848+4 4 4 4 4 4
38849+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38850+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38851+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38852+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38853+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38854+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38855+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38856+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38857+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38858+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862+4 4 4 4 4 4
38863+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38864+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38865+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38866+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38867+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38868+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38869+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38870+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38871+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38872+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876+4 4 4 4 4 4
38877+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38878+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38879+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38880+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38881+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38882+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38883+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38884+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38885+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890+4 4 4 4 4 4
38891+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38892+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38893+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38894+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38895+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38896+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38897+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38898+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38899+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904+4 4 4 4 4 4
38905+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38906+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38907+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38908+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38909+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38910+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38911+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38912+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38913+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918+4 4 4 4 4 4
38919+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38920+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38921+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38922+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38923+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38924+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38925+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38926+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932+4 4 4 4 4 4
38933+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38934+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38935+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38936+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38937+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38938+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38939+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38940+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946+4 4 4 4 4 4
38947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38948+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38949+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38950+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38951+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38952+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38953+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38954+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960+4 4 4 4 4 4
38961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38962+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38963+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38964+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38965+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38966+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38967+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38968+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974+4 4 4 4 4 4
38975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38976+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38977+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38978+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38979+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38980+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38981+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38982+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988+4 4 4 4 4 4
38989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38992+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38993+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38994+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38995+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38996+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002+4 4 4 4 4 4
39003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39006+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39007+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39008+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39009+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016+4 4 4 4 4 4
39017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39021+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39022+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39023+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030+4 4 4 4 4 4
39031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39035+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39036+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39037+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044+4 4 4 4 4 4
39045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39049+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39050+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39051+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058+4 4 4 4 4 4
39059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39063+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39064+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39065+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072+4 4 4 4 4 4
39073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39078+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39079+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086+4 4 4 4 4 4
39087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39092+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39093+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100+4 4 4 4 4 4
39101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39106+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114+4 4 4 4 4 4
39115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39120+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128+4 4 4 4 4 4
39129diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39130index 3473e75..c930142 100644
39131--- a/drivers/video/udlfb.c
39132+++ b/drivers/video/udlfb.c
39133@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39134 dlfb_urb_completion(urb);
39135
39136 error:
39137- atomic_add(bytes_sent, &dev->bytes_sent);
39138- atomic_add(bytes_identical, &dev->bytes_identical);
39139- atomic_add(width*height*2, &dev->bytes_rendered);
39140+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39141+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39142+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39143 end_cycles = get_cycles();
39144- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39145+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39146 >> 10)), /* Kcycles */
39147 &dev->cpu_kcycles_used);
39148
39149@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39150 dlfb_urb_completion(urb);
39151
39152 error:
39153- atomic_add(bytes_sent, &dev->bytes_sent);
39154- atomic_add(bytes_identical, &dev->bytes_identical);
39155- atomic_add(bytes_rendered, &dev->bytes_rendered);
39156+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39157+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39158+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39159 end_cycles = get_cycles();
39160- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39161+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39162 >> 10)), /* Kcycles */
39163 &dev->cpu_kcycles_used);
39164 }
39165@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39166 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39167 struct dlfb_data *dev = fb_info->par;
39168 return snprintf(buf, PAGE_SIZE, "%u\n",
39169- atomic_read(&dev->bytes_rendered));
39170+ atomic_read_unchecked(&dev->bytes_rendered));
39171 }
39172
39173 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39174@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39175 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39176 struct dlfb_data *dev = fb_info->par;
39177 return snprintf(buf, PAGE_SIZE, "%u\n",
39178- atomic_read(&dev->bytes_identical));
39179+ atomic_read_unchecked(&dev->bytes_identical));
39180 }
39181
39182 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39183@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39184 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39185 struct dlfb_data *dev = fb_info->par;
39186 return snprintf(buf, PAGE_SIZE, "%u\n",
39187- atomic_read(&dev->bytes_sent));
39188+ atomic_read_unchecked(&dev->bytes_sent));
39189 }
39190
39191 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39192@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39193 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39194 struct dlfb_data *dev = fb_info->par;
39195 return snprintf(buf, PAGE_SIZE, "%u\n",
39196- atomic_read(&dev->cpu_kcycles_used));
39197+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39198 }
39199
39200 static ssize_t edid_show(
39201@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39202 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39203 struct dlfb_data *dev = fb_info->par;
39204
39205- atomic_set(&dev->bytes_rendered, 0);
39206- atomic_set(&dev->bytes_identical, 0);
39207- atomic_set(&dev->bytes_sent, 0);
39208- atomic_set(&dev->cpu_kcycles_used, 0);
39209+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39210+ atomic_set_unchecked(&dev->bytes_identical, 0);
39211+ atomic_set_unchecked(&dev->bytes_sent, 0);
39212+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39213
39214 return count;
39215 }
39216diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39217index 7f8472c..9842e87 100644
39218--- a/drivers/video/uvesafb.c
39219+++ b/drivers/video/uvesafb.c
39220@@ -19,6 +19,7 @@
39221 #include <linux/io.h>
39222 #include <linux/mutex.h>
39223 #include <linux/slab.h>
39224+#include <linux/moduleloader.h>
39225 #include <video/edid.h>
39226 #include <video/uvesafb.h>
39227 #ifdef CONFIG_X86
39228@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39229 NULL,
39230 };
39231
39232- return call_usermodehelper(v86d_path, argv, envp, 1);
39233+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39234 }
39235
39236 /*
39237@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39238 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39239 par->pmi_setpal = par->ypan = 0;
39240 } else {
39241+
39242+#ifdef CONFIG_PAX_KERNEXEC
39243+#ifdef CONFIG_MODULES
39244+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39245+#endif
39246+ if (!par->pmi_code) {
39247+ par->pmi_setpal = par->ypan = 0;
39248+ return 0;
39249+ }
39250+#endif
39251+
39252 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39253 + task->t.regs.edi);
39254+
39255+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39256+ pax_open_kernel();
39257+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39258+ pax_close_kernel();
39259+
39260+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39261+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39262+#else
39263 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39264 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39265+#endif
39266+
39267 printk(KERN_INFO "uvesafb: protected mode interface info at "
39268 "%04x:%04x\n",
39269 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39270@@ -1821,6 +1844,11 @@ out:
39271 if (par->vbe_modes)
39272 kfree(par->vbe_modes);
39273
39274+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39275+ if (par->pmi_code)
39276+ module_free_exec(NULL, par->pmi_code);
39277+#endif
39278+
39279 framebuffer_release(info);
39280 return err;
39281 }
39282@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39283 kfree(par->vbe_state_orig);
39284 if (par->vbe_state_saved)
39285 kfree(par->vbe_state_saved);
39286+
39287+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39288+ if (par->pmi_code)
39289+ module_free_exec(NULL, par->pmi_code);
39290+#endif
39291+
39292 }
39293
39294 framebuffer_release(info);
39295diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39296index 501b340..86bd4cf 100644
39297--- a/drivers/video/vesafb.c
39298+++ b/drivers/video/vesafb.c
39299@@ -9,6 +9,7 @@
39300 */
39301
39302 #include <linux/module.h>
39303+#include <linux/moduleloader.h>
39304 #include <linux/kernel.h>
39305 #include <linux/errno.h>
39306 #include <linux/string.h>
39307@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39308 static int vram_total __initdata; /* Set total amount of memory */
39309 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39310 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39311-static void (*pmi_start)(void) __read_mostly;
39312-static void (*pmi_pal) (void) __read_mostly;
39313+static void (*pmi_start)(void) __read_only;
39314+static void (*pmi_pal) (void) __read_only;
39315 static int depth __read_mostly;
39316 static int vga_compat __read_mostly;
39317 /* --------------------------------------------------------------------- */
39318@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39319 unsigned int size_vmode;
39320 unsigned int size_remap;
39321 unsigned int size_total;
39322+ void *pmi_code = NULL;
39323
39324 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39325 return -ENODEV;
39326@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39327 size_remap = size_total;
39328 vesafb_fix.smem_len = size_remap;
39329
39330-#ifndef __i386__
39331- screen_info.vesapm_seg = 0;
39332-#endif
39333-
39334 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39335 printk(KERN_WARNING
39336 "vesafb: cannot reserve video memory at 0x%lx\n",
39337@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39338 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39339 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39340
39341+#ifdef __i386__
39342+
39343+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39344+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39345+ if (!pmi_code)
39346+#elif !defined(CONFIG_PAX_KERNEXEC)
39347+ if (0)
39348+#endif
39349+
39350+#endif
39351+ screen_info.vesapm_seg = 0;
39352+
39353 if (screen_info.vesapm_seg) {
39354- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39355- screen_info.vesapm_seg,screen_info.vesapm_off);
39356+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39357+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39358 }
39359
39360 if (screen_info.vesapm_seg < 0xc000)
39361@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39362
39363 if (ypan || pmi_setpal) {
39364 unsigned short *pmi_base;
39365+
39366 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39367- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39368- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39369+
39370+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39371+ pax_open_kernel();
39372+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39373+#else
39374+ pmi_code = pmi_base;
39375+#endif
39376+
39377+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39378+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39379+
39380+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39381+ pmi_start = ktva_ktla(pmi_start);
39382+ pmi_pal = ktva_ktla(pmi_pal);
39383+ pax_close_kernel();
39384+#endif
39385+
39386 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39387 if (pmi_base[3]) {
39388 printk(KERN_INFO "vesafb: pmi: ports = ");
39389@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39390 info->node, info->fix.id);
39391 return 0;
39392 err:
39393+
39394+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39395+ module_free_exec(NULL, pmi_code);
39396+#endif
39397+
39398 if (info->screen_base)
39399 iounmap(info->screen_base);
39400 framebuffer_release(info);
39401diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39402index 88714ae..16c2e11 100644
39403--- a/drivers/video/via/via_clock.h
39404+++ b/drivers/video/via/via_clock.h
39405@@ -56,7 +56,7 @@ struct via_clock {
39406
39407 void (*set_engine_pll_state)(u8 state);
39408 void (*set_engine_pll)(struct via_pll_config config);
39409-};
39410+} __no_const;
39411
39412
39413 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39414diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39415index e56c934..fc22f4b 100644
39416--- a/drivers/xen/xen-pciback/conf_space.h
39417+++ b/drivers/xen/xen-pciback/conf_space.h
39418@@ -44,15 +44,15 @@ struct config_field {
39419 struct {
39420 conf_dword_write write;
39421 conf_dword_read read;
39422- } dw;
39423+ } __no_const dw;
39424 struct {
39425 conf_word_write write;
39426 conf_word_read read;
39427- } w;
39428+ } __no_const w;
39429 struct {
39430 conf_byte_write write;
39431 conf_byte_read read;
39432- } b;
39433+ } __no_const b;
39434 } u;
39435 struct list_head list;
39436 };
39437diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39438index 879ed88..bc03a01 100644
39439--- a/fs/9p/vfs_inode.c
39440+++ b/fs/9p/vfs_inode.c
39441@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39442 void
39443 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39444 {
39445- char *s = nd_get_link(nd);
39446+ const char *s = nd_get_link(nd);
39447
39448 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39449 IS_ERR(s) ? "<error>" : s);
39450diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39451index 79e2ca7..5828ad1 100644
39452--- a/fs/Kconfig.binfmt
39453+++ b/fs/Kconfig.binfmt
39454@@ -86,7 +86,7 @@ config HAVE_AOUT
39455
39456 config BINFMT_AOUT
39457 tristate "Kernel support for a.out and ECOFF binaries"
39458- depends on HAVE_AOUT
39459+ depends on HAVE_AOUT && BROKEN
39460 ---help---
39461 A.out (Assembler.OUTput) is a set of formats for libraries and
39462 executables used in the earliest versions of UNIX. Linux used
39463diff --git a/fs/aio.c b/fs/aio.c
39464index 969beb0..09fab51 100644
39465--- a/fs/aio.c
39466+++ b/fs/aio.c
39467@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39468 size += sizeof(struct io_event) * nr_events;
39469 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39470
39471- if (nr_pages < 0)
39472+ if (nr_pages <= 0)
39473 return -EINVAL;
39474
39475 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39476@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39477 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39478 {
39479 ssize_t ret;
39480+ struct iovec iovstack;
39481
39482 #ifdef CONFIG_COMPAT
39483 if (compat)
39484 ret = compat_rw_copy_check_uvector(type,
39485 (struct compat_iovec __user *)kiocb->ki_buf,
39486- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39487+ kiocb->ki_nbytes, 1, &iovstack,
39488 &kiocb->ki_iovec, 1);
39489 else
39490 #endif
39491 ret = rw_copy_check_uvector(type,
39492 (struct iovec __user *)kiocb->ki_buf,
39493- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39494+ kiocb->ki_nbytes, 1, &iovstack,
39495 &kiocb->ki_iovec, 1);
39496 if (ret < 0)
39497 goto out;
39498
39499+ if (kiocb->ki_iovec == &iovstack) {
39500+ kiocb->ki_inline_vec = iovstack;
39501+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39502+ }
39503 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39504 kiocb->ki_cur_seg = 0;
39505 /* ki_nbytes/left now reflect bytes instead of segs */
39506diff --git a/fs/attr.c b/fs/attr.c
39507index 7ee7ba4..0c61a60 100644
39508--- a/fs/attr.c
39509+++ b/fs/attr.c
39510@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39511 unsigned long limit;
39512
39513 limit = rlimit(RLIMIT_FSIZE);
39514+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39515 if (limit != RLIM_INFINITY && offset > limit)
39516 goto out_sig;
39517 if (offset > inode->i_sb->s_maxbytes)
39518diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39519index e1fbdee..cd5ea56 100644
39520--- a/fs/autofs4/waitq.c
39521+++ b/fs/autofs4/waitq.c
39522@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39523 {
39524 unsigned long sigpipe, flags;
39525 mm_segment_t fs;
39526- const char *data = (const char *)addr;
39527+ const char __user *data = (const char __force_user *)addr;
39528 ssize_t wr = 0;
39529
39530 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39531diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39532index 8342ca6..82fd192 100644
39533--- a/fs/befs/linuxvfs.c
39534+++ b/fs/befs/linuxvfs.c
39535@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39536 {
39537 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39538 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39539- char *link = nd_get_link(nd);
39540+ const char *link = nd_get_link(nd);
39541 if (!IS_ERR(link))
39542 kfree(link);
39543 }
39544diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39545index a6395bd..a5b24c4 100644
39546--- a/fs/binfmt_aout.c
39547+++ b/fs/binfmt_aout.c
39548@@ -16,6 +16,7 @@
39549 #include <linux/string.h>
39550 #include <linux/fs.h>
39551 #include <linux/file.h>
39552+#include <linux/security.h>
39553 #include <linux/stat.h>
39554 #include <linux/fcntl.h>
39555 #include <linux/ptrace.h>
39556@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39557 #endif
39558 # define START_STACK(u) ((void __user *)u.start_stack)
39559
39560+ memset(&dump, 0, sizeof(dump));
39561+
39562 fs = get_fs();
39563 set_fs(KERNEL_DS);
39564 has_dumped = 1;
39565@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39566
39567 /* If the size of the dump file exceeds the rlimit, then see what would happen
39568 if we wrote the stack, but not the data area. */
39569+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39570 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39571 dump.u_dsize = 0;
39572
39573 /* Make sure we have enough room to write the stack and data areas. */
39574+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39575 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39576 dump.u_ssize = 0;
39577
39578@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39579 rlim = rlimit(RLIMIT_DATA);
39580 if (rlim >= RLIM_INFINITY)
39581 rlim = ~0;
39582+
39583+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39584 if (ex.a_data + ex.a_bss > rlim)
39585 return -ENOMEM;
39586
39587@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39588 install_exec_creds(bprm);
39589 current->flags &= ~PF_FORKNOEXEC;
39590
39591+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39592+ current->mm->pax_flags = 0UL;
39593+#endif
39594+
39595+#ifdef CONFIG_PAX_PAGEEXEC
39596+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39597+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39598+
39599+#ifdef CONFIG_PAX_EMUTRAMP
39600+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39601+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39602+#endif
39603+
39604+#ifdef CONFIG_PAX_MPROTECT
39605+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39606+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39607+#endif
39608+
39609+ }
39610+#endif
39611+
39612 if (N_MAGIC(ex) == OMAGIC) {
39613 unsigned long text_addr, map_size;
39614 loff_t pos;
39615@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39616
39617 down_write(&current->mm->mmap_sem);
39618 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39619- PROT_READ | PROT_WRITE | PROT_EXEC,
39620+ PROT_READ | PROT_WRITE,
39621 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39622 fd_offset + ex.a_text);
39623 up_write(&current->mm->mmap_sem);
39624diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39625index 21ac5ee..31d14e9 100644
39626--- a/fs/binfmt_elf.c
39627+++ b/fs/binfmt_elf.c
39628@@ -32,6 +32,7 @@
39629 #include <linux/elf.h>
39630 #include <linux/utsname.h>
39631 #include <linux/coredump.h>
39632+#include <linux/xattr.h>
39633 #include <asm/uaccess.h>
39634 #include <asm/param.h>
39635 #include <asm/page.h>
39636@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39637 #define elf_core_dump NULL
39638 #endif
39639
39640+#ifdef CONFIG_PAX_MPROTECT
39641+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39642+#endif
39643+
39644 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39645 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39646 #else
39647@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39648 .load_binary = load_elf_binary,
39649 .load_shlib = load_elf_library,
39650 .core_dump = elf_core_dump,
39651+
39652+#ifdef CONFIG_PAX_MPROTECT
39653+ .handle_mprotect= elf_handle_mprotect,
39654+#endif
39655+
39656 .min_coredump = ELF_EXEC_PAGESIZE,
39657 };
39658
39659@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39660
39661 static int set_brk(unsigned long start, unsigned long end)
39662 {
39663+ unsigned long e = end;
39664+
39665 start = ELF_PAGEALIGN(start);
39666 end = ELF_PAGEALIGN(end);
39667 if (end > start) {
39668@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39669 if (BAD_ADDR(addr))
39670 return addr;
39671 }
39672- current->mm->start_brk = current->mm->brk = end;
39673+ current->mm->start_brk = current->mm->brk = e;
39674 return 0;
39675 }
39676
39677@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39678 elf_addr_t __user *u_rand_bytes;
39679 const char *k_platform = ELF_PLATFORM;
39680 const char *k_base_platform = ELF_BASE_PLATFORM;
39681- unsigned char k_rand_bytes[16];
39682+ u32 k_rand_bytes[4];
39683 int items;
39684 elf_addr_t *elf_info;
39685 int ei_index = 0;
39686 const struct cred *cred = current_cred();
39687 struct vm_area_struct *vma;
39688+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39689
39690 /*
39691 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39692@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39693 * Generate 16 random bytes for userspace PRNG seeding.
39694 */
39695 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39696- u_rand_bytes = (elf_addr_t __user *)
39697- STACK_ALLOC(p, sizeof(k_rand_bytes));
39698+ srandom32(k_rand_bytes[0] ^ random32());
39699+ srandom32(k_rand_bytes[1] ^ random32());
39700+ srandom32(k_rand_bytes[2] ^ random32());
39701+ srandom32(k_rand_bytes[3] ^ random32());
39702+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39703+ u_rand_bytes = (elf_addr_t __user *) p;
39704 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39705 return -EFAULT;
39706
39707@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39708 return -EFAULT;
39709 current->mm->env_end = p;
39710
39711+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39712+
39713 /* Put the elf_info on the stack in the right place. */
39714 sp = (elf_addr_t __user *)envp + 1;
39715- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39716+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39717 return -EFAULT;
39718 return 0;
39719 }
39720@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39721 {
39722 struct elf_phdr *elf_phdata;
39723 struct elf_phdr *eppnt;
39724- unsigned long load_addr = 0;
39725+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39726 int load_addr_set = 0;
39727 unsigned long last_bss = 0, elf_bss = 0;
39728- unsigned long error = ~0UL;
39729+ unsigned long error = -EINVAL;
39730 unsigned long total_size;
39731 int retval, i, size;
39732
39733@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39734 goto out_close;
39735 }
39736
39737+#ifdef CONFIG_PAX_SEGMEXEC
39738+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39739+ pax_task_size = SEGMEXEC_TASK_SIZE;
39740+#endif
39741+
39742 eppnt = elf_phdata;
39743 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39744 if (eppnt->p_type == PT_LOAD) {
39745@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39746 k = load_addr + eppnt->p_vaddr;
39747 if (BAD_ADDR(k) ||
39748 eppnt->p_filesz > eppnt->p_memsz ||
39749- eppnt->p_memsz > TASK_SIZE ||
39750- TASK_SIZE - eppnt->p_memsz < k) {
39751+ eppnt->p_memsz > pax_task_size ||
39752+ pax_task_size - eppnt->p_memsz < k) {
39753 error = -ENOMEM;
39754 goto out_close;
39755 }
39756@@ -528,6 +552,351 @@ out:
39757 return error;
39758 }
39759
39760+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39761+{
39762+ unsigned long pax_flags = 0UL;
39763+
39764+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39765+
39766+#ifdef CONFIG_PAX_PAGEEXEC
39767+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39768+ pax_flags |= MF_PAX_PAGEEXEC;
39769+#endif
39770+
39771+#ifdef CONFIG_PAX_SEGMEXEC
39772+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39773+ pax_flags |= MF_PAX_SEGMEXEC;
39774+#endif
39775+
39776+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39777+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39778+ if ((__supported_pte_mask & _PAGE_NX))
39779+ pax_flags &= ~MF_PAX_SEGMEXEC;
39780+ else
39781+ pax_flags &= ~MF_PAX_PAGEEXEC;
39782+ }
39783+#endif
39784+
39785+#ifdef CONFIG_PAX_EMUTRAMP
39786+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39787+ pax_flags |= MF_PAX_EMUTRAMP;
39788+#endif
39789+
39790+#ifdef CONFIG_PAX_MPROTECT
39791+ if (elf_phdata->p_flags & PF_MPROTECT)
39792+ pax_flags |= MF_PAX_MPROTECT;
39793+#endif
39794+
39795+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39796+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39797+ pax_flags |= MF_PAX_RANDMMAP;
39798+#endif
39799+
39800+#endif
39801+
39802+ return pax_flags;
39803+}
39804+
39805+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39806+{
39807+ unsigned long pax_flags = 0UL;
39808+
39809+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39810+
39811+#ifdef CONFIG_PAX_PAGEEXEC
39812+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39813+ pax_flags |= MF_PAX_PAGEEXEC;
39814+#endif
39815+
39816+#ifdef CONFIG_PAX_SEGMEXEC
39817+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39818+ pax_flags |= MF_PAX_SEGMEXEC;
39819+#endif
39820+
39821+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39822+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39823+ if ((__supported_pte_mask & _PAGE_NX))
39824+ pax_flags &= ~MF_PAX_SEGMEXEC;
39825+ else
39826+ pax_flags &= ~MF_PAX_PAGEEXEC;
39827+ }
39828+#endif
39829+
39830+#ifdef CONFIG_PAX_EMUTRAMP
39831+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39832+ pax_flags |= MF_PAX_EMUTRAMP;
39833+#endif
39834+
39835+#ifdef CONFIG_PAX_MPROTECT
39836+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39837+ pax_flags |= MF_PAX_MPROTECT;
39838+#endif
39839+
39840+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39841+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39842+ pax_flags |= MF_PAX_RANDMMAP;
39843+#endif
39844+
39845+#endif
39846+
39847+ return pax_flags;
39848+}
39849+
39850+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39851+{
39852+ unsigned long pax_flags = 0UL;
39853+
39854+#ifdef CONFIG_PAX_EI_PAX
39855+
39856+#ifdef CONFIG_PAX_PAGEEXEC
39857+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39858+ pax_flags |= MF_PAX_PAGEEXEC;
39859+#endif
39860+
39861+#ifdef CONFIG_PAX_SEGMEXEC
39862+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39863+ pax_flags |= MF_PAX_SEGMEXEC;
39864+#endif
39865+
39866+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39867+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39868+ if ((__supported_pte_mask & _PAGE_NX))
39869+ pax_flags &= ~MF_PAX_SEGMEXEC;
39870+ else
39871+ pax_flags &= ~MF_PAX_PAGEEXEC;
39872+ }
39873+#endif
39874+
39875+#ifdef CONFIG_PAX_EMUTRAMP
39876+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39877+ pax_flags |= MF_PAX_EMUTRAMP;
39878+#endif
39879+
39880+#ifdef CONFIG_PAX_MPROTECT
39881+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39882+ pax_flags |= MF_PAX_MPROTECT;
39883+#endif
39884+
39885+#ifdef CONFIG_PAX_ASLR
39886+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39887+ pax_flags |= MF_PAX_RANDMMAP;
39888+#endif
39889+
39890+#else
39891+
39892+#ifdef CONFIG_PAX_PAGEEXEC
39893+ pax_flags |= MF_PAX_PAGEEXEC;
39894+#endif
39895+
39896+#ifdef CONFIG_PAX_MPROTECT
39897+ pax_flags |= MF_PAX_MPROTECT;
39898+#endif
39899+
39900+#ifdef CONFIG_PAX_RANDMMAP
39901+ pax_flags |= MF_PAX_RANDMMAP;
39902+#endif
39903+
39904+#ifdef CONFIG_PAX_SEGMEXEC
39905+ if (!(__supported_pte_mask & _PAGE_NX)) {
39906+ pax_flags &= ~MF_PAX_PAGEEXEC;
39907+ pax_flags |= MF_PAX_SEGMEXEC;
39908+ }
39909+#endif
39910+
39911+#endif
39912+
39913+ return pax_flags;
39914+}
39915+
39916+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39917+{
39918+
39919+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39920+ unsigned long i;
39921+
39922+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39923+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39924+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39925+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39926+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39927+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39928+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39929+ return ~0UL;
39930+
39931+#ifdef CONFIG_PAX_SOFTMODE
39932+ if (pax_softmode)
39933+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39934+ else
39935+#endif
39936+
39937+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39938+ break;
39939+ }
39940+#endif
39941+
39942+ return ~0UL;
39943+}
39944+
39945+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
39946+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39947+{
39948+ unsigned long pax_flags = 0UL;
39949+
39950+#ifdef CONFIG_PAX_PAGEEXEC
39951+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39952+ pax_flags |= MF_PAX_PAGEEXEC;
39953+#endif
39954+
39955+#ifdef CONFIG_PAX_SEGMEXEC
39956+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39957+ pax_flags |= MF_PAX_SEGMEXEC;
39958+#endif
39959+
39960+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39961+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39962+ if ((__supported_pte_mask & _PAGE_NX))
39963+ pax_flags &= ~MF_PAX_SEGMEXEC;
39964+ else
39965+ pax_flags &= ~MF_PAX_PAGEEXEC;
39966+ }
39967+#endif
39968+
39969+#ifdef CONFIG_PAX_EMUTRAMP
39970+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
39971+ pax_flags |= MF_PAX_EMUTRAMP;
39972+#endif
39973+
39974+#ifdef CONFIG_PAX_MPROTECT
39975+ if (pax_flags_softmode & MF_PAX_MPROTECT)
39976+ pax_flags |= MF_PAX_MPROTECT;
39977+#endif
39978+
39979+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39980+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
39981+ pax_flags |= MF_PAX_RANDMMAP;
39982+#endif
39983+
39984+ return pax_flags;
39985+}
39986+
39987+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
39988+{
39989+ unsigned long pax_flags = 0UL;
39990+
39991+#ifdef CONFIG_PAX_PAGEEXEC
39992+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
39993+ pax_flags |= MF_PAX_PAGEEXEC;
39994+#endif
39995+
39996+#ifdef CONFIG_PAX_SEGMEXEC
39997+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
39998+ pax_flags |= MF_PAX_SEGMEXEC;
39999+#endif
40000+
40001+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40002+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40003+ if ((__supported_pte_mask & _PAGE_NX))
40004+ pax_flags &= ~MF_PAX_SEGMEXEC;
40005+ else
40006+ pax_flags &= ~MF_PAX_PAGEEXEC;
40007+ }
40008+#endif
40009+
40010+#ifdef CONFIG_PAX_EMUTRAMP
40011+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40012+ pax_flags |= MF_PAX_EMUTRAMP;
40013+#endif
40014+
40015+#ifdef CONFIG_PAX_MPROTECT
40016+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40017+ pax_flags |= MF_PAX_MPROTECT;
40018+#endif
40019+
40020+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40021+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40022+ pax_flags |= MF_PAX_RANDMMAP;
40023+#endif
40024+
40025+ return pax_flags;
40026+}
40027+#endif
40028+
40029+static unsigned long pax_parse_xattr_pax(struct file * const file)
40030+{
40031+
40032+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40033+ ssize_t xattr_size, i;
40034+ unsigned char xattr_value[5];
40035+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40036+
40037+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40038+ if (xattr_size <= 0)
40039+ return ~0UL;
40040+
40041+ for (i = 0; i < xattr_size; i++)
40042+ switch (xattr_value[i]) {
40043+ default:
40044+ return ~0UL;
40045+
40046+#define parse_flag(option1, option2, flag) \
40047+ case option1: \
40048+ pax_flags_hardmode |= MF_PAX_##flag; \
40049+ break; \
40050+ case option2: \
40051+ pax_flags_softmode |= MF_PAX_##flag; \
40052+ break;
40053+
40054+ parse_flag('p', 'P', PAGEEXEC);
40055+ parse_flag('e', 'E', EMUTRAMP);
40056+ parse_flag('m', 'M', MPROTECT);
40057+ parse_flag('r', 'R', RANDMMAP);
40058+ parse_flag('s', 'S', SEGMEXEC);
40059+
40060+#undef parse_flag
40061+ }
40062+
40063+ if (pax_flags_hardmode & pax_flags_softmode)
40064+ return ~0UL;
40065+
40066+#ifdef CONFIG_PAX_SOFTMODE
40067+ if (pax_softmode)
40068+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40069+ else
40070+#endif
40071+
40072+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40073+#else
40074+ return ~0UL;
40075+#endif
40076+
40077+}
40078+
40079+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40080+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40081+{
40082+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40083+
40084+ pax_flags = pax_parse_ei_pax(elf_ex);
40085+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40086+ xattr_pax_flags = pax_parse_xattr_pax(file);
40087+
40088+ if (pt_pax_flags == ~0UL)
40089+ pt_pax_flags = xattr_pax_flags;
40090+ else if (xattr_pax_flags == ~0UL)
40091+ xattr_pax_flags = pt_pax_flags;
40092+ if (pt_pax_flags != xattr_pax_flags)
40093+ return -EINVAL;
40094+ if (pt_pax_flags != ~0UL)
40095+ pax_flags = pt_pax_flags;
40096+
40097+ if (0 > pax_check_flags(&pax_flags))
40098+ return -EINVAL;
40099+
40100+ current->mm->pax_flags = pax_flags;
40101+ return 0;
40102+}
40103+#endif
40104+
40105 /*
40106 * These are the functions used to load ELF style executables and shared
40107 * libraries. There is no binary dependent code anywhere else.
40108@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40109 {
40110 unsigned int random_variable = 0;
40111
40112+#ifdef CONFIG_PAX_RANDUSTACK
40113+ if (randomize_va_space)
40114+ return stack_top - current->mm->delta_stack;
40115+#endif
40116+
40117 if ((current->flags & PF_RANDOMIZE) &&
40118 !(current->personality & ADDR_NO_RANDOMIZE)) {
40119 random_variable = get_random_int() & STACK_RND_MASK;
40120@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40121 unsigned long load_addr = 0, load_bias = 0;
40122 int load_addr_set = 0;
40123 char * elf_interpreter = NULL;
40124- unsigned long error;
40125+ unsigned long error = 0;
40126 struct elf_phdr *elf_ppnt, *elf_phdata;
40127 unsigned long elf_bss, elf_brk;
40128 int retval, i;
40129@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40130 unsigned long start_code, end_code, start_data, end_data;
40131 unsigned long reloc_func_desc __maybe_unused = 0;
40132 int executable_stack = EXSTACK_DEFAULT;
40133- unsigned long def_flags = 0;
40134 struct {
40135 struct elfhdr elf_ex;
40136 struct elfhdr interp_elf_ex;
40137 } *loc;
40138+ unsigned long pax_task_size = TASK_SIZE;
40139
40140 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40141 if (!loc) {
40142@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40143
40144 /* OK, This is the point of no return */
40145 current->flags &= ~PF_FORKNOEXEC;
40146- current->mm->def_flags = def_flags;
40147+
40148+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40149+ current->mm->pax_flags = 0UL;
40150+#endif
40151+
40152+#ifdef CONFIG_PAX_DLRESOLVE
40153+ current->mm->call_dl_resolve = 0UL;
40154+#endif
40155+
40156+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40157+ current->mm->call_syscall = 0UL;
40158+#endif
40159+
40160+#ifdef CONFIG_PAX_ASLR
40161+ current->mm->delta_mmap = 0UL;
40162+ current->mm->delta_stack = 0UL;
40163+#endif
40164+
40165+ current->mm->def_flags = 0;
40166+
40167+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40168+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40169+ send_sig(SIGKILL, current, 0);
40170+ goto out_free_dentry;
40171+ }
40172+#endif
40173+
40174+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40175+ pax_set_initial_flags(bprm);
40176+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40177+ if (pax_set_initial_flags_func)
40178+ (pax_set_initial_flags_func)(bprm);
40179+#endif
40180+
40181+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40182+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40183+ current->mm->context.user_cs_limit = PAGE_SIZE;
40184+ current->mm->def_flags |= VM_PAGEEXEC;
40185+ }
40186+#endif
40187+
40188+#ifdef CONFIG_PAX_SEGMEXEC
40189+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40190+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40191+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40192+ pax_task_size = SEGMEXEC_TASK_SIZE;
40193+ current->mm->def_flags |= VM_NOHUGEPAGE;
40194+ }
40195+#endif
40196+
40197+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40198+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40199+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40200+ put_cpu();
40201+ }
40202+#endif
40203
40204 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40205 may depend on the personality. */
40206 SET_PERSONALITY(loc->elf_ex);
40207+
40208+#ifdef CONFIG_PAX_ASLR
40209+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40210+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40211+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40212+ }
40213+#endif
40214+
40215+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40216+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40217+ executable_stack = EXSTACK_DISABLE_X;
40218+ current->personality &= ~READ_IMPLIES_EXEC;
40219+ } else
40220+#endif
40221+
40222 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40223 current->personality |= READ_IMPLIES_EXEC;
40224
40225@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40226 #else
40227 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40228 #endif
40229+
40230+#ifdef CONFIG_PAX_RANDMMAP
40231+ /* PaX: randomize base address at the default exe base if requested */
40232+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40233+#ifdef CONFIG_SPARC64
40234+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40235+#else
40236+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40237+#endif
40238+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40239+ elf_flags |= MAP_FIXED;
40240+ }
40241+#endif
40242+
40243 }
40244
40245 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40246@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40247 * allowed task size. Note that p_filesz must always be
40248 * <= p_memsz so it is only necessary to check p_memsz.
40249 */
40250- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40251- elf_ppnt->p_memsz > TASK_SIZE ||
40252- TASK_SIZE - elf_ppnt->p_memsz < k) {
40253+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40254+ elf_ppnt->p_memsz > pax_task_size ||
40255+ pax_task_size - elf_ppnt->p_memsz < k) {
40256 /* set_brk can never work. Avoid overflows. */
40257 send_sig(SIGKILL, current, 0);
40258 retval = -EINVAL;
40259@@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40260 start_data += load_bias;
40261 end_data += load_bias;
40262
40263+#ifdef CONFIG_PAX_RANDMMAP
40264+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40265+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40266+#endif
40267+
40268 /* Calling set_brk effectively mmaps the pages that we need
40269 * for the bss and break sections. We must do this before
40270 * mapping in the interpreter, to make sure it doesn't wind
40271@@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40272 goto out_free_dentry;
40273 }
40274 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40275- send_sig(SIGSEGV, current, 0);
40276- retval = -EFAULT; /* Nobody gets to see this, but.. */
40277- goto out_free_dentry;
40278+ /*
40279+ * This bss-zeroing can fail if the ELF
40280+ * file specifies odd protections. So
40281+ * we don't check the return value
40282+ */
40283 }
40284
40285 if (elf_interpreter) {
40286@@ -1098,7 +1563,7 @@ out:
40287 * Decide what to dump of a segment, part, all or none.
40288 */
40289 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40290- unsigned long mm_flags)
40291+ unsigned long mm_flags, long signr)
40292 {
40293 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40294
40295@@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40296 if (vma->vm_file == NULL)
40297 return 0;
40298
40299- if (FILTER(MAPPED_PRIVATE))
40300+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40301 goto whole;
40302
40303 /*
40304@@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40305 {
40306 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40307 int i = 0;
40308- do
40309+ do {
40310 i += 2;
40311- while (auxv[i - 2] != AT_NULL);
40312+ } while (auxv[i - 2] != AT_NULL);
40313 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40314 }
40315
40316@@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40317 }
40318
40319 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40320- unsigned long mm_flags)
40321+ struct coredump_params *cprm)
40322 {
40323 struct vm_area_struct *vma;
40324 size_t size = 0;
40325
40326 for (vma = first_vma(current, gate_vma); vma != NULL;
40327 vma = next_vma(vma, gate_vma))
40328- size += vma_dump_size(vma, mm_flags);
40329+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40330 return size;
40331 }
40332
40333@@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40334
40335 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40336
40337- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40338+ offset += elf_core_vma_data_size(gate_vma, cprm);
40339 offset += elf_core_extra_data_size();
40340 e_shoff = offset;
40341
40342@@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40343 offset = dataoff;
40344
40345 size += sizeof(*elf);
40346+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40347 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40348 goto end_coredump;
40349
40350 size += sizeof(*phdr4note);
40351+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40352 if (size > cprm->limit
40353 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40354 goto end_coredump;
40355@@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40356 phdr.p_offset = offset;
40357 phdr.p_vaddr = vma->vm_start;
40358 phdr.p_paddr = 0;
40359- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40360+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40361 phdr.p_memsz = vma->vm_end - vma->vm_start;
40362 offset += phdr.p_filesz;
40363 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40364@@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40365 phdr.p_align = ELF_EXEC_PAGESIZE;
40366
40367 size += sizeof(phdr);
40368+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40369 if (size > cprm->limit
40370 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40371 goto end_coredump;
40372@@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40373 unsigned long addr;
40374 unsigned long end;
40375
40376- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40377+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40378
40379 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40380 struct page *page;
40381@@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40382 page = get_dump_page(addr);
40383 if (page) {
40384 void *kaddr = kmap(page);
40385+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40386 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40387 !dump_write(cprm->file, kaddr,
40388 PAGE_SIZE);
40389@@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40390
40391 if (e_phnum == PN_XNUM) {
40392 size += sizeof(*shdr4extnum);
40393+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40394 if (size > cprm->limit
40395 || !dump_write(cprm->file, shdr4extnum,
40396 sizeof(*shdr4extnum)))
40397@@ -2075,6 +2545,97 @@ out:
40398
40399 #endif /* CONFIG_ELF_CORE */
40400
40401+#ifdef CONFIG_PAX_MPROTECT
40402+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40403+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40404+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40405+ *
40406+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40407+ * basis because we want to allow the common case and not the special ones.
40408+ */
40409+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40410+{
40411+ struct elfhdr elf_h;
40412+ struct elf_phdr elf_p;
40413+ unsigned long i;
40414+ unsigned long oldflags;
40415+ bool is_textrel_rw, is_textrel_rx, is_relro;
40416+
40417+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40418+ return;
40419+
40420+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40421+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40422+
40423+#ifdef CONFIG_PAX_ELFRELOCS
40424+ /* possible TEXTREL */
40425+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40426+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40427+#else
40428+ is_textrel_rw = false;
40429+ is_textrel_rx = false;
40430+#endif
40431+
40432+ /* possible RELRO */
40433+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40434+
40435+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40436+ return;
40437+
40438+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40439+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40440+
40441+#ifdef CONFIG_PAX_ETEXECRELOCS
40442+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40443+#else
40444+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40445+#endif
40446+
40447+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40448+ !elf_check_arch(&elf_h) ||
40449+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40450+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40451+ return;
40452+
40453+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40454+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40455+ return;
40456+ switch (elf_p.p_type) {
40457+ case PT_DYNAMIC:
40458+ if (!is_textrel_rw && !is_textrel_rx)
40459+ continue;
40460+ i = 0UL;
40461+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40462+ elf_dyn dyn;
40463+
40464+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40465+ return;
40466+ if (dyn.d_tag == DT_NULL)
40467+ return;
40468+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40469+ gr_log_textrel(vma);
40470+ if (is_textrel_rw)
40471+ vma->vm_flags |= VM_MAYWRITE;
40472+ else
40473+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40474+ vma->vm_flags &= ~VM_MAYWRITE;
40475+ return;
40476+ }
40477+ i++;
40478+ }
40479+ return;
40480+
40481+ case PT_GNU_RELRO:
40482+ if (!is_relro)
40483+ continue;
40484+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40485+ vma->vm_flags &= ~VM_MAYWRITE;
40486+ return;
40487+ }
40488+ }
40489+}
40490+#endif
40491+
40492 static int __init init_elf_binfmt(void)
40493 {
40494 return register_binfmt(&elf_format);
40495diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40496index 1bffbe0..c8c283e 100644
40497--- a/fs/binfmt_flat.c
40498+++ b/fs/binfmt_flat.c
40499@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40500 realdatastart = (unsigned long) -ENOMEM;
40501 printk("Unable to allocate RAM for process data, errno %d\n",
40502 (int)-realdatastart);
40503+ down_write(&current->mm->mmap_sem);
40504 do_munmap(current->mm, textpos, text_len);
40505+ up_write(&current->mm->mmap_sem);
40506 ret = realdatastart;
40507 goto err;
40508 }
40509@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40510 }
40511 if (IS_ERR_VALUE(result)) {
40512 printk("Unable to read data+bss, errno %d\n", (int)-result);
40513+ down_write(&current->mm->mmap_sem);
40514 do_munmap(current->mm, textpos, text_len);
40515 do_munmap(current->mm, realdatastart, len);
40516+ up_write(&current->mm->mmap_sem);
40517 ret = result;
40518 goto err;
40519 }
40520@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40521 }
40522 if (IS_ERR_VALUE(result)) {
40523 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40524+ down_write(&current->mm->mmap_sem);
40525 do_munmap(current->mm, textpos, text_len + data_len + extra +
40526 MAX_SHARED_LIBS * sizeof(unsigned long));
40527+ up_write(&current->mm->mmap_sem);
40528 ret = result;
40529 goto err;
40530 }
40531diff --git a/fs/bio.c b/fs/bio.c
40532index b1fe82c..84da0a9 100644
40533--- a/fs/bio.c
40534+++ b/fs/bio.c
40535@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40536 const int read = bio_data_dir(bio) == READ;
40537 struct bio_map_data *bmd = bio->bi_private;
40538 int i;
40539- char *p = bmd->sgvecs[0].iov_base;
40540+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40541
40542 __bio_for_each_segment(bvec, bio, i, 0) {
40543 char *addr = page_address(bvec->bv_page);
40544diff --git a/fs/block_dev.c b/fs/block_dev.c
40545index b07f1da..9efcb92 100644
40546--- a/fs/block_dev.c
40547+++ b/fs/block_dev.c
40548@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40549 else if (bdev->bd_contains == bdev)
40550 return true; /* is a whole device which isn't held */
40551
40552- else if (whole->bd_holder == bd_may_claim)
40553+ else if (whole->bd_holder == (void *)bd_may_claim)
40554 return true; /* is a partition of a device that is being partitioned */
40555 else if (whole->bd_holder != NULL)
40556 return false; /* is a partition of a held device */
40557diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40558index dede441..f2a2507 100644
40559--- a/fs/btrfs/ctree.c
40560+++ b/fs/btrfs/ctree.c
40561@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40562 free_extent_buffer(buf);
40563 add_root_to_dirty_list(root);
40564 } else {
40565- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40566- parent_start = parent->start;
40567- else
40568+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40569+ if (parent)
40570+ parent_start = parent->start;
40571+ else
40572+ parent_start = 0;
40573+ } else
40574 parent_start = 0;
40575
40576 WARN_ON(trans->transid != btrfs_header_generation(parent));
40577diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40578index fd1a06d..6e9033d 100644
40579--- a/fs/btrfs/inode.c
40580+++ b/fs/btrfs/inode.c
40581@@ -6895,7 +6895,7 @@ fail:
40582 return -ENOMEM;
40583 }
40584
40585-static int btrfs_getattr(struct vfsmount *mnt,
40586+int btrfs_getattr(struct vfsmount *mnt,
40587 struct dentry *dentry, struct kstat *stat)
40588 {
40589 struct inode *inode = dentry->d_inode;
40590@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40591 return 0;
40592 }
40593
40594+EXPORT_SYMBOL(btrfs_getattr);
40595+
40596+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40597+{
40598+ return BTRFS_I(inode)->root->anon_dev;
40599+}
40600+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40601+
40602 /*
40603 * If a file is moved, it will inherit the cow and compression flags of the new
40604 * directory.
40605diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40606index c04f02c..f5c9e2e 100644
40607--- a/fs/btrfs/ioctl.c
40608+++ b/fs/btrfs/ioctl.c
40609@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40610 for (i = 0; i < num_types; i++) {
40611 struct btrfs_space_info *tmp;
40612
40613+ /* Don't copy in more than we allocated */
40614 if (!slot_count)
40615 break;
40616
40617+ slot_count--;
40618+
40619 info = NULL;
40620 rcu_read_lock();
40621 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40622@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40623 memcpy(dest, &space, sizeof(space));
40624 dest++;
40625 space_args.total_spaces++;
40626- slot_count--;
40627 }
40628- if (!slot_count)
40629- break;
40630 }
40631 up_read(&info->groups_sem);
40632 }
40633
40634- user_dest = (struct btrfs_ioctl_space_info *)
40635+ user_dest = (struct btrfs_ioctl_space_info __user *)
40636 (arg + sizeof(struct btrfs_ioctl_space_args));
40637
40638 if (copy_to_user(user_dest, dest_orig, alloc_size))
40639diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40640index cfb5543..1ae7347 100644
40641--- a/fs/btrfs/relocation.c
40642+++ b/fs/btrfs/relocation.c
40643@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40644 }
40645 spin_unlock(&rc->reloc_root_tree.lock);
40646
40647- BUG_ON((struct btrfs_root *)node->data != root);
40648+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40649
40650 if (!del) {
40651 spin_lock(&rc->reloc_root_tree.lock);
40652diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40653index 622f469..e8d2d55 100644
40654--- a/fs/cachefiles/bind.c
40655+++ b/fs/cachefiles/bind.c
40656@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40657 args);
40658
40659 /* start by checking things over */
40660- ASSERT(cache->fstop_percent >= 0 &&
40661- cache->fstop_percent < cache->fcull_percent &&
40662+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40663 cache->fcull_percent < cache->frun_percent &&
40664 cache->frun_percent < 100);
40665
40666- ASSERT(cache->bstop_percent >= 0 &&
40667- cache->bstop_percent < cache->bcull_percent &&
40668+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40669 cache->bcull_percent < cache->brun_percent &&
40670 cache->brun_percent < 100);
40671
40672diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40673index 0a1467b..6a53245 100644
40674--- a/fs/cachefiles/daemon.c
40675+++ b/fs/cachefiles/daemon.c
40676@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40677 if (n > buflen)
40678 return -EMSGSIZE;
40679
40680- if (copy_to_user(_buffer, buffer, n) != 0)
40681+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40682 return -EFAULT;
40683
40684 return n;
40685@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40686 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40687 return -EIO;
40688
40689- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40690+ if (datalen > PAGE_SIZE - 1)
40691 return -EOPNOTSUPP;
40692
40693 /* drag the command string into the kernel so we can parse it */
40694@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40695 if (args[0] != '%' || args[1] != '\0')
40696 return -EINVAL;
40697
40698- if (fstop < 0 || fstop >= cache->fcull_percent)
40699+ if (fstop >= cache->fcull_percent)
40700 return cachefiles_daemon_range_error(cache, args);
40701
40702 cache->fstop_percent = fstop;
40703@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40704 if (args[0] != '%' || args[1] != '\0')
40705 return -EINVAL;
40706
40707- if (bstop < 0 || bstop >= cache->bcull_percent)
40708+ if (bstop >= cache->bcull_percent)
40709 return cachefiles_daemon_range_error(cache, args);
40710
40711 cache->bstop_percent = bstop;
40712diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40713index bd6bc1b..b627b53 100644
40714--- a/fs/cachefiles/internal.h
40715+++ b/fs/cachefiles/internal.h
40716@@ -57,7 +57,7 @@ struct cachefiles_cache {
40717 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40718 struct rb_root active_nodes; /* active nodes (can't be culled) */
40719 rwlock_t active_lock; /* lock for active_nodes */
40720- atomic_t gravecounter; /* graveyard uniquifier */
40721+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40722 unsigned frun_percent; /* when to stop culling (% files) */
40723 unsigned fcull_percent; /* when to start culling (% files) */
40724 unsigned fstop_percent; /* when to stop allocating (% files) */
40725@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40726 * proc.c
40727 */
40728 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40729-extern atomic_t cachefiles_lookup_histogram[HZ];
40730-extern atomic_t cachefiles_mkdir_histogram[HZ];
40731-extern atomic_t cachefiles_create_histogram[HZ];
40732+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40733+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40734+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40735
40736 extern int __init cachefiles_proc_init(void);
40737 extern void cachefiles_proc_cleanup(void);
40738 static inline
40739-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40740+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40741 {
40742 unsigned long jif = jiffies - start_jif;
40743 if (jif >= HZ)
40744 jif = HZ - 1;
40745- atomic_inc(&histogram[jif]);
40746+ atomic_inc_unchecked(&histogram[jif]);
40747 }
40748
40749 #else
40750diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40751index a0358c2..d6137f2 100644
40752--- a/fs/cachefiles/namei.c
40753+++ b/fs/cachefiles/namei.c
40754@@ -318,7 +318,7 @@ try_again:
40755 /* first step is to make up a grave dentry in the graveyard */
40756 sprintf(nbuffer, "%08x%08x",
40757 (uint32_t) get_seconds(),
40758- (uint32_t) atomic_inc_return(&cache->gravecounter));
40759+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40760
40761 /* do the multiway lock magic */
40762 trap = lock_rename(cache->graveyard, dir);
40763diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40764index eccd339..4c1d995 100644
40765--- a/fs/cachefiles/proc.c
40766+++ b/fs/cachefiles/proc.c
40767@@ -14,9 +14,9 @@
40768 #include <linux/seq_file.h>
40769 #include "internal.h"
40770
40771-atomic_t cachefiles_lookup_histogram[HZ];
40772-atomic_t cachefiles_mkdir_histogram[HZ];
40773-atomic_t cachefiles_create_histogram[HZ];
40774+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40775+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40776+atomic_unchecked_t cachefiles_create_histogram[HZ];
40777
40778 /*
40779 * display the latency histogram
40780@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40781 return 0;
40782 default:
40783 index = (unsigned long) v - 3;
40784- x = atomic_read(&cachefiles_lookup_histogram[index]);
40785- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40786- z = atomic_read(&cachefiles_create_histogram[index]);
40787+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40788+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40789+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40790 if (x == 0 && y == 0 && z == 0)
40791 return 0;
40792
40793diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40794index 0e3c092..818480e 100644
40795--- a/fs/cachefiles/rdwr.c
40796+++ b/fs/cachefiles/rdwr.c
40797@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40798 old_fs = get_fs();
40799 set_fs(KERNEL_DS);
40800 ret = file->f_op->write(
40801- file, (const void __user *) data, len, &pos);
40802+ file, (const void __force_user *) data, len, &pos);
40803 set_fs(old_fs);
40804 kunmap(page);
40805 if (ret != len)
40806diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40807index 9895400..fa40a7d 100644
40808--- a/fs/ceph/dir.c
40809+++ b/fs/ceph/dir.c
40810@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40811 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40812 struct ceph_mds_client *mdsc = fsc->mdsc;
40813 unsigned frag = fpos_frag(filp->f_pos);
40814- int off = fpos_off(filp->f_pos);
40815+ unsigned int off = fpos_off(filp->f_pos);
40816 int err;
40817 u32 ftype;
40818 struct ceph_mds_reply_info_parsed *rinfo;
40819diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40820index 84e8c07..6170d31 100644
40821--- a/fs/cifs/cifs_debug.c
40822+++ b/fs/cifs/cifs_debug.c
40823@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40824
40825 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40826 #ifdef CONFIG_CIFS_STATS2
40827- atomic_set(&totBufAllocCount, 0);
40828- atomic_set(&totSmBufAllocCount, 0);
40829+ atomic_set_unchecked(&totBufAllocCount, 0);
40830+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40831 #endif /* CONFIG_CIFS_STATS2 */
40832 spin_lock(&cifs_tcp_ses_lock);
40833 list_for_each(tmp1, &cifs_tcp_ses_list) {
40834@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40835 tcon = list_entry(tmp3,
40836 struct cifs_tcon,
40837 tcon_list);
40838- atomic_set(&tcon->num_smbs_sent, 0);
40839- atomic_set(&tcon->num_writes, 0);
40840- atomic_set(&tcon->num_reads, 0);
40841- atomic_set(&tcon->num_oplock_brks, 0);
40842- atomic_set(&tcon->num_opens, 0);
40843- atomic_set(&tcon->num_posixopens, 0);
40844- atomic_set(&tcon->num_posixmkdirs, 0);
40845- atomic_set(&tcon->num_closes, 0);
40846- atomic_set(&tcon->num_deletes, 0);
40847- atomic_set(&tcon->num_mkdirs, 0);
40848- atomic_set(&tcon->num_rmdirs, 0);
40849- atomic_set(&tcon->num_renames, 0);
40850- atomic_set(&tcon->num_t2renames, 0);
40851- atomic_set(&tcon->num_ffirst, 0);
40852- atomic_set(&tcon->num_fnext, 0);
40853- atomic_set(&tcon->num_fclose, 0);
40854- atomic_set(&tcon->num_hardlinks, 0);
40855- atomic_set(&tcon->num_symlinks, 0);
40856- atomic_set(&tcon->num_locks, 0);
40857+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40858+ atomic_set_unchecked(&tcon->num_writes, 0);
40859+ atomic_set_unchecked(&tcon->num_reads, 0);
40860+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40861+ atomic_set_unchecked(&tcon->num_opens, 0);
40862+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40863+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40864+ atomic_set_unchecked(&tcon->num_closes, 0);
40865+ atomic_set_unchecked(&tcon->num_deletes, 0);
40866+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40867+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40868+ atomic_set_unchecked(&tcon->num_renames, 0);
40869+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40870+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40871+ atomic_set_unchecked(&tcon->num_fnext, 0);
40872+ atomic_set_unchecked(&tcon->num_fclose, 0);
40873+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40874+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40875+ atomic_set_unchecked(&tcon->num_locks, 0);
40876 }
40877 }
40878 }
40879@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40880 smBufAllocCount.counter, cifs_min_small);
40881 #ifdef CONFIG_CIFS_STATS2
40882 seq_printf(m, "Total Large %d Small %d Allocations\n",
40883- atomic_read(&totBufAllocCount),
40884- atomic_read(&totSmBufAllocCount));
40885+ atomic_read_unchecked(&totBufAllocCount),
40886+ atomic_read_unchecked(&totSmBufAllocCount));
40887 #endif /* CONFIG_CIFS_STATS2 */
40888
40889 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40890@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40891 if (tcon->need_reconnect)
40892 seq_puts(m, "\tDISCONNECTED ");
40893 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40894- atomic_read(&tcon->num_smbs_sent),
40895- atomic_read(&tcon->num_oplock_brks));
40896+ atomic_read_unchecked(&tcon->num_smbs_sent),
40897+ atomic_read_unchecked(&tcon->num_oplock_brks));
40898 seq_printf(m, "\nReads: %d Bytes: %lld",
40899- atomic_read(&tcon->num_reads),
40900+ atomic_read_unchecked(&tcon->num_reads),
40901 (long long)(tcon->bytes_read));
40902 seq_printf(m, "\nWrites: %d Bytes: %lld",
40903- atomic_read(&tcon->num_writes),
40904+ atomic_read_unchecked(&tcon->num_writes),
40905 (long long)(tcon->bytes_written));
40906 seq_printf(m, "\nFlushes: %d",
40907- atomic_read(&tcon->num_flushes));
40908+ atomic_read_unchecked(&tcon->num_flushes));
40909 seq_printf(m, "\nLocks: %d HardLinks: %d "
40910 "Symlinks: %d",
40911- atomic_read(&tcon->num_locks),
40912- atomic_read(&tcon->num_hardlinks),
40913- atomic_read(&tcon->num_symlinks));
40914+ atomic_read_unchecked(&tcon->num_locks),
40915+ atomic_read_unchecked(&tcon->num_hardlinks),
40916+ atomic_read_unchecked(&tcon->num_symlinks));
40917 seq_printf(m, "\nOpens: %d Closes: %d "
40918 "Deletes: %d",
40919- atomic_read(&tcon->num_opens),
40920- atomic_read(&tcon->num_closes),
40921- atomic_read(&tcon->num_deletes));
40922+ atomic_read_unchecked(&tcon->num_opens),
40923+ atomic_read_unchecked(&tcon->num_closes),
40924+ atomic_read_unchecked(&tcon->num_deletes));
40925 seq_printf(m, "\nPosix Opens: %d "
40926 "Posix Mkdirs: %d",
40927- atomic_read(&tcon->num_posixopens),
40928- atomic_read(&tcon->num_posixmkdirs));
40929+ atomic_read_unchecked(&tcon->num_posixopens),
40930+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40931 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40932- atomic_read(&tcon->num_mkdirs),
40933- atomic_read(&tcon->num_rmdirs));
40934+ atomic_read_unchecked(&tcon->num_mkdirs),
40935+ atomic_read_unchecked(&tcon->num_rmdirs));
40936 seq_printf(m, "\nRenames: %d T2 Renames %d",
40937- atomic_read(&tcon->num_renames),
40938- atomic_read(&tcon->num_t2renames));
40939+ atomic_read_unchecked(&tcon->num_renames),
40940+ atomic_read_unchecked(&tcon->num_t2renames));
40941 seq_printf(m, "\nFindFirst: %d FNext %d "
40942 "FClose %d",
40943- atomic_read(&tcon->num_ffirst),
40944- atomic_read(&tcon->num_fnext),
40945- atomic_read(&tcon->num_fclose));
40946+ atomic_read_unchecked(&tcon->num_ffirst),
40947+ atomic_read_unchecked(&tcon->num_fnext),
40948+ atomic_read_unchecked(&tcon->num_fclose));
40949 }
40950 }
40951 }
40952diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40953index 8f1fe32..38f9e27 100644
40954--- a/fs/cifs/cifsfs.c
40955+++ b/fs/cifs/cifsfs.c
40956@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40957 cifs_req_cachep = kmem_cache_create("cifs_request",
40958 CIFSMaxBufSize +
40959 MAX_CIFS_HDR_SIZE, 0,
40960- SLAB_HWCACHE_ALIGN, NULL);
40961+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40962 if (cifs_req_cachep == NULL)
40963 return -ENOMEM;
40964
40965@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
40966 efficient to alloc 1 per page off the slab compared to 17K (5page)
40967 alloc of large cifs buffers even when page debugging is on */
40968 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40969- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40970+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40971 NULL);
40972 if (cifs_sm_req_cachep == NULL) {
40973 mempool_destroy(cifs_req_poolp);
40974@@ -1101,8 +1101,8 @@ init_cifs(void)
40975 atomic_set(&bufAllocCount, 0);
40976 atomic_set(&smBufAllocCount, 0);
40977 #ifdef CONFIG_CIFS_STATS2
40978- atomic_set(&totBufAllocCount, 0);
40979- atomic_set(&totSmBufAllocCount, 0);
40980+ atomic_set_unchecked(&totBufAllocCount, 0);
40981+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40982 #endif /* CONFIG_CIFS_STATS2 */
40983
40984 atomic_set(&midCount, 0);
40985diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
40986index 8238aa1..0347196 100644
40987--- a/fs/cifs/cifsglob.h
40988+++ b/fs/cifs/cifsglob.h
40989@@ -392,28 +392,28 @@ struct cifs_tcon {
40990 __u16 Flags; /* optional support bits */
40991 enum statusEnum tidStatus;
40992 #ifdef CONFIG_CIFS_STATS
40993- atomic_t num_smbs_sent;
40994- atomic_t num_writes;
40995- atomic_t num_reads;
40996- atomic_t num_flushes;
40997- atomic_t num_oplock_brks;
40998- atomic_t num_opens;
40999- atomic_t num_closes;
41000- atomic_t num_deletes;
41001- atomic_t num_mkdirs;
41002- atomic_t num_posixopens;
41003- atomic_t num_posixmkdirs;
41004- atomic_t num_rmdirs;
41005- atomic_t num_renames;
41006- atomic_t num_t2renames;
41007- atomic_t num_ffirst;
41008- atomic_t num_fnext;
41009- atomic_t num_fclose;
41010- atomic_t num_hardlinks;
41011- atomic_t num_symlinks;
41012- atomic_t num_locks;
41013- atomic_t num_acl_get;
41014- atomic_t num_acl_set;
41015+ atomic_unchecked_t num_smbs_sent;
41016+ atomic_unchecked_t num_writes;
41017+ atomic_unchecked_t num_reads;
41018+ atomic_unchecked_t num_flushes;
41019+ atomic_unchecked_t num_oplock_brks;
41020+ atomic_unchecked_t num_opens;
41021+ atomic_unchecked_t num_closes;
41022+ atomic_unchecked_t num_deletes;
41023+ atomic_unchecked_t num_mkdirs;
41024+ atomic_unchecked_t num_posixopens;
41025+ atomic_unchecked_t num_posixmkdirs;
41026+ atomic_unchecked_t num_rmdirs;
41027+ atomic_unchecked_t num_renames;
41028+ atomic_unchecked_t num_t2renames;
41029+ atomic_unchecked_t num_ffirst;
41030+ atomic_unchecked_t num_fnext;
41031+ atomic_unchecked_t num_fclose;
41032+ atomic_unchecked_t num_hardlinks;
41033+ atomic_unchecked_t num_symlinks;
41034+ atomic_unchecked_t num_locks;
41035+ atomic_unchecked_t num_acl_get;
41036+ atomic_unchecked_t num_acl_set;
41037 #ifdef CONFIG_CIFS_STATS2
41038 unsigned long long time_writes;
41039 unsigned long long time_reads;
41040@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41041 }
41042
41043 #ifdef CONFIG_CIFS_STATS
41044-#define cifs_stats_inc atomic_inc
41045+#define cifs_stats_inc atomic_inc_unchecked
41046
41047 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41048 unsigned int bytes)
41049@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41050 /* Various Debug counters */
41051 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41052 #ifdef CONFIG_CIFS_STATS2
41053-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41054-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41055+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41056+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41057 #endif
41058 GLOBAL_EXTERN atomic_t smBufAllocCount;
41059 GLOBAL_EXTERN atomic_t midCount;
41060diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41061index 6b0e064..94e6c3c 100644
41062--- a/fs/cifs/link.c
41063+++ b/fs/cifs/link.c
41064@@ -600,7 +600,7 @@ symlink_exit:
41065
41066 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41067 {
41068- char *p = nd_get_link(nd);
41069+ const char *p = nd_get_link(nd);
41070 if (!IS_ERR(p))
41071 kfree(p);
41072 }
41073diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41074index 703ef5c..2a44ed5 100644
41075--- a/fs/cifs/misc.c
41076+++ b/fs/cifs/misc.c
41077@@ -156,7 +156,7 @@ cifs_buf_get(void)
41078 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41079 atomic_inc(&bufAllocCount);
41080 #ifdef CONFIG_CIFS_STATS2
41081- atomic_inc(&totBufAllocCount);
41082+ atomic_inc_unchecked(&totBufAllocCount);
41083 #endif /* CONFIG_CIFS_STATS2 */
41084 }
41085
41086@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41087 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41088 atomic_inc(&smBufAllocCount);
41089 #ifdef CONFIG_CIFS_STATS2
41090- atomic_inc(&totSmBufAllocCount);
41091+ atomic_inc_unchecked(&totSmBufAllocCount);
41092 #endif /* CONFIG_CIFS_STATS2 */
41093
41094 }
41095diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41096index 6901578..d402eb5 100644
41097--- a/fs/coda/cache.c
41098+++ b/fs/coda/cache.c
41099@@ -24,7 +24,7 @@
41100 #include "coda_linux.h"
41101 #include "coda_cache.h"
41102
41103-static atomic_t permission_epoch = ATOMIC_INIT(0);
41104+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41105
41106 /* replace or extend an acl cache hit */
41107 void coda_cache_enter(struct inode *inode, int mask)
41108@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41109 struct coda_inode_info *cii = ITOC(inode);
41110
41111 spin_lock(&cii->c_lock);
41112- cii->c_cached_epoch = atomic_read(&permission_epoch);
41113+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41114 if (cii->c_uid != current_fsuid()) {
41115 cii->c_uid = current_fsuid();
41116 cii->c_cached_perm = mask;
41117@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41118 {
41119 struct coda_inode_info *cii = ITOC(inode);
41120 spin_lock(&cii->c_lock);
41121- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41122+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41123 spin_unlock(&cii->c_lock);
41124 }
41125
41126 /* remove all acl caches */
41127 void coda_cache_clear_all(struct super_block *sb)
41128 {
41129- atomic_inc(&permission_epoch);
41130+ atomic_inc_unchecked(&permission_epoch);
41131 }
41132
41133
41134@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41135 spin_lock(&cii->c_lock);
41136 hit = (mask & cii->c_cached_perm) == mask &&
41137 cii->c_uid == current_fsuid() &&
41138- cii->c_cached_epoch == atomic_read(&permission_epoch);
41139+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41140 spin_unlock(&cii->c_lock);
41141
41142 return hit;
41143diff --git a/fs/compat.c b/fs/compat.c
41144index c987875..08771ca 100644
41145--- a/fs/compat.c
41146+++ b/fs/compat.c
41147@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41148 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41149 {
41150 compat_ino_t ino = stat->ino;
41151- typeof(ubuf->st_uid) uid = 0;
41152- typeof(ubuf->st_gid) gid = 0;
41153+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41154+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41155 int err;
41156
41157 SET_UID(uid, stat->uid);
41158@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41159
41160 set_fs(KERNEL_DS);
41161 /* The __user pointer cast is valid because of the set_fs() */
41162- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41163+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41164 set_fs(oldfs);
41165 /* truncating is ok because it's a user address */
41166 if (!ret)
41167@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41168 goto out;
41169
41170 ret = -EINVAL;
41171- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41172+ if (nr_segs > UIO_MAXIOV)
41173 goto out;
41174 if (nr_segs > fast_segs) {
41175 ret = -ENOMEM;
41176@@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41177
41178 struct compat_readdir_callback {
41179 struct compat_old_linux_dirent __user *dirent;
41180+ struct file * file;
41181 int result;
41182 };
41183
41184@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41185 buf->result = -EOVERFLOW;
41186 return -EOVERFLOW;
41187 }
41188+
41189+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41190+ return 0;
41191+
41192 buf->result++;
41193 dirent = buf->dirent;
41194 if (!access_ok(VERIFY_WRITE, dirent,
41195@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41196
41197 buf.result = 0;
41198 buf.dirent = dirent;
41199+ buf.file = file;
41200
41201 error = vfs_readdir(file, compat_fillonedir, &buf);
41202 if (buf.result)
41203@@ -914,6 +920,7 @@ struct compat_linux_dirent {
41204 struct compat_getdents_callback {
41205 struct compat_linux_dirent __user *current_dir;
41206 struct compat_linux_dirent __user *previous;
41207+ struct file * file;
41208 int count;
41209 int error;
41210 };
41211@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41212 buf->error = -EOVERFLOW;
41213 return -EOVERFLOW;
41214 }
41215+
41216+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41217+ return 0;
41218+
41219 dirent = buf->previous;
41220 if (dirent) {
41221 if (__put_user(offset, &dirent->d_off))
41222@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41223 buf.previous = NULL;
41224 buf.count = count;
41225 buf.error = 0;
41226+ buf.file = file;
41227
41228 error = vfs_readdir(file, compat_filldir, &buf);
41229 if (error >= 0)
41230@@ -1003,6 +1015,7 @@ out:
41231 struct compat_getdents_callback64 {
41232 struct linux_dirent64 __user *current_dir;
41233 struct linux_dirent64 __user *previous;
41234+ struct file * file;
41235 int count;
41236 int error;
41237 };
41238@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41239 buf->error = -EINVAL; /* only used if we fail.. */
41240 if (reclen > buf->count)
41241 return -EINVAL;
41242+
41243+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41244+ return 0;
41245+
41246 dirent = buf->previous;
41247
41248 if (dirent) {
41249@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41250 buf.previous = NULL;
41251 buf.count = count;
41252 buf.error = 0;
41253+ buf.file = file;
41254
41255 error = vfs_readdir(file, compat_filldir64, &buf);
41256 if (error >= 0)
41257 error = buf.error;
41258 lastdirent = buf.previous;
41259 if (lastdirent) {
41260- typeof(lastdirent->d_off) d_off = file->f_pos;
41261+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41262 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41263 error = -EFAULT;
41264 else
41265diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41266index 112e45a..b59845b 100644
41267--- a/fs/compat_binfmt_elf.c
41268+++ b/fs/compat_binfmt_elf.c
41269@@ -30,11 +30,13 @@
41270 #undef elf_phdr
41271 #undef elf_shdr
41272 #undef elf_note
41273+#undef elf_dyn
41274 #undef elf_addr_t
41275 #define elfhdr elf32_hdr
41276 #define elf_phdr elf32_phdr
41277 #define elf_shdr elf32_shdr
41278 #define elf_note elf32_note
41279+#define elf_dyn Elf32_Dyn
41280 #define elf_addr_t Elf32_Addr
41281
41282 /*
41283diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41284index 51352de..93292ff 100644
41285--- a/fs/compat_ioctl.c
41286+++ b/fs/compat_ioctl.c
41287@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41288
41289 err = get_user(palp, &up->palette);
41290 err |= get_user(length, &up->length);
41291+ if (err)
41292+ return -EFAULT;
41293
41294 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41295 err = put_user(compat_ptr(palp), &up_native->palette);
41296@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41297 return -EFAULT;
41298 if (__get_user(udata, &ss32->iomem_base))
41299 return -EFAULT;
41300- ss.iomem_base = compat_ptr(udata);
41301+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41302 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41303 __get_user(ss.port_high, &ss32->port_high))
41304 return -EFAULT;
41305@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41306 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41307 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41308 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41309- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41310+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41311 return -EFAULT;
41312
41313 return ioctl_preallocate(file, p);
41314@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41315 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41316 {
41317 unsigned int a, b;
41318- a = *(unsigned int *)p;
41319- b = *(unsigned int *)q;
41320+ a = *(const unsigned int *)p;
41321+ b = *(const unsigned int *)q;
41322 if (a > b)
41323 return 1;
41324 if (a < b)
41325diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41326index 9a37a9b..35792b6 100644
41327--- a/fs/configfs/dir.c
41328+++ b/fs/configfs/dir.c
41329@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41330 }
41331 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41332 struct configfs_dirent *next;
41333- const char * name;
41334+ const unsigned char * name;
41335+ char d_name[sizeof(next->s_dentry->d_iname)];
41336 int len;
41337 struct inode *inode = NULL;
41338
41339@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41340 continue;
41341
41342 name = configfs_get_name(next);
41343- len = strlen(name);
41344+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41345+ len = next->s_dentry->d_name.len;
41346+ memcpy(d_name, name, len);
41347+ name = d_name;
41348+ } else
41349+ len = strlen(name);
41350
41351 /*
41352 * We'll have a dentry and an inode for
41353diff --git a/fs/dcache.c b/fs/dcache.c
41354index f7908ae..920a680 100644
41355--- a/fs/dcache.c
41356+++ b/fs/dcache.c
41357@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41358 mempages -= reserve;
41359
41360 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41361- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41362+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41363
41364 dcache_init();
41365 inode_init();
41366diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41367index f3a257d..715ac0f 100644
41368--- a/fs/debugfs/inode.c
41369+++ b/fs/debugfs/inode.c
41370@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41371 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41372 {
41373 return debugfs_create_file(name,
41374+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41375+ S_IFDIR | S_IRWXU,
41376+#else
41377 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41378+#endif
41379 parent, NULL, NULL);
41380 }
41381 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41382diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41383index d2039ca..a766407 100644
41384--- a/fs/ecryptfs/inode.c
41385+++ b/fs/ecryptfs/inode.c
41386@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41387 old_fs = get_fs();
41388 set_fs(get_ds());
41389 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41390- (char __user *)lower_buf,
41391+ (char __force_user *)lower_buf,
41392 lower_bufsiz);
41393 set_fs(old_fs);
41394 if (rc < 0)
41395@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41396 }
41397 old_fs = get_fs();
41398 set_fs(get_ds());
41399- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41400+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41401 set_fs(old_fs);
41402 if (rc < 0) {
41403 kfree(buf);
41404@@ -752,7 +752,7 @@ out:
41405 static void
41406 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41407 {
41408- char *buf = nd_get_link(nd);
41409+ const char *buf = nd_get_link(nd);
41410 if (!IS_ERR(buf)) {
41411 /* Free the char* */
41412 kfree(buf);
41413diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41414index 0dc5a3d..d3cdeea 100644
41415--- a/fs/ecryptfs/miscdev.c
41416+++ b/fs/ecryptfs/miscdev.c
41417@@ -328,7 +328,7 @@ check_list:
41418 goto out_unlock_msg_ctx;
41419 i = 5;
41420 if (msg_ctx->msg) {
41421- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41422+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41423 goto out_unlock_msg_ctx;
41424 i += packet_length_size;
41425 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41426diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41427index 608c1c3..7d040a8 100644
41428--- a/fs/ecryptfs/read_write.c
41429+++ b/fs/ecryptfs/read_write.c
41430@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41431 return -EIO;
41432 fs_save = get_fs();
41433 set_fs(get_ds());
41434- rc = vfs_write(lower_file, data, size, &offset);
41435+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41436 set_fs(fs_save);
41437 mark_inode_dirty_sync(ecryptfs_inode);
41438 return rc;
41439@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41440 return -EIO;
41441 fs_save = get_fs();
41442 set_fs(get_ds());
41443- rc = vfs_read(lower_file, data, size, &offset);
41444+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41445 set_fs(fs_save);
41446 return rc;
41447 }
41448diff --git a/fs/exec.c b/fs/exec.c
41449index 3625464..7c7ce8b 100644
41450--- a/fs/exec.c
41451+++ b/fs/exec.c
41452@@ -55,12 +55,28 @@
41453 #include <linux/pipe_fs_i.h>
41454 #include <linux/oom.h>
41455 #include <linux/compat.h>
41456+#include <linux/random.h>
41457+#include <linux/seq_file.h>
41458+
41459+#ifdef CONFIG_PAX_REFCOUNT
41460+#include <linux/kallsyms.h>
41461+#include <linux/kdebug.h>
41462+#endif
41463
41464 #include <asm/uaccess.h>
41465 #include <asm/mmu_context.h>
41466 #include <asm/tlb.h>
41467 #include "internal.h"
41468
41469+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41470+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41471+#endif
41472+
41473+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41474+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41475+EXPORT_SYMBOL(pax_set_initial_flags_func);
41476+#endif
41477+
41478 int core_uses_pid;
41479 char core_pattern[CORENAME_MAX_SIZE] = "core";
41480 unsigned int core_pipe_limit;
41481@@ -70,7 +86,7 @@ struct core_name {
41482 char *corename;
41483 int used, size;
41484 };
41485-static atomic_t call_count = ATOMIC_INIT(1);
41486+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41487
41488 /* The maximal length of core_pattern is also specified in sysctl.c */
41489
41490@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41491 int write)
41492 {
41493 struct page *page;
41494- int ret;
41495
41496-#ifdef CONFIG_STACK_GROWSUP
41497- if (write) {
41498- ret = expand_downwards(bprm->vma, pos);
41499- if (ret < 0)
41500- return NULL;
41501- }
41502-#endif
41503- ret = get_user_pages(current, bprm->mm, pos,
41504- 1, write, 1, &page, NULL);
41505- if (ret <= 0)
41506+ if (0 > expand_downwards(bprm->vma, pos))
41507+ return NULL;
41508+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41509 return NULL;
41510
41511 if (write) {
41512@@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41513 if (size <= ARG_MAX)
41514 return page;
41515
41516+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41517+ // only allow 1MB for argv+env on suid/sgid binaries
41518+ // to prevent easy ASLR exhaustion
41519+ if (((bprm->cred->euid != current_euid()) ||
41520+ (bprm->cred->egid != current_egid())) &&
41521+ (size > (1024 * 1024))) {
41522+ put_page(page);
41523+ return NULL;
41524+ }
41525+#endif
41526+
41527 /*
41528 * Limit to 1/4-th the stack size for the argv+env strings.
41529 * This ensures that:
41530@@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41531 vma->vm_end = STACK_TOP_MAX;
41532 vma->vm_start = vma->vm_end - PAGE_SIZE;
41533 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41534+
41535+#ifdef CONFIG_PAX_SEGMEXEC
41536+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41537+#endif
41538+
41539 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41540 INIT_LIST_HEAD(&vma->anon_vma_chain);
41541
41542@@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41543 mm->stack_vm = mm->total_vm = 1;
41544 up_write(&mm->mmap_sem);
41545 bprm->p = vma->vm_end - sizeof(void *);
41546+
41547+#ifdef CONFIG_PAX_RANDUSTACK
41548+ if (randomize_va_space)
41549+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41550+#endif
41551+
41552 return 0;
41553 err:
41554 up_write(&mm->mmap_sem);
41555@@ -396,19 +426,7 @@ err:
41556 return err;
41557 }
41558
41559-struct user_arg_ptr {
41560-#ifdef CONFIG_COMPAT
41561- bool is_compat;
41562-#endif
41563- union {
41564- const char __user *const __user *native;
41565-#ifdef CONFIG_COMPAT
41566- compat_uptr_t __user *compat;
41567-#endif
41568- } ptr;
41569-};
41570-
41571-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41572+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41573 {
41574 const char __user *native;
41575
41576@@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41577 compat_uptr_t compat;
41578
41579 if (get_user(compat, argv.ptr.compat + nr))
41580- return ERR_PTR(-EFAULT);
41581+ return (const char __force_user *)ERR_PTR(-EFAULT);
41582
41583 return compat_ptr(compat);
41584 }
41585 #endif
41586
41587 if (get_user(native, argv.ptr.native + nr))
41588- return ERR_PTR(-EFAULT);
41589+ return (const char __force_user *)ERR_PTR(-EFAULT);
41590
41591 return native;
41592 }
41593@@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41594 if (!p)
41595 break;
41596
41597- if (IS_ERR(p))
41598+ if (IS_ERR((const char __force_kernel *)p))
41599 return -EFAULT;
41600
41601 if (i++ >= max)
41602@@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41603
41604 ret = -EFAULT;
41605 str = get_user_arg_ptr(argv, argc);
41606- if (IS_ERR(str))
41607+ if (IS_ERR((const char __force_kernel *)str))
41608 goto out;
41609
41610 len = strnlen_user(str, MAX_ARG_STRLEN);
41611@@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41612 int r;
41613 mm_segment_t oldfs = get_fs();
41614 struct user_arg_ptr argv = {
41615- .ptr.native = (const char __user *const __user *)__argv,
41616+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41617 };
41618
41619 set_fs(KERNEL_DS);
41620@@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41621 unsigned long new_end = old_end - shift;
41622 struct mmu_gather tlb;
41623
41624- BUG_ON(new_start > new_end);
41625+ if (new_start >= new_end || new_start < mmap_min_addr)
41626+ return -ENOMEM;
41627
41628 /*
41629 * ensure there are no vmas between where we want to go
41630@@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41631 if (vma != find_vma(mm, new_start))
41632 return -EFAULT;
41633
41634+#ifdef CONFIG_PAX_SEGMEXEC
41635+ BUG_ON(pax_find_mirror_vma(vma));
41636+#endif
41637+
41638 /*
41639 * cover the whole range: [new_start, old_end)
41640 */
41641@@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41642 stack_top = arch_align_stack(stack_top);
41643 stack_top = PAGE_ALIGN(stack_top);
41644
41645- if (unlikely(stack_top < mmap_min_addr) ||
41646- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41647- return -ENOMEM;
41648-
41649 stack_shift = vma->vm_end - stack_top;
41650
41651 bprm->p -= stack_shift;
41652@@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41653 bprm->exec -= stack_shift;
41654
41655 down_write(&mm->mmap_sem);
41656+
41657+ /* Move stack pages down in memory. */
41658+ if (stack_shift) {
41659+ ret = shift_arg_pages(vma, stack_shift);
41660+ if (ret)
41661+ goto out_unlock;
41662+ }
41663+
41664 vm_flags = VM_STACK_FLAGS;
41665
41666+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41667+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41668+ vm_flags &= ~VM_EXEC;
41669+
41670+#ifdef CONFIG_PAX_MPROTECT
41671+ if (mm->pax_flags & MF_PAX_MPROTECT)
41672+ vm_flags &= ~VM_MAYEXEC;
41673+#endif
41674+
41675+ }
41676+#endif
41677+
41678 /*
41679 * Adjust stack execute permissions; explicitly enable for
41680 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41681@@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41682 goto out_unlock;
41683 BUG_ON(prev != vma);
41684
41685- /* Move stack pages down in memory. */
41686- if (stack_shift) {
41687- ret = shift_arg_pages(vma, stack_shift);
41688- if (ret)
41689- goto out_unlock;
41690- }
41691-
41692 /* mprotect_fixup is overkill to remove the temporary stack flags */
41693 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41694
41695@@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41696 old_fs = get_fs();
41697 set_fs(get_ds());
41698 /* The cast to a user pointer is valid due to the set_fs() */
41699- result = vfs_read(file, (void __user *)addr, count, &pos);
41700+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41701 set_fs(old_fs);
41702 return result;
41703 }
41704@@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41705 perf_event_comm(tsk);
41706 }
41707
41708+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41709+{
41710+ int i, ch;
41711+
41712+ /* Copies the binary name from after last slash */
41713+ for (i = 0; (ch = *(fn++)) != '\0';) {
41714+ if (ch == '/')
41715+ i = 0; /* overwrite what we wrote */
41716+ else
41717+ if (i < len - 1)
41718+ tcomm[i++] = ch;
41719+ }
41720+ tcomm[i] = '\0';
41721+}
41722+
41723 int flush_old_exec(struct linux_binprm * bprm)
41724 {
41725 int retval;
41726@@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41727
41728 set_mm_exe_file(bprm->mm, bprm->file);
41729
41730+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41731 /*
41732 * Release all of the old mmap stuff
41733 */
41734@@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41735
41736 void setup_new_exec(struct linux_binprm * bprm)
41737 {
41738- int i, ch;
41739- const char *name;
41740- char tcomm[sizeof(current->comm)];
41741-
41742 arch_pick_mmap_layout(current->mm);
41743
41744 /* This is the point of no return */
41745@@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41746 else
41747 set_dumpable(current->mm, suid_dumpable);
41748
41749- name = bprm->filename;
41750-
41751- /* Copies the binary name from after last slash */
41752- for (i=0; (ch = *(name++)) != '\0';) {
41753- if (ch == '/')
41754- i = 0; /* overwrite what we wrote */
41755- else
41756- if (i < (sizeof(tcomm) - 1))
41757- tcomm[i++] = ch;
41758- }
41759- tcomm[i] = '\0';
41760- set_task_comm(current, tcomm);
41761+ set_task_comm(current, bprm->tcomm);
41762
41763 /* Set the new mm task size. We have to do that late because it may
41764 * depend on TIF_32BIT which is only updated in flush_thread() on
41765@@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41766 }
41767 rcu_read_unlock();
41768
41769- if (p->fs->users > n_fs) {
41770+ if (atomic_read(&p->fs->users) > n_fs) {
41771 bprm->unsafe |= LSM_UNSAFE_SHARE;
41772 } else {
41773 res = -EAGAIN;
41774@@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
41775
41776 EXPORT_SYMBOL(search_binary_handler);
41777
41778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41779+atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
41780+#endif
41781+
41782 /*
41783 * sys_execve() executes a new program.
41784 */
41785@@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
41786 struct user_arg_ptr envp,
41787 struct pt_regs *regs)
41788 {
41789+#ifdef CONFIG_GRKERNSEC
41790+ struct file *old_exec_file;
41791+ struct acl_subject_label *old_acl;
41792+ struct rlimit old_rlim[RLIM_NLIMITS];
41793+#endif
41794 struct linux_binprm *bprm;
41795 struct file *file;
41796 struct files_struct *displaced;
41797@@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
41798 int retval;
41799 const struct cred *cred = current_cred();
41800
41801+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41802+
41803 /*
41804 * We move the actual failure in case of RLIMIT_NPROC excess from
41805 * set*uid() to execve() because too many poorly written programs
41806@@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
41807 if (IS_ERR(file))
41808 goto out_unmark;
41809
41810+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
41811+ retval = -EPERM;
41812+ goto out_file;
41813+ }
41814+
41815 sched_exec();
41816
41817 bprm->file = file;
41818 bprm->filename = filename;
41819 bprm->interp = filename;
41820
41821+ if (gr_process_user_ban()) {
41822+ retval = -EPERM;
41823+ goto out_file;
41824+ }
41825+
41826+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41827+ retval = -EACCES;
41828+ goto out_file;
41829+ }
41830+
41831 retval = bprm_mm_init(bprm);
41832 if (retval)
41833 goto out_file;
41834@@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
41835 if (retval < 0)
41836 goto out;
41837
41838+ if (!gr_tpe_allow(file)) {
41839+ retval = -EACCES;
41840+ goto out;
41841+ }
41842+
41843+ if (gr_check_crash_exec(file)) {
41844+ retval = -EACCES;
41845+ goto out;
41846+ }
41847+
41848+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41849+
41850+ gr_handle_exec_args(bprm, argv);
41851+
41852+#ifdef CONFIG_GRKERNSEC
41853+ old_acl = current->acl;
41854+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41855+ old_exec_file = current->exec_file;
41856+ get_file(file);
41857+ current->exec_file = file;
41858+#endif
41859+
41860+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41861+ bprm->unsafe);
41862+ if (retval < 0)
41863+ goto out_fail;
41864+
41865 retval = search_binary_handler(bprm,regs);
41866 if (retval < 0)
41867- goto out;
41868+ goto out_fail;
41869+#ifdef CONFIG_GRKERNSEC
41870+ if (old_exec_file)
41871+ fput(old_exec_file);
41872+#endif
41873
41874 /* execve succeeded */
41875+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41876+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
41877+#endif
41878+
41879 current->fs->in_exec = 0;
41880 current->in_execve = 0;
41881 acct_update_integrals(current);
41882@@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
41883 put_files_struct(displaced);
41884 return retval;
41885
41886+out_fail:
41887+#ifdef CONFIG_GRKERNSEC
41888+ current->acl = old_acl;
41889+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41890+ fput(current->exec_file);
41891+ current->exec_file = old_exec_file;
41892+#endif
41893+
41894 out:
41895 if (bprm->mm) {
41896 acct_arg_size(bprm, 0);
41897@@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
41898 {
41899 char *old_corename = cn->corename;
41900
41901- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41902+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41903 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41904
41905 if (!cn->corename) {
41906@@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
41907 int pid_in_pattern = 0;
41908 int err = 0;
41909
41910- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41911+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41912 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41913 cn->used = 0;
41914
41915@@ -1812,6 +1914,218 @@ out:
41916 return ispipe;
41917 }
41918
41919+int pax_check_flags(unsigned long *flags)
41920+{
41921+ int retval = 0;
41922+
41923+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41924+ if (*flags & MF_PAX_SEGMEXEC)
41925+ {
41926+ *flags &= ~MF_PAX_SEGMEXEC;
41927+ retval = -EINVAL;
41928+ }
41929+#endif
41930+
41931+ if ((*flags & MF_PAX_PAGEEXEC)
41932+
41933+#ifdef CONFIG_PAX_PAGEEXEC
41934+ && (*flags & MF_PAX_SEGMEXEC)
41935+#endif
41936+
41937+ )
41938+ {
41939+ *flags &= ~MF_PAX_PAGEEXEC;
41940+ retval = -EINVAL;
41941+ }
41942+
41943+ if ((*flags & MF_PAX_MPROTECT)
41944+
41945+#ifdef CONFIG_PAX_MPROTECT
41946+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41947+#endif
41948+
41949+ )
41950+ {
41951+ *flags &= ~MF_PAX_MPROTECT;
41952+ retval = -EINVAL;
41953+ }
41954+
41955+ if ((*flags & MF_PAX_EMUTRAMP)
41956+
41957+#ifdef CONFIG_PAX_EMUTRAMP
41958+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41959+#endif
41960+
41961+ )
41962+ {
41963+ *flags &= ~MF_PAX_EMUTRAMP;
41964+ retval = -EINVAL;
41965+ }
41966+
41967+ return retval;
41968+}
41969+
41970+EXPORT_SYMBOL(pax_check_flags);
41971+
41972+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41973+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41974+{
41975+ struct task_struct *tsk = current;
41976+ struct mm_struct *mm = current->mm;
41977+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41978+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41979+ char *path_exec = NULL;
41980+ char *path_fault = NULL;
41981+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41982+
41983+ if (buffer_exec && buffer_fault) {
41984+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41985+
41986+ down_read(&mm->mmap_sem);
41987+ vma = mm->mmap;
41988+ while (vma && (!vma_exec || !vma_fault)) {
41989+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41990+ vma_exec = vma;
41991+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41992+ vma_fault = vma;
41993+ vma = vma->vm_next;
41994+ }
41995+ if (vma_exec) {
41996+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41997+ if (IS_ERR(path_exec))
41998+ path_exec = "<path too long>";
41999+ else {
42000+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42001+ if (path_exec) {
42002+ *path_exec = 0;
42003+ path_exec = buffer_exec;
42004+ } else
42005+ path_exec = "<path too long>";
42006+ }
42007+ }
42008+ if (vma_fault) {
42009+ start = vma_fault->vm_start;
42010+ end = vma_fault->vm_end;
42011+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42012+ if (vma_fault->vm_file) {
42013+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42014+ if (IS_ERR(path_fault))
42015+ path_fault = "<path too long>";
42016+ else {
42017+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42018+ if (path_fault) {
42019+ *path_fault = 0;
42020+ path_fault = buffer_fault;
42021+ } else
42022+ path_fault = "<path too long>";
42023+ }
42024+ } else
42025+ path_fault = "<anonymous mapping>";
42026+ }
42027+ up_read(&mm->mmap_sem);
42028+ }
42029+ if (tsk->signal->curr_ip)
42030+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42031+ else
42032+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42033+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42034+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42035+ task_uid(tsk), task_euid(tsk), pc, sp);
42036+ free_page((unsigned long)buffer_exec);
42037+ free_page((unsigned long)buffer_fault);
42038+ pax_report_insns(regs, pc, sp);
42039+ do_coredump(SIGKILL, SIGKILL, regs);
42040+}
42041+#endif
42042+
42043+#ifdef CONFIG_PAX_REFCOUNT
42044+void pax_report_refcount_overflow(struct pt_regs *regs)
42045+{
42046+ if (current->signal->curr_ip)
42047+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42048+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42049+ else
42050+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42051+ current->comm, task_pid_nr(current), current_uid(), current_euid());
42052+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42053+ show_regs(regs);
42054+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42055+}
42056+#endif
42057+
42058+#ifdef CONFIG_PAX_USERCOPY
42059+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42060+int object_is_on_stack(const void *obj, unsigned long len)
42061+{
42062+ const void * const stack = task_stack_page(current);
42063+ const void * const stackend = stack + THREAD_SIZE;
42064+
42065+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42066+ const void *frame = NULL;
42067+ const void *oldframe;
42068+#endif
42069+
42070+ if (obj + len < obj)
42071+ return -1;
42072+
42073+ if (obj + len <= stack || stackend <= obj)
42074+ return 0;
42075+
42076+ if (obj < stack || stackend < obj + len)
42077+ return -1;
42078+
42079+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42080+ oldframe = __builtin_frame_address(1);
42081+ if (oldframe)
42082+ frame = __builtin_frame_address(2);
42083+ /*
42084+ low ----------------------------------------------> high
42085+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
42086+ ^----------------^
42087+ allow copies only within here
42088+ */
42089+ while (stack <= frame && frame < stackend) {
42090+ /* if obj + len extends past the last frame, this
42091+ check won't pass and the next frame will be 0,
42092+ causing us to bail out and correctly report
42093+ the copy as invalid
42094+ */
42095+ if (obj + len <= frame)
42096+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42097+ oldframe = frame;
42098+ frame = *(const void * const *)frame;
42099+ }
42100+ return -1;
42101+#else
42102+ return 1;
42103+#endif
42104+}
42105+
42106+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42107+{
42108+ if (current->signal->curr_ip)
42109+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42110+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42111+ else
42112+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42113+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42114+ dump_stack();
42115+ gr_handle_kernel_exploit();
42116+ do_group_exit(SIGKILL);
42117+}
42118+#endif
42119+
42120+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42121+void pax_track_stack(void)
42122+{
42123+ unsigned long sp = (unsigned long)&sp;
42124+ if (sp < current_thread_info()->lowest_stack &&
42125+ sp > (unsigned long)task_stack_page(current))
42126+ current_thread_info()->lowest_stack = sp;
42127+}
42128+EXPORT_SYMBOL(pax_track_stack);
42129+#endif
42130+
42131 static int zap_process(struct task_struct *start, int exit_code)
42132 {
42133 struct task_struct *t;
42134@@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42135 pipe = file->f_path.dentry->d_inode->i_pipe;
42136
42137 pipe_lock(pipe);
42138- pipe->readers++;
42139- pipe->writers--;
42140+ atomic_inc(&pipe->readers);
42141+ atomic_dec(&pipe->writers);
42142
42143- while ((pipe->readers > 1) && (!signal_pending(current))) {
42144+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42145 wake_up_interruptible_sync(&pipe->wait);
42146 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42147 pipe_wait(pipe);
42148 }
42149
42150- pipe->readers--;
42151- pipe->writers++;
42152+ atomic_dec(&pipe->readers);
42153+ atomic_inc(&pipe->writers);
42154 pipe_unlock(pipe);
42155
42156 }
42157@@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42158 int retval = 0;
42159 int flag = 0;
42160 int ispipe;
42161- static atomic_t core_dump_count = ATOMIC_INIT(0);
42162+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42163 struct coredump_params cprm = {
42164 .signr = signr,
42165 .regs = regs,
42166@@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42167
42168 audit_core_dumps(signr);
42169
42170+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42171+ gr_handle_brute_attach(current, cprm.mm_flags);
42172+
42173 binfmt = mm->binfmt;
42174 if (!binfmt || !binfmt->core_dump)
42175 goto fail;
42176@@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42177 }
42178 cprm.limit = RLIM_INFINITY;
42179
42180- dump_count = atomic_inc_return(&core_dump_count);
42181+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42182 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42183 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42184 task_tgid_vnr(current), current->comm);
42185@@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42186 } else {
42187 struct inode *inode;
42188
42189+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42190+
42191 if (cprm.limit < binfmt->min_coredump)
42192 goto fail_unlock;
42193
42194@@ -2246,7 +2565,7 @@ close_fail:
42195 filp_close(cprm.file, NULL);
42196 fail_dropcount:
42197 if (ispipe)
42198- atomic_dec(&core_dump_count);
42199+ atomic_dec_unchecked(&core_dump_count);
42200 fail_unlock:
42201 kfree(cn.corename);
42202 fail_corename:
42203@@ -2265,7 +2584,7 @@ fail:
42204 */
42205 int dump_write(struct file *file, const void *addr, int nr)
42206 {
42207- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42208+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42209 }
42210 EXPORT_SYMBOL(dump_write);
42211
42212diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42213index a8cbe1b..fed04cb 100644
42214--- a/fs/ext2/balloc.c
42215+++ b/fs/ext2/balloc.c
42216@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42217
42218 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42219 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42220- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42221+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42222 sbi->s_resuid != current_fsuid() &&
42223 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42224 return 0;
42225diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42226index a203892..4e64db5 100644
42227--- a/fs/ext3/balloc.c
42228+++ b/fs/ext3/balloc.c
42229@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42230
42231 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42232 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42233- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42234+ if (free_blocks < root_blocks + 1 &&
42235 !use_reservation && sbi->s_resuid != current_fsuid() &&
42236- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42237+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42238+ !capable_nolog(CAP_SYS_RESOURCE)) {
42239 return 0;
42240 }
42241 return 1;
42242diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42243index 12ccacd..a6035fce0 100644
42244--- a/fs/ext4/balloc.c
42245+++ b/fs/ext4/balloc.c
42246@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42247 /* Hm, nope. Are (enough) root reserved clusters available? */
42248 if (sbi->s_resuid == current_fsuid() ||
42249 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42250- capable(CAP_SYS_RESOURCE) ||
42251- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42252+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42253+ capable_nolog(CAP_SYS_RESOURCE)) {
42254
42255 if (free_clusters >= (nclusters + dirty_clusters))
42256 return 1;
42257diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42258index 5b0e26a..0aa002d 100644
42259--- a/fs/ext4/ext4.h
42260+++ b/fs/ext4/ext4.h
42261@@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42262 unsigned long s_mb_last_start;
42263
42264 /* stats for buddy allocator */
42265- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42266- atomic_t s_bal_success; /* we found long enough chunks */
42267- atomic_t s_bal_allocated; /* in blocks */
42268- atomic_t s_bal_ex_scanned; /* total extents scanned */
42269- atomic_t s_bal_goals; /* goal hits */
42270- atomic_t s_bal_breaks; /* too long searches */
42271- atomic_t s_bal_2orders; /* 2^order hits */
42272+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42273+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42274+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42275+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42276+ atomic_unchecked_t s_bal_goals; /* goal hits */
42277+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42278+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42279 spinlock_t s_bal_lock;
42280 unsigned long s_mb_buddies_generated;
42281 unsigned long long s_mb_generation_time;
42282- atomic_t s_mb_lost_chunks;
42283- atomic_t s_mb_preallocated;
42284- atomic_t s_mb_discarded;
42285+ atomic_unchecked_t s_mb_lost_chunks;
42286+ atomic_unchecked_t s_mb_preallocated;
42287+ atomic_unchecked_t s_mb_discarded;
42288 atomic_t s_lock_busy;
42289
42290 /* locality groups */
42291diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42292index e2d8be8..c7f0ce9 100644
42293--- a/fs/ext4/mballoc.c
42294+++ b/fs/ext4/mballoc.c
42295@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42296 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42297
42298 if (EXT4_SB(sb)->s_mb_stats)
42299- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42300+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42301
42302 break;
42303 }
42304@@ -2088,7 +2088,7 @@ repeat:
42305 ac->ac_status = AC_STATUS_CONTINUE;
42306 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42307 cr = 3;
42308- atomic_inc(&sbi->s_mb_lost_chunks);
42309+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42310 goto repeat;
42311 }
42312 }
42313@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42314 if (sbi->s_mb_stats) {
42315 ext4_msg(sb, KERN_INFO,
42316 "mballoc: %u blocks %u reqs (%u success)",
42317- atomic_read(&sbi->s_bal_allocated),
42318- atomic_read(&sbi->s_bal_reqs),
42319- atomic_read(&sbi->s_bal_success));
42320+ atomic_read_unchecked(&sbi->s_bal_allocated),
42321+ atomic_read_unchecked(&sbi->s_bal_reqs),
42322+ atomic_read_unchecked(&sbi->s_bal_success));
42323 ext4_msg(sb, KERN_INFO,
42324 "mballoc: %u extents scanned, %u goal hits, "
42325 "%u 2^N hits, %u breaks, %u lost",
42326- atomic_read(&sbi->s_bal_ex_scanned),
42327- atomic_read(&sbi->s_bal_goals),
42328- atomic_read(&sbi->s_bal_2orders),
42329- atomic_read(&sbi->s_bal_breaks),
42330- atomic_read(&sbi->s_mb_lost_chunks));
42331+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42332+ atomic_read_unchecked(&sbi->s_bal_goals),
42333+ atomic_read_unchecked(&sbi->s_bal_2orders),
42334+ atomic_read_unchecked(&sbi->s_bal_breaks),
42335+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42336 ext4_msg(sb, KERN_INFO,
42337 "mballoc: %lu generated and it took %Lu",
42338 sbi->s_mb_buddies_generated,
42339 sbi->s_mb_generation_time);
42340 ext4_msg(sb, KERN_INFO,
42341 "mballoc: %u preallocated, %u discarded",
42342- atomic_read(&sbi->s_mb_preallocated),
42343- atomic_read(&sbi->s_mb_discarded));
42344+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42345+ atomic_read_unchecked(&sbi->s_mb_discarded));
42346 }
42347
42348 free_percpu(sbi->s_locality_groups);
42349@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42350 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42351
42352 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42353- atomic_inc(&sbi->s_bal_reqs);
42354- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42355+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42356+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42357 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42358- atomic_inc(&sbi->s_bal_success);
42359- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42360+ atomic_inc_unchecked(&sbi->s_bal_success);
42361+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42362 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42363 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42364- atomic_inc(&sbi->s_bal_goals);
42365+ atomic_inc_unchecked(&sbi->s_bal_goals);
42366 if (ac->ac_found > sbi->s_mb_max_to_scan)
42367- atomic_inc(&sbi->s_bal_breaks);
42368+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42369 }
42370
42371 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42372@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42373 trace_ext4_mb_new_inode_pa(ac, pa);
42374
42375 ext4_mb_use_inode_pa(ac, pa);
42376- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42377+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42378
42379 ei = EXT4_I(ac->ac_inode);
42380 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42381@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42382 trace_ext4_mb_new_group_pa(ac, pa);
42383
42384 ext4_mb_use_group_pa(ac, pa);
42385- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42386+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42387
42388 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42389 lg = ac->ac_lg;
42390@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42391 * from the bitmap and continue.
42392 */
42393 }
42394- atomic_add(free, &sbi->s_mb_discarded);
42395+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42396
42397 return err;
42398 }
42399@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42400 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42401 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42402 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42403- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42404+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42405 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42406
42407 return 0;
42408diff --git a/fs/fcntl.c b/fs/fcntl.c
42409index 22764c7..86372c9 100644
42410--- a/fs/fcntl.c
42411+++ b/fs/fcntl.c
42412@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42413 if (err)
42414 return err;
42415
42416+ if (gr_handle_chroot_fowner(pid, type))
42417+ return -ENOENT;
42418+ if (gr_check_protected_task_fowner(pid, type))
42419+ return -EACCES;
42420+
42421 f_modown(filp, pid, type, force);
42422 return 0;
42423 }
42424@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42425
42426 static int f_setown_ex(struct file *filp, unsigned long arg)
42427 {
42428- struct f_owner_ex * __user owner_p = (void * __user)arg;
42429+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42430 struct f_owner_ex owner;
42431 struct pid *pid;
42432 int type;
42433@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42434
42435 static int f_getown_ex(struct file *filp, unsigned long arg)
42436 {
42437- struct f_owner_ex * __user owner_p = (void * __user)arg;
42438+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42439 struct f_owner_ex owner;
42440 int ret = 0;
42441
42442@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42443 switch (cmd) {
42444 case F_DUPFD:
42445 case F_DUPFD_CLOEXEC:
42446+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42447 if (arg >= rlimit(RLIMIT_NOFILE))
42448 break;
42449 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42450diff --git a/fs/fifo.c b/fs/fifo.c
42451index b1a524d..4ee270e 100644
42452--- a/fs/fifo.c
42453+++ b/fs/fifo.c
42454@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42455 */
42456 filp->f_op = &read_pipefifo_fops;
42457 pipe->r_counter++;
42458- if (pipe->readers++ == 0)
42459+ if (atomic_inc_return(&pipe->readers) == 1)
42460 wake_up_partner(inode);
42461
42462- if (!pipe->writers) {
42463+ if (!atomic_read(&pipe->writers)) {
42464 if ((filp->f_flags & O_NONBLOCK)) {
42465 /* suppress POLLHUP until we have
42466 * seen a writer */
42467@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42468 * errno=ENXIO when there is no process reading the FIFO.
42469 */
42470 ret = -ENXIO;
42471- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42472+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42473 goto err;
42474
42475 filp->f_op = &write_pipefifo_fops;
42476 pipe->w_counter++;
42477- if (!pipe->writers++)
42478+ if (atomic_inc_return(&pipe->writers) == 1)
42479 wake_up_partner(inode);
42480
42481- if (!pipe->readers) {
42482+ if (!atomic_read(&pipe->readers)) {
42483 wait_for_partner(inode, &pipe->r_counter);
42484 if (signal_pending(current))
42485 goto err_wr;
42486@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42487 */
42488 filp->f_op = &rdwr_pipefifo_fops;
42489
42490- pipe->readers++;
42491- pipe->writers++;
42492+ atomic_inc(&pipe->readers);
42493+ atomic_inc(&pipe->writers);
42494 pipe->r_counter++;
42495 pipe->w_counter++;
42496- if (pipe->readers == 1 || pipe->writers == 1)
42497+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42498 wake_up_partner(inode);
42499 break;
42500
42501@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42502 return 0;
42503
42504 err_rd:
42505- if (!--pipe->readers)
42506+ if (atomic_dec_and_test(&pipe->readers))
42507 wake_up_interruptible(&pipe->wait);
42508 ret = -ERESTARTSYS;
42509 goto err;
42510
42511 err_wr:
42512- if (!--pipe->writers)
42513+ if (atomic_dec_and_test(&pipe->writers))
42514 wake_up_interruptible(&pipe->wait);
42515 ret = -ERESTARTSYS;
42516 goto err;
42517
42518 err:
42519- if (!pipe->readers && !pipe->writers)
42520+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42521 free_pipe_info(inode);
42522
42523 err_nocleanup:
42524diff --git a/fs/file.c b/fs/file.c
42525index 4c6992d..104cdea 100644
42526--- a/fs/file.c
42527+++ b/fs/file.c
42528@@ -15,6 +15,7 @@
42529 #include <linux/slab.h>
42530 #include <linux/vmalloc.h>
42531 #include <linux/file.h>
42532+#include <linux/security.h>
42533 #include <linux/fdtable.h>
42534 #include <linux/bitops.h>
42535 #include <linux/interrupt.h>
42536@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42537 * N.B. For clone tasks sharing a files structure, this test
42538 * will limit the total number of files that can be opened.
42539 */
42540+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42541 if (nr >= rlimit(RLIMIT_NOFILE))
42542 return -EMFILE;
42543
42544diff --git a/fs/filesystems.c b/fs/filesystems.c
42545index 0845f84..7b4ebef 100644
42546--- a/fs/filesystems.c
42547+++ b/fs/filesystems.c
42548@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42549 int len = dot ? dot - name : strlen(name);
42550
42551 fs = __get_fs_type(name, len);
42552+
42553+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42554+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42555+#else
42556 if (!fs && (request_module("%.*s", len, name) == 0))
42557+#endif
42558 fs = __get_fs_type(name, len);
42559
42560 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42561diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42562index 78b519c..212c0d0 100644
42563--- a/fs/fs_struct.c
42564+++ b/fs/fs_struct.c
42565@@ -4,6 +4,7 @@
42566 #include <linux/path.h>
42567 #include <linux/slab.h>
42568 #include <linux/fs_struct.h>
42569+#include <linux/grsecurity.h>
42570 #include "internal.h"
42571
42572 static inline void path_get_longterm(struct path *path)
42573@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42574 old_root = fs->root;
42575 fs->root = *path;
42576 path_get_longterm(path);
42577+ gr_set_chroot_entries(current, path);
42578 write_seqcount_end(&fs->seq);
42579 spin_unlock(&fs->lock);
42580 if (old_root.dentry)
42581@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42582 && fs->root.mnt == old_root->mnt) {
42583 path_get_longterm(new_root);
42584 fs->root = *new_root;
42585+ gr_set_chroot_entries(p, new_root);
42586 count++;
42587 }
42588 if (fs->pwd.dentry == old_root->dentry
42589@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42590 spin_lock(&fs->lock);
42591 write_seqcount_begin(&fs->seq);
42592 tsk->fs = NULL;
42593- kill = !--fs->users;
42594+ gr_clear_chroot_entries(tsk);
42595+ kill = !atomic_dec_return(&fs->users);
42596 write_seqcount_end(&fs->seq);
42597 spin_unlock(&fs->lock);
42598 task_unlock(tsk);
42599@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42600 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42601 /* We don't need to lock fs - think why ;-) */
42602 if (fs) {
42603- fs->users = 1;
42604+ atomic_set(&fs->users, 1);
42605 fs->in_exec = 0;
42606 spin_lock_init(&fs->lock);
42607 seqcount_init(&fs->seq);
42608@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42609 spin_lock(&old->lock);
42610 fs->root = old->root;
42611 path_get_longterm(&fs->root);
42612+ /* instead of calling gr_set_chroot_entries here,
42613+ we call it from every caller of this function
42614+ */
42615 fs->pwd = old->pwd;
42616 path_get_longterm(&fs->pwd);
42617 spin_unlock(&old->lock);
42618@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42619
42620 task_lock(current);
42621 spin_lock(&fs->lock);
42622- kill = !--fs->users;
42623+ kill = !atomic_dec_return(&fs->users);
42624 current->fs = new_fs;
42625+ gr_set_chroot_entries(current, &new_fs->root);
42626 spin_unlock(&fs->lock);
42627 task_unlock(current);
42628
42629@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42630
42631 /* to be mentioned only in INIT_TASK */
42632 struct fs_struct init_fs = {
42633- .users = 1,
42634+ .users = ATOMIC_INIT(1),
42635 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42636 .seq = SEQCNT_ZERO,
42637 .umask = 0022,
42638@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42639 task_lock(current);
42640
42641 spin_lock(&init_fs.lock);
42642- init_fs.users++;
42643+ atomic_inc(&init_fs.users);
42644 spin_unlock(&init_fs.lock);
42645
42646 spin_lock(&fs->lock);
42647 current->fs = &init_fs;
42648- kill = !--fs->users;
42649+ gr_set_chroot_entries(current, &current->fs->root);
42650+ kill = !atomic_dec_return(&fs->users);
42651 spin_unlock(&fs->lock);
42652
42653 task_unlock(current);
42654diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42655index 9905350..02eaec4 100644
42656--- a/fs/fscache/cookie.c
42657+++ b/fs/fscache/cookie.c
42658@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42659 parent ? (char *) parent->def->name : "<no-parent>",
42660 def->name, netfs_data);
42661
42662- fscache_stat(&fscache_n_acquires);
42663+ fscache_stat_unchecked(&fscache_n_acquires);
42664
42665 /* if there's no parent cookie, then we don't create one here either */
42666 if (!parent) {
42667- fscache_stat(&fscache_n_acquires_null);
42668+ fscache_stat_unchecked(&fscache_n_acquires_null);
42669 _leave(" [no parent]");
42670 return NULL;
42671 }
42672@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42673 /* allocate and initialise a cookie */
42674 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42675 if (!cookie) {
42676- fscache_stat(&fscache_n_acquires_oom);
42677+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42678 _leave(" [ENOMEM]");
42679 return NULL;
42680 }
42681@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42682
42683 switch (cookie->def->type) {
42684 case FSCACHE_COOKIE_TYPE_INDEX:
42685- fscache_stat(&fscache_n_cookie_index);
42686+ fscache_stat_unchecked(&fscache_n_cookie_index);
42687 break;
42688 case FSCACHE_COOKIE_TYPE_DATAFILE:
42689- fscache_stat(&fscache_n_cookie_data);
42690+ fscache_stat_unchecked(&fscache_n_cookie_data);
42691 break;
42692 default:
42693- fscache_stat(&fscache_n_cookie_special);
42694+ fscache_stat_unchecked(&fscache_n_cookie_special);
42695 break;
42696 }
42697
42698@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42699 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42700 atomic_dec(&parent->n_children);
42701 __fscache_cookie_put(cookie);
42702- fscache_stat(&fscache_n_acquires_nobufs);
42703+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42704 _leave(" = NULL");
42705 return NULL;
42706 }
42707 }
42708
42709- fscache_stat(&fscache_n_acquires_ok);
42710+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42711 _leave(" = %p", cookie);
42712 return cookie;
42713 }
42714@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42715 cache = fscache_select_cache_for_object(cookie->parent);
42716 if (!cache) {
42717 up_read(&fscache_addremove_sem);
42718- fscache_stat(&fscache_n_acquires_no_cache);
42719+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42720 _leave(" = -ENOMEDIUM [no cache]");
42721 return -ENOMEDIUM;
42722 }
42723@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42724 object = cache->ops->alloc_object(cache, cookie);
42725 fscache_stat_d(&fscache_n_cop_alloc_object);
42726 if (IS_ERR(object)) {
42727- fscache_stat(&fscache_n_object_no_alloc);
42728+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42729 ret = PTR_ERR(object);
42730 goto error;
42731 }
42732
42733- fscache_stat(&fscache_n_object_alloc);
42734+ fscache_stat_unchecked(&fscache_n_object_alloc);
42735
42736 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42737
42738@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42739 struct fscache_object *object;
42740 struct hlist_node *_p;
42741
42742- fscache_stat(&fscache_n_updates);
42743+ fscache_stat_unchecked(&fscache_n_updates);
42744
42745 if (!cookie) {
42746- fscache_stat(&fscache_n_updates_null);
42747+ fscache_stat_unchecked(&fscache_n_updates_null);
42748 _leave(" [no cookie]");
42749 return;
42750 }
42751@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42752 struct fscache_object *object;
42753 unsigned long event;
42754
42755- fscache_stat(&fscache_n_relinquishes);
42756+ fscache_stat_unchecked(&fscache_n_relinquishes);
42757 if (retire)
42758- fscache_stat(&fscache_n_relinquishes_retire);
42759+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42760
42761 if (!cookie) {
42762- fscache_stat(&fscache_n_relinquishes_null);
42763+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42764 _leave(" [no cookie]");
42765 return;
42766 }
42767@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42768
42769 /* wait for the cookie to finish being instantiated (or to fail) */
42770 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42771- fscache_stat(&fscache_n_relinquishes_waitcrt);
42772+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42773 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42774 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42775 }
42776diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42777index f6aad48..88dcf26 100644
42778--- a/fs/fscache/internal.h
42779+++ b/fs/fscache/internal.h
42780@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42781 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42782 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42783
42784-extern atomic_t fscache_n_op_pend;
42785-extern atomic_t fscache_n_op_run;
42786-extern atomic_t fscache_n_op_enqueue;
42787-extern atomic_t fscache_n_op_deferred_release;
42788-extern atomic_t fscache_n_op_release;
42789-extern atomic_t fscache_n_op_gc;
42790-extern atomic_t fscache_n_op_cancelled;
42791-extern atomic_t fscache_n_op_rejected;
42792+extern atomic_unchecked_t fscache_n_op_pend;
42793+extern atomic_unchecked_t fscache_n_op_run;
42794+extern atomic_unchecked_t fscache_n_op_enqueue;
42795+extern atomic_unchecked_t fscache_n_op_deferred_release;
42796+extern atomic_unchecked_t fscache_n_op_release;
42797+extern atomic_unchecked_t fscache_n_op_gc;
42798+extern atomic_unchecked_t fscache_n_op_cancelled;
42799+extern atomic_unchecked_t fscache_n_op_rejected;
42800
42801-extern atomic_t fscache_n_attr_changed;
42802-extern atomic_t fscache_n_attr_changed_ok;
42803-extern atomic_t fscache_n_attr_changed_nobufs;
42804-extern atomic_t fscache_n_attr_changed_nomem;
42805-extern atomic_t fscache_n_attr_changed_calls;
42806+extern atomic_unchecked_t fscache_n_attr_changed;
42807+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42808+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42809+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42810+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42811
42812-extern atomic_t fscache_n_allocs;
42813-extern atomic_t fscache_n_allocs_ok;
42814-extern atomic_t fscache_n_allocs_wait;
42815-extern atomic_t fscache_n_allocs_nobufs;
42816-extern atomic_t fscache_n_allocs_intr;
42817-extern atomic_t fscache_n_allocs_object_dead;
42818-extern atomic_t fscache_n_alloc_ops;
42819-extern atomic_t fscache_n_alloc_op_waits;
42820+extern atomic_unchecked_t fscache_n_allocs;
42821+extern atomic_unchecked_t fscache_n_allocs_ok;
42822+extern atomic_unchecked_t fscache_n_allocs_wait;
42823+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42824+extern atomic_unchecked_t fscache_n_allocs_intr;
42825+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42826+extern atomic_unchecked_t fscache_n_alloc_ops;
42827+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42828
42829-extern atomic_t fscache_n_retrievals;
42830-extern atomic_t fscache_n_retrievals_ok;
42831-extern atomic_t fscache_n_retrievals_wait;
42832-extern atomic_t fscache_n_retrievals_nodata;
42833-extern atomic_t fscache_n_retrievals_nobufs;
42834-extern atomic_t fscache_n_retrievals_intr;
42835-extern atomic_t fscache_n_retrievals_nomem;
42836-extern atomic_t fscache_n_retrievals_object_dead;
42837-extern atomic_t fscache_n_retrieval_ops;
42838-extern atomic_t fscache_n_retrieval_op_waits;
42839+extern atomic_unchecked_t fscache_n_retrievals;
42840+extern atomic_unchecked_t fscache_n_retrievals_ok;
42841+extern atomic_unchecked_t fscache_n_retrievals_wait;
42842+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42843+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42844+extern atomic_unchecked_t fscache_n_retrievals_intr;
42845+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42846+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42847+extern atomic_unchecked_t fscache_n_retrieval_ops;
42848+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42849
42850-extern atomic_t fscache_n_stores;
42851-extern atomic_t fscache_n_stores_ok;
42852-extern atomic_t fscache_n_stores_again;
42853-extern atomic_t fscache_n_stores_nobufs;
42854-extern atomic_t fscache_n_stores_oom;
42855-extern atomic_t fscache_n_store_ops;
42856-extern atomic_t fscache_n_store_calls;
42857-extern atomic_t fscache_n_store_pages;
42858-extern atomic_t fscache_n_store_radix_deletes;
42859-extern atomic_t fscache_n_store_pages_over_limit;
42860+extern atomic_unchecked_t fscache_n_stores;
42861+extern atomic_unchecked_t fscache_n_stores_ok;
42862+extern atomic_unchecked_t fscache_n_stores_again;
42863+extern atomic_unchecked_t fscache_n_stores_nobufs;
42864+extern atomic_unchecked_t fscache_n_stores_oom;
42865+extern atomic_unchecked_t fscache_n_store_ops;
42866+extern atomic_unchecked_t fscache_n_store_calls;
42867+extern atomic_unchecked_t fscache_n_store_pages;
42868+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42869+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42870
42871-extern atomic_t fscache_n_store_vmscan_not_storing;
42872-extern atomic_t fscache_n_store_vmscan_gone;
42873-extern atomic_t fscache_n_store_vmscan_busy;
42874-extern atomic_t fscache_n_store_vmscan_cancelled;
42875+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42876+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42877+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42878+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42879
42880-extern atomic_t fscache_n_marks;
42881-extern atomic_t fscache_n_uncaches;
42882+extern atomic_unchecked_t fscache_n_marks;
42883+extern atomic_unchecked_t fscache_n_uncaches;
42884
42885-extern atomic_t fscache_n_acquires;
42886-extern atomic_t fscache_n_acquires_null;
42887-extern atomic_t fscache_n_acquires_no_cache;
42888-extern atomic_t fscache_n_acquires_ok;
42889-extern atomic_t fscache_n_acquires_nobufs;
42890-extern atomic_t fscache_n_acquires_oom;
42891+extern atomic_unchecked_t fscache_n_acquires;
42892+extern atomic_unchecked_t fscache_n_acquires_null;
42893+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42894+extern atomic_unchecked_t fscache_n_acquires_ok;
42895+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42896+extern atomic_unchecked_t fscache_n_acquires_oom;
42897
42898-extern atomic_t fscache_n_updates;
42899-extern atomic_t fscache_n_updates_null;
42900-extern atomic_t fscache_n_updates_run;
42901+extern atomic_unchecked_t fscache_n_updates;
42902+extern atomic_unchecked_t fscache_n_updates_null;
42903+extern atomic_unchecked_t fscache_n_updates_run;
42904
42905-extern atomic_t fscache_n_relinquishes;
42906-extern atomic_t fscache_n_relinquishes_null;
42907-extern atomic_t fscache_n_relinquishes_waitcrt;
42908-extern atomic_t fscache_n_relinquishes_retire;
42909+extern atomic_unchecked_t fscache_n_relinquishes;
42910+extern atomic_unchecked_t fscache_n_relinquishes_null;
42911+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42912+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42913
42914-extern atomic_t fscache_n_cookie_index;
42915-extern atomic_t fscache_n_cookie_data;
42916-extern atomic_t fscache_n_cookie_special;
42917+extern atomic_unchecked_t fscache_n_cookie_index;
42918+extern atomic_unchecked_t fscache_n_cookie_data;
42919+extern atomic_unchecked_t fscache_n_cookie_special;
42920
42921-extern atomic_t fscache_n_object_alloc;
42922-extern atomic_t fscache_n_object_no_alloc;
42923-extern atomic_t fscache_n_object_lookups;
42924-extern atomic_t fscache_n_object_lookups_negative;
42925-extern atomic_t fscache_n_object_lookups_positive;
42926-extern atomic_t fscache_n_object_lookups_timed_out;
42927-extern atomic_t fscache_n_object_created;
42928-extern atomic_t fscache_n_object_avail;
42929-extern atomic_t fscache_n_object_dead;
42930+extern atomic_unchecked_t fscache_n_object_alloc;
42931+extern atomic_unchecked_t fscache_n_object_no_alloc;
42932+extern atomic_unchecked_t fscache_n_object_lookups;
42933+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42934+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42935+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42936+extern atomic_unchecked_t fscache_n_object_created;
42937+extern atomic_unchecked_t fscache_n_object_avail;
42938+extern atomic_unchecked_t fscache_n_object_dead;
42939
42940-extern atomic_t fscache_n_checkaux_none;
42941-extern atomic_t fscache_n_checkaux_okay;
42942-extern atomic_t fscache_n_checkaux_update;
42943-extern atomic_t fscache_n_checkaux_obsolete;
42944+extern atomic_unchecked_t fscache_n_checkaux_none;
42945+extern atomic_unchecked_t fscache_n_checkaux_okay;
42946+extern atomic_unchecked_t fscache_n_checkaux_update;
42947+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42948
42949 extern atomic_t fscache_n_cop_alloc_object;
42950 extern atomic_t fscache_n_cop_lookup_object;
42951@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
42952 atomic_inc(stat);
42953 }
42954
42955+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42956+{
42957+ atomic_inc_unchecked(stat);
42958+}
42959+
42960 static inline void fscache_stat_d(atomic_t *stat)
42961 {
42962 atomic_dec(stat);
42963@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
42964
42965 #define __fscache_stat(stat) (NULL)
42966 #define fscache_stat(stat) do {} while (0)
42967+#define fscache_stat_unchecked(stat) do {} while (0)
42968 #define fscache_stat_d(stat) do {} while (0)
42969 #endif
42970
42971diff --git a/fs/fscache/object.c b/fs/fscache/object.c
42972index b6b897c..0ffff9c 100644
42973--- a/fs/fscache/object.c
42974+++ b/fs/fscache/object.c
42975@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42976 /* update the object metadata on disk */
42977 case FSCACHE_OBJECT_UPDATING:
42978 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42979- fscache_stat(&fscache_n_updates_run);
42980+ fscache_stat_unchecked(&fscache_n_updates_run);
42981 fscache_stat(&fscache_n_cop_update_object);
42982 object->cache->ops->update_object(object);
42983 fscache_stat_d(&fscache_n_cop_update_object);
42984@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42985 spin_lock(&object->lock);
42986 object->state = FSCACHE_OBJECT_DEAD;
42987 spin_unlock(&object->lock);
42988- fscache_stat(&fscache_n_object_dead);
42989+ fscache_stat_unchecked(&fscache_n_object_dead);
42990 goto terminal_transit;
42991
42992 /* handle the parent cache of this object being withdrawn from
42993@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42994 spin_lock(&object->lock);
42995 object->state = FSCACHE_OBJECT_DEAD;
42996 spin_unlock(&object->lock);
42997- fscache_stat(&fscache_n_object_dead);
42998+ fscache_stat_unchecked(&fscache_n_object_dead);
42999 goto terminal_transit;
43000
43001 /* complain about the object being woken up once it is
43002@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43003 parent->cookie->def->name, cookie->def->name,
43004 object->cache->tag->name);
43005
43006- fscache_stat(&fscache_n_object_lookups);
43007+ fscache_stat_unchecked(&fscache_n_object_lookups);
43008 fscache_stat(&fscache_n_cop_lookup_object);
43009 ret = object->cache->ops->lookup_object(object);
43010 fscache_stat_d(&fscache_n_cop_lookup_object);
43011@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43012 if (ret == -ETIMEDOUT) {
43013 /* probably stuck behind another object, so move this one to
43014 * the back of the queue */
43015- fscache_stat(&fscache_n_object_lookups_timed_out);
43016+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43017 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43018 }
43019
43020@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43021
43022 spin_lock(&object->lock);
43023 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43024- fscache_stat(&fscache_n_object_lookups_negative);
43025+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43026
43027 /* transit here to allow write requests to begin stacking up
43028 * and read requests to begin returning ENODATA */
43029@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43030 * result, in which case there may be data available */
43031 spin_lock(&object->lock);
43032 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43033- fscache_stat(&fscache_n_object_lookups_positive);
43034+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43035
43036 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43037
43038@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43039 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43040 } else {
43041 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43042- fscache_stat(&fscache_n_object_created);
43043+ fscache_stat_unchecked(&fscache_n_object_created);
43044
43045 object->state = FSCACHE_OBJECT_AVAILABLE;
43046 spin_unlock(&object->lock);
43047@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43048 fscache_enqueue_dependents(object);
43049
43050 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43051- fscache_stat(&fscache_n_object_avail);
43052+ fscache_stat_unchecked(&fscache_n_object_avail);
43053
43054 _leave("");
43055 }
43056@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43057 enum fscache_checkaux result;
43058
43059 if (!object->cookie->def->check_aux) {
43060- fscache_stat(&fscache_n_checkaux_none);
43061+ fscache_stat_unchecked(&fscache_n_checkaux_none);
43062 return FSCACHE_CHECKAUX_OKAY;
43063 }
43064
43065@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43066 switch (result) {
43067 /* entry okay as is */
43068 case FSCACHE_CHECKAUX_OKAY:
43069- fscache_stat(&fscache_n_checkaux_okay);
43070+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
43071 break;
43072
43073 /* entry requires update */
43074 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43075- fscache_stat(&fscache_n_checkaux_update);
43076+ fscache_stat_unchecked(&fscache_n_checkaux_update);
43077 break;
43078
43079 /* entry requires deletion */
43080 case FSCACHE_CHECKAUX_OBSOLETE:
43081- fscache_stat(&fscache_n_checkaux_obsolete);
43082+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43083 break;
43084
43085 default:
43086diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43087index 30afdfa..2256596 100644
43088--- a/fs/fscache/operation.c
43089+++ b/fs/fscache/operation.c
43090@@ -17,7 +17,7 @@
43091 #include <linux/slab.h>
43092 #include "internal.h"
43093
43094-atomic_t fscache_op_debug_id;
43095+atomic_unchecked_t fscache_op_debug_id;
43096 EXPORT_SYMBOL(fscache_op_debug_id);
43097
43098 /**
43099@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43100 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43101 ASSERTCMP(atomic_read(&op->usage), >, 0);
43102
43103- fscache_stat(&fscache_n_op_enqueue);
43104+ fscache_stat_unchecked(&fscache_n_op_enqueue);
43105 switch (op->flags & FSCACHE_OP_TYPE) {
43106 case FSCACHE_OP_ASYNC:
43107 _debug("queue async");
43108@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43109 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43110 if (op->processor)
43111 fscache_enqueue_operation(op);
43112- fscache_stat(&fscache_n_op_run);
43113+ fscache_stat_unchecked(&fscache_n_op_run);
43114 }
43115
43116 /*
43117@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43118 if (object->n_ops > 1) {
43119 atomic_inc(&op->usage);
43120 list_add_tail(&op->pend_link, &object->pending_ops);
43121- fscache_stat(&fscache_n_op_pend);
43122+ fscache_stat_unchecked(&fscache_n_op_pend);
43123 } else if (!list_empty(&object->pending_ops)) {
43124 atomic_inc(&op->usage);
43125 list_add_tail(&op->pend_link, &object->pending_ops);
43126- fscache_stat(&fscache_n_op_pend);
43127+ fscache_stat_unchecked(&fscache_n_op_pend);
43128 fscache_start_operations(object);
43129 } else {
43130 ASSERTCMP(object->n_in_progress, ==, 0);
43131@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43132 object->n_exclusive++; /* reads and writes must wait */
43133 atomic_inc(&op->usage);
43134 list_add_tail(&op->pend_link, &object->pending_ops);
43135- fscache_stat(&fscache_n_op_pend);
43136+ fscache_stat_unchecked(&fscache_n_op_pend);
43137 ret = 0;
43138 } else {
43139 /* not allowed to submit ops in any other state */
43140@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43141 if (object->n_exclusive > 0) {
43142 atomic_inc(&op->usage);
43143 list_add_tail(&op->pend_link, &object->pending_ops);
43144- fscache_stat(&fscache_n_op_pend);
43145+ fscache_stat_unchecked(&fscache_n_op_pend);
43146 } else if (!list_empty(&object->pending_ops)) {
43147 atomic_inc(&op->usage);
43148 list_add_tail(&op->pend_link, &object->pending_ops);
43149- fscache_stat(&fscache_n_op_pend);
43150+ fscache_stat_unchecked(&fscache_n_op_pend);
43151 fscache_start_operations(object);
43152 } else {
43153 ASSERTCMP(object->n_exclusive, ==, 0);
43154@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43155 object->n_ops++;
43156 atomic_inc(&op->usage);
43157 list_add_tail(&op->pend_link, &object->pending_ops);
43158- fscache_stat(&fscache_n_op_pend);
43159+ fscache_stat_unchecked(&fscache_n_op_pend);
43160 ret = 0;
43161 } else if (object->state == FSCACHE_OBJECT_DYING ||
43162 object->state == FSCACHE_OBJECT_LC_DYING ||
43163 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43164- fscache_stat(&fscache_n_op_rejected);
43165+ fscache_stat_unchecked(&fscache_n_op_rejected);
43166 ret = -ENOBUFS;
43167 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43168 fscache_report_unexpected_submission(object, op, ostate);
43169@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43170
43171 ret = -EBUSY;
43172 if (!list_empty(&op->pend_link)) {
43173- fscache_stat(&fscache_n_op_cancelled);
43174+ fscache_stat_unchecked(&fscache_n_op_cancelled);
43175 list_del_init(&op->pend_link);
43176 object->n_ops--;
43177 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43178@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43179 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43180 BUG();
43181
43182- fscache_stat(&fscache_n_op_release);
43183+ fscache_stat_unchecked(&fscache_n_op_release);
43184
43185 if (op->release) {
43186 op->release(op);
43187@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43188 * lock, and defer it otherwise */
43189 if (!spin_trylock(&object->lock)) {
43190 _debug("defer put");
43191- fscache_stat(&fscache_n_op_deferred_release);
43192+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43193
43194 cache = object->cache;
43195 spin_lock(&cache->op_gc_list_lock);
43196@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43197
43198 _debug("GC DEFERRED REL OBJ%x OP%x",
43199 object->debug_id, op->debug_id);
43200- fscache_stat(&fscache_n_op_gc);
43201+ fscache_stat_unchecked(&fscache_n_op_gc);
43202
43203 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43204
43205diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43206index 3f7a59b..cf196cc 100644
43207--- a/fs/fscache/page.c
43208+++ b/fs/fscache/page.c
43209@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43210 val = radix_tree_lookup(&cookie->stores, page->index);
43211 if (!val) {
43212 rcu_read_unlock();
43213- fscache_stat(&fscache_n_store_vmscan_not_storing);
43214+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43215 __fscache_uncache_page(cookie, page);
43216 return true;
43217 }
43218@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43219 spin_unlock(&cookie->stores_lock);
43220
43221 if (xpage) {
43222- fscache_stat(&fscache_n_store_vmscan_cancelled);
43223- fscache_stat(&fscache_n_store_radix_deletes);
43224+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43225+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43226 ASSERTCMP(xpage, ==, page);
43227 } else {
43228- fscache_stat(&fscache_n_store_vmscan_gone);
43229+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43230 }
43231
43232 wake_up_bit(&cookie->flags, 0);
43233@@ -107,7 +107,7 @@ page_busy:
43234 /* we might want to wait here, but that could deadlock the allocator as
43235 * the work threads writing to the cache may all end up sleeping
43236 * on memory allocation */
43237- fscache_stat(&fscache_n_store_vmscan_busy);
43238+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43239 return false;
43240 }
43241 EXPORT_SYMBOL(__fscache_maybe_release_page);
43242@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43243 FSCACHE_COOKIE_STORING_TAG);
43244 if (!radix_tree_tag_get(&cookie->stores, page->index,
43245 FSCACHE_COOKIE_PENDING_TAG)) {
43246- fscache_stat(&fscache_n_store_radix_deletes);
43247+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43248 xpage = radix_tree_delete(&cookie->stores, page->index);
43249 }
43250 spin_unlock(&cookie->stores_lock);
43251@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43252
43253 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43254
43255- fscache_stat(&fscache_n_attr_changed_calls);
43256+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43257
43258 if (fscache_object_is_active(object)) {
43259 fscache_stat(&fscache_n_cop_attr_changed);
43260@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43261
43262 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43263
43264- fscache_stat(&fscache_n_attr_changed);
43265+ fscache_stat_unchecked(&fscache_n_attr_changed);
43266
43267 op = kzalloc(sizeof(*op), GFP_KERNEL);
43268 if (!op) {
43269- fscache_stat(&fscache_n_attr_changed_nomem);
43270+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43271 _leave(" = -ENOMEM");
43272 return -ENOMEM;
43273 }
43274@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43275 if (fscache_submit_exclusive_op(object, op) < 0)
43276 goto nobufs;
43277 spin_unlock(&cookie->lock);
43278- fscache_stat(&fscache_n_attr_changed_ok);
43279+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43280 fscache_put_operation(op);
43281 _leave(" = 0");
43282 return 0;
43283@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43284 nobufs:
43285 spin_unlock(&cookie->lock);
43286 kfree(op);
43287- fscache_stat(&fscache_n_attr_changed_nobufs);
43288+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43289 _leave(" = %d", -ENOBUFS);
43290 return -ENOBUFS;
43291 }
43292@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43293 /* allocate a retrieval operation and attempt to submit it */
43294 op = kzalloc(sizeof(*op), GFP_NOIO);
43295 if (!op) {
43296- fscache_stat(&fscache_n_retrievals_nomem);
43297+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43298 return NULL;
43299 }
43300
43301@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43302 return 0;
43303 }
43304
43305- fscache_stat(&fscache_n_retrievals_wait);
43306+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43307
43308 jif = jiffies;
43309 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43310 fscache_wait_bit_interruptible,
43311 TASK_INTERRUPTIBLE) != 0) {
43312- fscache_stat(&fscache_n_retrievals_intr);
43313+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43314 _leave(" = -ERESTARTSYS");
43315 return -ERESTARTSYS;
43316 }
43317@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43318 */
43319 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43320 struct fscache_retrieval *op,
43321- atomic_t *stat_op_waits,
43322- atomic_t *stat_object_dead)
43323+ atomic_unchecked_t *stat_op_waits,
43324+ atomic_unchecked_t *stat_object_dead)
43325 {
43326 int ret;
43327
43328@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43329 goto check_if_dead;
43330
43331 _debug(">>> WT");
43332- fscache_stat(stat_op_waits);
43333+ fscache_stat_unchecked(stat_op_waits);
43334 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43335 fscache_wait_bit_interruptible,
43336 TASK_INTERRUPTIBLE) < 0) {
43337@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43338
43339 check_if_dead:
43340 if (unlikely(fscache_object_is_dead(object))) {
43341- fscache_stat(stat_object_dead);
43342+ fscache_stat_unchecked(stat_object_dead);
43343 return -ENOBUFS;
43344 }
43345 return 0;
43346@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43347
43348 _enter("%p,%p,,,", cookie, page);
43349
43350- fscache_stat(&fscache_n_retrievals);
43351+ fscache_stat_unchecked(&fscache_n_retrievals);
43352
43353 if (hlist_empty(&cookie->backing_objects))
43354 goto nobufs;
43355@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43356 goto nobufs_unlock;
43357 spin_unlock(&cookie->lock);
43358
43359- fscache_stat(&fscache_n_retrieval_ops);
43360+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43361
43362 /* pin the netfs read context in case we need to do the actual netfs
43363 * read because we've encountered a cache read failure */
43364@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43365
43366 error:
43367 if (ret == -ENOMEM)
43368- fscache_stat(&fscache_n_retrievals_nomem);
43369+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43370 else if (ret == -ERESTARTSYS)
43371- fscache_stat(&fscache_n_retrievals_intr);
43372+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43373 else if (ret == -ENODATA)
43374- fscache_stat(&fscache_n_retrievals_nodata);
43375+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43376 else if (ret < 0)
43377- fscache_stat(&fscache_n_retrievals_nobufs);
43378+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43379 else
43380- fscache_stat(&fscache_n_retrievals_ok);
43381+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43382
43383 fscache_put_retrieval(op);
43384 _leave(" = %d", ret);
43385@@ -429,7 +429,7 @@ nobufs_unlock:
43386 spin_unlock(&cookie->lock);
43387 kfree(op);
43388 nobufs:
43389- fscache_stat(&fscache_n_retrievals_nobufs);
43390+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43391 _leave(" = -ENOBUFS");
43392 return -ENOBUFS;
43393 }
43394@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43395
43396 _enter("%p,,%d,,,", cookie, *nr_pages);
43397
43398- fscache_stat(&fscache_n_retrievals);
43399+ fscache_stat_unchecked(&fscache_n_retrievals);
43400
43401 if (hlist_empty(&cookie->backing_objects))
43402 goto nobufs;
43403@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43404 goto nobufs_unlock;
43405 spin_unlock(&cookie->lock);
43406
43407- fscache_stat(&fscache_n_retrieval_ops);
43408+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43409
43410 /* pin the netfs read context in case we need to do the actual netfs
43411 * read because we've encountered a cache read failure */
43412@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43413
43414 error:
43415 if (ret == -ENOMEM)
43416- fscache_stat(&fscache_n_retrievals_nomem);
43417+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43418 else if (ret == -ERESTARTSYS)
43419- fscache_stat(&fscache_n_retrievals_intr);
43420+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43421 else if (ret == -ENODATA)
43422- fscache_stat(&fscache_n_retrievals_nodata);
43423+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43424 else if (ret < 0)
43425- fscache_stat(&fscache_n_retrievals_nobufs);
43426+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43427 else
43428- fscache_stat(&fscache_n_retrievals_ok);
43429+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43430
43431 fscache_put_retrieval(op);
43432 _leave(" = %d", ret);
43433@@ -545,7 +545,7 @@ nobufs_unlock:
43434 spin_unlock(&cookie->lock);
43435 kfree(op);
43436 nobufs:
43437- fscache_stat(&fscache_n_retrievals_nobufs);
43438+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43439 _leave(" = -ENOBUFS");
43440 return -ENOBUFS;
43441 }
43442@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43443
43444 _enter("%p,%p,,,", cookie, page);
43445
43446- fscache_stat(&fscache_n_allocs);
43447+ fscache_stat_unchecked(&fscache_n_allocs);
43448
43449 if (hlist_empty(&cookie->backing_objects))
43450 goto nobufs;
43451@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43452 goto nobufs_unlock;
43453 spin_unlock(&cookie->lock);
43454
43455- fscache_stat(&fscache_n_alloc_ops);
43456+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43457
43458 ret = fscache_wait_for_retrieval_activation(
43459 object, op,
43460@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43461
43462 error:
43463 if (ret == -ERESTARTSYS)
43464- fscache_stat(&fscache_n_allocs_intr);
43465+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43466 else if (ret < 0)
43467- fscache_stat(&fscache_n_allocs_nobufs);
43468+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43469 else
43470- fscache_stat(&fscache_n_allocs_ok);
43471+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43472
43473 fscache_put_retrieval(op);
43474 _leave(" = %d", ret);
43475@@ -625,7 +625,7 @@ nobufs_unlock:
43476 spin_unlock(&cookie->lock);
43477 kfree(op);
43478 nobufs:
43479- fscache_stat(&fscache_n_allocs_nobufs);
43480+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43481 _leave(" = -ENOBUFS");
43482 return -ENOBUFS;
43483 }
43484@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43485
43486 spin_lock(&cookie->stores_lock);
43487
43488- fscache_stat(&fscache_n_store_calls);
43489+ fscache_stat_unchecked(&fscache_n_store_calls);
43490
43491 /* find a page to store */
43492 page = NULL;
43493@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43494 page = results[0];
43495 _debug("gang %d [%lx]", n, page->index);
43496 if (page->index > op->store_limit) {
43497- fscache_stat(&fscache_n_store_pages_over_limit);
43498+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43499 goto superseded;
43500 }
43501
43502@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43503 spin_unlock(&cookie->stores_lock);
43504 spin_unlock(&object->lock);
43505
43506- fscache_stat(&fscache_n_store_pages);
43507+ fscache_stat_unchecked(&fscache_n_store_pages);
43508 fscache_stat(&fscache_n_cop_write_page);
43509 ret = object->cache->ops->write_page(op, page);
43510 fscache_stat_d(&fscache_n_cop_write_page);
43511@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43512 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43513 ASSERT(PageFsCache(page));
43514
43515- fscache_stat(&fscache_n_stores);
43516+ fscache_stat_unchecked(&fscache_n_stores);
43517
43518 op = kzalloc(sizeof(*op), GFP_NOIO);
43519 if (!op)
43520@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43521 spin_unlock(&cookie->stores_lock);
43522 spin_unlock(&object->lock);
43523
43524- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43525+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43526 op->store_limit = object->store_limit;
43527
43528 if (fscache_submit_op(object, &op->op) < 0)
43529@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43530
43531 spin_unlock(&cookie->lock);
43532 radix_tree_preload_end();
43533- fscache_stat(&fscache_n_store_ops);
43534- fscache_stat(&fscache_n_stores_ok);
43535+ fscache_stat_unchecked(&fscache_n_store_ops);
43536+ fscache_stat_unchecked(&fscache_n_stores_ok);
43537
43538 /* the work queue now carries its own ref on the object */
43539 fscache_put_operation(&op->op);
43540@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43541 return 0;
43542
43543 already_queued:
43544- fscache_stat(&fscache_n_stores_again);
43545+ fscache_stat_unchecked(&fscache_n_stores_again);
43546 already_pending:
43547 spin_unlock(&cookie->stores_lock);
43548 spin_unlock(&object->lock);
43549 spin_unlock(&cookie->lock);
43550 radix_tree_preload_end();
43551 kfree(op);
43552- fscache_stat(&fscache_n_stores_ok);
43553+ fscache_stat_unchecked(&fscache_n_stores_ok);
43554 _leave(" = 0");
43555 return 0;
43556
43557@@ -851,14 +851,14 @@ nobufs:
43558 spin_unlock(&cookie->lock);
43559 radix_tree_preload_end();
43560 kfree(op);
43561- fscache_stat(&fscache_n_stores_nobufs);
43562+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43563 _leave(" = -ENOBUFS");
43564 return -ENOBUFS;
43565
43566 nomem_free:
43567 kfree(op);
43568 nomem:
43569- fscache_stat(&fscache_n_stores_oom);
43570+ fscache_stat_unchecked(&fscache_n_stores_oom);
43571 _leave(" = -ENOMEM");
43572 return -ENOMEM;
43573 }
43574@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43575 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43576 ASSERTCMP(page, !=, NULL);
43577
43578- fscache_stat(&fscache_n_uncaches);
43579+ fscache_stat_unchecked(&fscache_n_uncaches);
43580
43581 /* cache withdrawal may beat us to it */
43582 if (!PageFsCache(page))
43583@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43584 unsigned long loop;
43585
43586 #ifdef CONFIG_FSCACHE_STATS
43587- atomic_add(pagevec->nr, &fscache_n_marks);
43588+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43589 #endif
43590
43591 for (loop = 0; loop < pagevec->nr; loop++) {
43592diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43593index 4765190..2a067f2 100644
43594--- a/fs/fscache/stats.c
43595+++ b/fs/fscache/stats.c
43596@@ -18,95 +18,95 @@
43597 /*
43598 * operation counters
43599 */
43600-atomic_t fscache_n_op_pend;
43601-atomic_t fscache_n_op_run;
43602-atomic_t fscache_n_op_enqueue;
43603-atomic_t fscache_n_op_requeue;
43604-atomic_t fscache_n_op_deferred_release;
43605-atomic_t fscache_n_op_release;
43606-atomic_t fscache_n_op_gc;
43607-atomic_t fscache_n_op_cancelled;
43608-atomic_t fscache_n_op_rejected;
43609+atomic_unchecked_t fscache_n_op_pend;
43610+atomic_unchecked_t fscache_n_op_run;
43611+atomic_unchecked_t fscache_n_op_enqueue;
43612+atomic_unchecked_t fscache_n_op_requeue;
43613+atomic_unchecked_t fscache_n_op_deferred_release;
43614+atomic_unchecked_t fscache_n_op_release;
43615+atomic_unchecked_t fscache_n_op_gc;
43616+atomic_unchecked_t fscache_n_op_cancelled;
43617+atomic_unchecked_t fscache_n_op_rejected;
43618
43619-atomic_t fscache_n_attr_changed;
43620-atomic_t fscache_n_attr_changed_ok;
43621-atomic_t fscache_n_attr_changed_nobufs;
43622-atomic_t fscache_n_attr_changed_nomem;
43623-atomic_t fscache_n_attr_changed_calls;
43624+atomic_unchecked_t fscache_n_attr_changed;
43625+atomic_unchecked_t fscache_n_attr_changed_ok;
43626+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43627+atomic_unchecked_t fscache_n_attr_changed_nomem;
43628+atomic_unchecked_t fscache_n_attr_changed_calls;
43629
43630-atomic_t fscache_n_allocs;
43631-atomic_t fscache_n_allocs_ok;
43632-atomic_t fscache_n_allocs_wait;
43633-atomic_t fscache_n_allocs_nobufs;
43634-atomic_t fscache_n_allocs_intr;
43635-atomic_t fscache_n_allocs_object_dead;
43636-atomic_t fscache_n_alloc_ops;
43637-atomic_t fscache_n_alloc_op_waits;
43638+atomic_unchecked_t fscache_n_allocs;
43639+atomic_unchecked_t fscache_n_allocs_ok;
43640+atomic_unchecked_t fscache_n_allocs_wait;
43641+atomic_unchecked_t fscache_n_allocs_nobufs;
43642+atomic_unchecked_t fscache_n_allocs_intr;
43643+atomic_unchecked_t fscache_n_allocs_object_dead;
43644+atomic_unchecked_t fscache_n_alloc_ops;
43645+atomic_unchecked_t fscache_n_alloc_op_waits;
43646
43647-atomic_t fscache_n_retrievals;
43648-atomic_t fscache_n_retrievals_ok;
43649-atomic_t fscache_n_retrievals_wait;
43650-atomic_t fscache_n_retrievals_nodata;
43651-atomic_t fscache_n_retrievals_nobufs;
43652-atomic_t fscache_n_retrievals_intr;
43653-atomic_t fscache_n_retrievals_nomem;
43654-atomic_t fscache_n_retrievals_object_dead;
43655-atomic_t fscache_n_retrieval_ops;
43656-atomic_t fscache_n_retrieval_op_waits;
43657+atomic_unchecked_t fscache_n_retrievals;
43658+atomic_unchecked_t fscache_n_retrievals_ok;
43659+atomic_unchecked_t fscache_n_retrievals_wait;
43660+atomic_unchecked_t fscache_n_retrievals_nodata;
43661+atomic_unchecked_t fscache_n_retrievals_nobufs;
43662+atomic_unchecked_t fscache_n_retrievals_intr;
43663+atomic_unchecked_t fscache_n_retrievals_nomem;
43664+atomic_unchecked_t fscache_n_retrievals_object_dead;
43665+atomic_unchecked_t fscache_n_retrieval_ops;
43666+atomic_unchecked_t fscache_n_retrieval_op_waits;
43667
43668-atomic_t fscache_n_stores;
43669-atomic_t fscache_n_stores_ok;
43670-atomic_t fscache_n_stores_again;
43671-atomic_t fscache_n_stores_nobufs;
43672-atomic_t fscache_n_stores_oom;
43673-atomic_t fscache_n_store_ops;
43674-atomic_t fscache_n_store_calls;
43675-atomic_t fscache_n_store_pages;
43676-atomic_t fscache_n_store_radix_deletes;
43677-atomic_t fscache_n_store_pages_over_limit;
43678+atomic_unchecked_t fscache_n_stores;
43679+atomic_unchecked_t fscache_n_stores_ok;
43680+atomic_unchecked_t fscache_n_stores_again;
43681+atomic_unchecked_t fscache_n_stores_nobufs;
43682+atomic_unchecked_t fscache_n_stores_oom;
43683+atomic_unchecked_t fscache_n_store_ops;
43684+atomic_unchecked_t fscache_n_store_calls;
43685+atomic_unchecked_t fscache_n_store_pages;
43686+atomic_unchecked_t fscache_n_store_radix_deletes;
43687+atomic_unchecked_t fscache_n_store_pages_over_limit;
43688
43689-atomic_t fscache_n_store_vmscan_not_storing;
43690-atomic_t fscache_n_store_vmscan_gone;
43691-atomic_t fscache_n_store_vmscan_busy;
43692-atomic_t fscache_n_store_vmscan_cancelled;
43693+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43694+atomic_unchecked_t fscache_n_store_vmscan_gone;
43695+atomic_unchecked_t fscache_n_store_vmscan_busy;
43696+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43697
43698-atomic_t fscache_n_marks;
43699-atomic_t fscache_n_uncaches;
43700+atomic_unchecked_t fscache_n_marks;
43701+atomic_unchecked_t fscache_n_uncaches;
43702
43703-atomic_t fscache_n_acquires;
43704-atomic_t fscache_n_acquires_null;
43705-atomic_t fscache_n_acquires_no_cache;
43706-atomic_t fscache_n_acquires_ok;
43707-atomic_t fscache_n_acquires_nobufs;
43708-atomic_t fscache_n_acquires_oom;
43709+atomic_unchecked_t fscache_n_acquires;
43710+atomic_unchecked_t fscache_n_acquires_null;
43711+atomic_unchecked_t fscache_n_acquires_no_cache;
43712+atomic_unchecked_t fscache_n_acquires_ok;
43713+atomic_unchecked_t fscache_n_acquires_nobufs;
43714+atomic_unchecked_t fscache_n_acquires_oom;
43715
43716-atomic_t fscache_n_updates;
43717-atomic_t fscache_n_updates_null;
43718-atomic_t fscache_n_updates_run;
43719+atomic_unchecked_t fscache_n_updates;
43720+atomic_unchecked_t fscache_n_updates_null;
43721+atomic_unchecked_t fscache_n_updates_run;
43722
43723-atomic_t fscache_n_relinquishes;
43724-atomic_t fscache_n_relinquishes_null;
43725-atomic_t fscache_n_relinquishes_waitcrt;
43726-atomic_t fscache_n_relinquishes_retire;
43727+atomic_unchecked_t fscache_n_relinquishes;
43728+atomic_unchecked_t fscache_n_relinquishes_null;
43729+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43730+atomic_unchecked_t fscache_n_relinquishes_retire;
43731
43732-atomic_t fscache_n_cookie_index;
43733-atomic_t fscache_n_cookie_data;
43734-atomic_t fscache_n_cookie_special;
43735+atomic_unchecked_t fscache_n_cookie_index;
43736+atomic_unchecked_t fscache_n_cookie_data;
43737+atomic_unchecked_t fscache_n_cookie_special;
43738
43739-atomic_t fscache_n_object_alloc;
43740-atomic_t fscache_n_object_no_alloc;
43741-atomic_t fscache_n_object_lookups;
43742-atomic_t fscache_n_object_lookups_negative;
43743-atomic_t fscache_n_object_lookups_positive;
43744-atomic_t fscache_n_object_lookups_timed_out;
43745-atomic_t fscache_n_object_created;
43746-atomic_t fscache_n_object_avail;
43747-atomic_t fscache_n_object_dead;
43748+atomic_unchecked_t fscache_n_object_alloc;
43749+atomic_unchecked_t fscache_n_object_no_alloc;
43750+atomic_unchecked_t fscache_n_object_lookups;
43751+atomic_unchecked_t fscache_n_object_lookups_negative;
43752+atomic_unchecked_t fscache_n_object_lookups_positive;
43753+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43754+atomic_unchecked_t fscache_n_object_created;
43755+atomic_unchecked_t fscache_n_object_avail;
43756+atomic_unchecked_t fscache_n_object_dead;
43757
43758-atomic_t fscache_n_checkaux_none;
43759-atomic_t fscache_n_checkaux_okay;
43760-atomic_t fscache_n_checkaux_update;
43761-atomic_t fscache_n_checkaux_obsolete;
43762+atomic_unchecked_t fscache_n_checkaux_none;
43763+atomic_unchecked_t fscache_n_checkaux_okay;
43764+atomic_unchecked_t fscache_n_checkaux_update;
43765+atomic_unchecked_t fscache_n_checkaux_obsolete;
43766
43767 atomic_t fscache_n_cop_alloc_object;
43768 atomic_t fscache_n_cop_lookup_object;
43769@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43770 seq_puts(m, "FS-Cache statistics\n");
43771
43772 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43773- atomic_read(&fscache_n_cookie_index),
43774- atomic_read(&fscache_n_cookie_data),
43775- atomic_read(&fscache_n_cookie_special));
43776+ atomic_read_unchecked(&fscache_n_cookie_index),
43777+ atomic_read_unchecked(&fscache_n_cookie_data),
43778+ atomic_read_unchecked(&fscache_n_cookie_special));
43779
43780 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43781- atomic_read(&fscache_n_object_alloc),
43782- atomic_read(&fscache_n_object_no_alloc),
43783- atomic_read(&fscache_n_object_avail),
43784- atomic_read(&fscache_n_object_dead));
43785+ atomic_read_unchecked(&fscache_n_object_alloc),
43786+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43787+ atomic_read_unchecked(&fscache_n_object_avail),
43788+ atomic_read_unchecked(&fscache_n_object_dead));
43789 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43790- atomic_read(&fscache_n_checkaux_none),
43791- atomic_read(&fscache_n_checkaux_okay),
43792- atomic_read(&fscache_n_checkaux_update),
43793- atomic_read(&fscache_n_checkaux_obsolete));
43794+ atomic_read_unchecked(&fscache_n_checkaux_none),
43795+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43796+ atomic_read_unchecked(&fscache_n_checkaux_update),
43797+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43798
43799 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43800- atomic_read(&fscache_n_marks),
43801- atomic_read(&fscache_n_uncaches));
43802+ atomic_read_unchecked(&fscache_n_marks),
43803+ atomic_read_unchecked(&fscache_n_uncaches));
43804
43805 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43806 " oom=%u\n",
43807- atomic_read(&fscache_n_acquires),
43808- atomic_read(&fscache_n_acquires_null),
43809- atomic_read(&fscache_n_acquires_no_cache),
43810- atomic_read(&fscache_n_acquires_ok),
43811- atomic_read(&fscache_n_acquires_nobufs),
43812- atomic_read(&fscache_n_acquires_oom));
43813+ atomic_read_unchecked(&fscache_n_acquires),
43814+ atomic_read_unchecked(&fscache_n_acquires_null),
43815+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43816+ atomic_read_unchecked(&fscache_n_acquires_ok),
43817+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43818+ atomic_read_unchecked(&fscache_n_acquires_oom));
43819
43820 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43821- atomic_read(&fscache_n_object_lookups),
43822- atomic_read(&fscache_n_object_lookups_negative),
43823- atomic_read(&fscache_n_object_lookups_positive),
43824- atomic_read(&fscache_n_object_created),
43825- atomic_read(&fscache_n_object_lookups_timed_out));
43826+ atomic_read_unchecked(&fscache_n_object_lookups),
43827+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43828+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43829+ atomic_read_unchecked(&fscache_n_object_created),
43830+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43831
43832 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43833- atomic_read(&fscache_n_updates),
43834- atomic_read(&fscache_n_updates_null),
43835- atomic_read(&fscache_n_updates_run));
43836+ atomic_read_unchecked(&fscache_n_updates),
43837+ atomic_read_unchecked(&fscache_n_updates_null),
43838+ atomic_read_unchecked(&fscache_n_updates_run));
43839
43840 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43841- atomic_read(&fscache_n_relinquishes),
43842- atomic_read(&fscache_n_relinquishes_null),
43843- atomic_read(&fscache_n_relinquishes_waitcrt),
43844- atomic_read(&fscache_n_relinquishes_retire));
43845+ atomic_read_unchecked(&fscache_n_relinquishes),
43846+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43847+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43848+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43849
43850 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43851- atomic_read(&fscache_n_attr_changed),
43852- atomic_read(&fscache_n_attr_changed_ok),
43853- atomic_read(&fscache_n_attr_changed_nobufs),
43854- atomic_read(&fscache_n_attr_changed_nomem),
43855- atomic_read(&fscache_n_attr_changed_calls));
43856+ atomic_read_unchecked(&fscache_n_attr_changed),
43857+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43858+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43859+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43860+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43861
43862 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43863- atomic_read(&fscache_n_allocs),
43864- atomic_read(&fscache_n_allocs_ok),
43865- atomic_read(&fscache_n_allocs_wait),
43866- atomic_read(&fscache_n_allocs_nobufs),
43867- atomic_read(&fscache_n_allocs_intr));
43868+ atomic_read_unchecked(&fscache_n_allocs),
43869+ atomic_read_unchecked(&fscache_n_allocs_ok),
43870+ atomic_read_unchecked(&fscache_n_allocs_wait),
43871+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43872+ atomic_read_unchecked(&fscache_n_allocs_intr));
43873 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43874- atomic_read(&fscache_n_alloc_ops),
43875- atomic_read(&fscache_n_alloc_op_waits),
43876- atomic_read(&fscache_n_allocs_object_dead));
43877+ atomic_read_unchecked(&fscache_n_alloc_ops),
43878+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43879+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43880
43881 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43882 " int=%u oom=%u\n",
43883- atomic_read(&fscache_n_retrievals),
43884- atomic_read(&fscache_n_retrievals_ok),
43885- atomic_read(&fscache_n_retrievals_wait),
43886- atomic_read(&fscache_n_retrievals_nodata),
43887- atomic_read(&fscache_n_retrievals_nobufs),
43888- atomic_read(&fscache_n_retrievals_intr),
43889- atomic_read(&fscache_n_retrievals_nomem));
43890+ atomic_read_unchecked(&fscache_n_retrievals),
43891+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43892+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43893+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43894+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43895+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43896+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43897 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43898- atomic_read(&fscache_n_retrieval_ops),
43899- atomic_read(&fscache_n_retrieval_op_waits),
43900- atomic_read(&fscache_n_retrievals_object_dead));
43901+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43902+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43903+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43904
43905 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43906- atomic_read(&fscache_n_stores),
43907- atomic_read(&fscache_n_stores_ok),
43908- atomic_read(&fscache_n_stores_again),
43909- atomic_read(&fscache_n_stores_nobufs),
43910- atomic_read(&fscache_n_stores_oom));
43911+ atomic_read_unchecked(&fscache_n_stores),
43912+ atomic_read_unchecked(&fscache_n_stores_ok),
43913+ atomic_read_unchecked(&fscache_n_stores_again),
43914+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43915+ atomic_read_unchecked(&fscache_n_stores_oom));
43916 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43917- atomic_read(&fscache_n_store_ops),
43918- atomic_read(&fscache_n_store_calls),
43919- atomic_read(&fscache_n_store_pages),
43920- atomic_read(&fscache_n_store_radix_deletes),
43921- atomic_read(&fscache_n_store_pages_over_limit));
43922+ atomic_read_unchecked(&fscache_n_store_ops),
43923+ atomic_read_unchecked(&fscache_n_store_calls),
43924+ atomic_read_unchecked(&fscache_n_store_pages),
43925+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43926+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43927
43928 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43929- atomic_read(&fscache_n_store_vmscan_not_storing),
43930- atomic_read(&fscache_n_store_vmscan_gone),
43931- atomic_read(&fscache_n_store_vmscan_busy),
43932- atomic_read(&fscache_n_store_vmscan_cancelled));
43933+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43934+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43935+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43936+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43937
43938 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43939- atomic_read(&fscache_n_op_pend),
43940- atomic_read(&fscache_n_op_run),
43941- atomic_read(&fscache_n_op_enqueue),
43942- atomic_read(&fscache_n_op_cancelled),
43943- atomic_read(&fscache_n_op_rejected));
43944+ atomic_read_unchecked(&fscache_n_op_pend),
43945+ atomic_read_unchecked(&fscache_n_op_run),
43946+ atomic_read_unchecked(&fscache_n_op_enqueue),
43947+ atomic_read_unchecked(&fscache_n_op_cancelled),
43948+ atomic_read_unchecked(&fscache_n_op_rejected));
43949 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43950- atomic_read(&fscache_n_op_deferred_release),
43951- atomic_read(&fscache_n_op_release),
43952- atomic_read(&fscache_n_op_gc));
43953+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43954+ atomic_read_unchecked(&fscache_n_op_release),
43955+ atomic_read_unchecked(&fscache_n_op_gc));
43956
43957 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43958 atomic_read(&fscache_n_cop_alloc_object),
43959diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
43960index 3426521..3b75162 100644
43961--- a/fs/fuse/cuse.c
43962+++ b/fs/fuse/cuse.c
43963@@ -587,10 +587,12 @@ static int __init cuse_init(void)
43964 INIT_LIST_HEAD(&cuse_conntbl[i]);
43965
43966 /* inherit and extend fuse_dev_operations */
43967- cuse_channel_fops = fuse_dev_operations;
43968- cuse_channel_fops.owner = THIS_MODULE;
43969- cuse_channel_fops.open = cuse_channel_open;
43970- cuse_channel_fops.release = cuse_channel_release;
43971+ pax_open_kernel();
43972+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43973+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43974+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43975+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43976+ pax_close_kernel();
43977
43978 cuse_class = class_create(THIS_MODULE, "cuse");
43979 if (IS_ERR(cuse_class))
43980diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
43981index 2aaf3ea..8e50863 100644
43982--- a/fs/fuse/dev.c
43983+++ b/fs/fuse/dev.c
43984@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
43985 ret = 0;
43986 pipe_lock(pipe);
43987
43988- if (!pipe->readers) {
43989+ if (!atomic_read(&pipe->readers)) {
43990 send_sig(SIGPIPE, current, 0);
43991 if (!ret)
43992 ret = -EPIPE;
43993diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
43994index 9f63e49..d8a64c0 100644
43995--- a/fs/fuse/dir.c
43996+++ b/fs/fuse/dir.c
43997@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
43998 return link;
43999 }
44000
44001-static void free_link(char *link)
44002+static void free_link(const char *link)
44003 {
44004 if (!IS_ERR(link))
44005 free_page((unsigned long) link);
44006diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44007index cfd4959..a780959 100644
44008--- a/fs/gfs2/inode.c
44009+++ b/fs/gfs2/inode.c
44010@@ -1490,7 +1490,7 @@ out:
44011
44012 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44013 {
44014- char *s = nd_get_link(nd);
44015+ const char *s = nd_get_link(nd);
44016 if (!IS_ERR(s))
44017 kfree(s);
44018 }
44019diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44020index 0be5a78..9cfb853 100644
44021--- a/fs/hugetlbfs/inode.c
44022+++ b/fs/hugetlbfs/inode.c
44023@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44024 .kill_sb = kill_litter_super,
44025 };
44026
44027-static struct vfsmount *hugetlbfs_vfsmount;
44028+struct vfsmount *hugetlbfs_vfsmount;
44029
44030 static int can_do_hugetlb_shm(void)
44031 {
44032diff --git a/fs/inode.c b/fs/inode.c
44033index ee4e66b..0451521 100644
44034--- a/fs/inode.c
44035+++ b/fs/inode.c
44036@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44037
44038 #ifdef CONFIG_SMP
44039 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44040- static atomic_t shared_last_ino;
44041- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44042+ static atomic_unchecked_t shared_last_ino;
44043+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44044
44045 res = next - LAST_INO_BATCH;
44046 }
44047diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44048index e513f19..2ab1351 100644
44049--- a/fs/jffs2/erase.c
44050+++ b/fs/jffs2/erase.c
44051@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44052 struct jffs2_unknown_node marker = {
44053 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44054 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44055- .totlen = cpu_to_je32(c->cleanmarker_size)
44056+ .totlen = cpu_to_je32(c->cleanmarker_size),
44057+ .hdr_crc = cpu_to_je32(0)
44058 };
44059
44060 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44061diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44062index b09e51d..e482afa 100644
44063--- a/fs/jffs2/wbuf.c
44064+++ b/fs/jffs2/wbuf.c
44065@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44066 {
44067 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44068 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44069- .totlen = constant_cpu_to_je32(8)
44070+ .totlen = constant_cpu_to_je32(8),
44071+ .hdr_crc = constant_cpu_to_je32(0)
44072 };
44073
44074 /*
44075diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44076index a44eff0..462e07d 100644
44077--- a/fs/jfs/super.c
44078+++ b/fs/jfs/super.c
44079@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44080
44081 jfs_inode_cachep =
44082 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44083- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44084+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44085 init_once);
44086 if (jfs_inode_cachep == NULL)
44087 return -ENOMEM;
44088diff --git a/fs/libfs.c b/fs/libfs.c
44089index f6d411e..e82a08d 100644
44090--- a/fs/libfs.c
44091+++ b/fs/libfs.c
44092@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44093
44094 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44095 struct dentry *next;
44096+ char d_name[sizeof(next->d_iname)];
44097+ const unsigned char *name;
44098+
44099 next = list_entry(p, struct dentry, d_u.d_child);
44100 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44101 if (!simple_positive(next)) {
44102@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44103
44104 spin_unlock(&next->d_lock);
44105 spin_unlock(&dentry->d_lock);
44106- if (filldir(dirent, next->d_name.name,
44107+ name = next->d_name.name;
44108+ if (name == next->d_iname) {
44109+ memcpy(d_name, name, next->d_name.len);
44110+ name = d_name;
44111+ }
44112+ if (filldir(dirent, name,
44113 next->d_name.len, filp->f_pos,
44114 next->d_inode->i_ino,
44115 dt_type(next->d_inode)) < 0)
44116diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44117index 8392cb8..80d6193 100644
44118--- a/fs/lockd/clntproc.c
44119+++ b/fs/lockd/clntproc.c
44120@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44121 /*
44122 * Cookie counter for NLM requests
44123 */
44124-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44125+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44126
44127 void nlmclnt_next_cookie(struct nlm_cookie *c)
44128 {
44129- u32 cookie = atomic_inc_return(&nlm_cookie);
44130+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44131
44132 memcpy(c->data, &cookie, 4);
44133 c->len=4;
44134diff --git a/fs/locks.c b/fs/locks.c
44135index 637694b..f84a121 100644
44136--- a/fs/locks.c
44137+++ b/fs/locks.c
44138@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44139 return;
44140
44141 if (filp->f_op && filp->f_op->flock) {
44142- struct file_lock fl = {
44143+ struct file_lock flock = {
44144 .fl_pid = current->tgid,
44145 .fl_file = filp,
44146 .fl_flags = FL_FLOCK,
44147 .fl_type = F_UNLCK,
44148 .fl_end = OFFSET_MAX,
44149 };
44150- filp->f_op->flock(filp, F_SETLKW, &fl);
44151- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44152- fl.fl_ops->fl_release_private(&fl);
44153+ filp->f_op->flock(filp, F_SETLKW, &flock);
44154+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44155+ flock.fl_ops->fl_release_private(&flock);
44156 }
44157
44158 lock_flocks();
44159diff --git a/fs/namei.c b/fs/namei.c
44160index 5008f01..90328a7 100644
44161--- a/fs/namei.c
44162+++ b/fs/namei.c
44163@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44164 if (ret != -EACCES)
44165 return ret;
44166
44167+#ifdef CONFIG_GRKERNSEC
44168+ /* we'll block if we have to log due to a denied capability use */
44169+ if (mask & MAY_NOT_BLOCK)
44170+ return -ECHILD;
44171+#endif
44172+
44173 if (S_ISDIR(inode->i_mode)) {
44174 /* DACs are overridable for directories */
44175- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44176- return 0;
44177 if (!(mask & MAY_WRITE))
44178- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44179+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44180+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44181 return 0;
44182+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44183+ return 0;
44184 return -EACCES;
44185 }
44186 /*
44187+ * Searching includes executable on directories, else just read.
44188+ */
44189+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44190+ if (mask == MAY_READ)
44191+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44192+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44193+ return 0;
44194+
44195+ /*
44196 * Read/write DACs are always overridable.
44197 * Executable DACs are overridable when there is
44198 * at least one exec bit set.
44199@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44200 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44201 return 0;
44202
44203- /*
44204- * Searching includes executable on directories, else just read.
44205- */
44206- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44207- if (mask == MAY_READ)
44208- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44209- return 0;
44210-
44211 return -EACCES;
44212 }
44213
44214@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44215 return error;
44216 }
44217
44218+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44219+ dentry->d_inode, dentry, nd->path.mnt)) {
44220+ error = -EACCES;
44221+ *p = ERR_PTR(error); /* no ->put_link(), please */
44222+ path_put(&nd->path);
44223+ return error;
44224+ }
44225+
44226 nd->last_type = LAST_BIND;
44227 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44228 error = PTR_ERR(*p);
44229 if (!IS_ERR(*p)) {
44230- char *s = nd_get_link(nd);
44231+ const char *s = nd_get_link(nd);
44232 error = 0;
44233 if (s)
44234 error = __vfs_follow_link(nd, s);
44235@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44236 if (!err)
44237 err = complete_walk(nd);
44238
44239+ if (!(nd->flags & LOOKUP_PARENT)) {
44240+#ifdef CONFIG_GRKERNSEC
44241+ if (flags & LOOKUP_RCU) {
44242+ if (!err)
44243+ path_put(&nd->path);
44244+ err = -ECHILD;
44245+ } else
44246+#endif
44247+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44248+ if (!err)
44249+ path_put(&nd->path);
44250+ err = -ENOENT;
44251+ }
44252+ }
44253+
44254 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44255 if (!nd->inode->i_op->lookup) {
44256 path_put(&nd->path);
44257@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44258 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44259
44260 if (likely(!retval)) {
44261+ if (*name != '/' && nd->path.dentry && nd->inode) {
44262+#ifdef CONFIG_GRKERNSEC
44263+ if (flags & LOOKUP_RCU)
44264+ return -ECHILD;
44265+#endif
44266+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44267+ return -ENOENT;
44268+ }
44269+
44270 if (unlikely(!audit_dummy_context())) {
44271 if (nd->path.dentry && nd->inode)
44272 audit_inode(name, nd->path.dentry);
44273@@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44274 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44275 return -EPERM;
44276
44277+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44278+ return -EPERM;
44279+ if (gr_handle_rawio(inode))
44280+ return -EPERM;
44281+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44282+ return -EACCES;
44283+
44284 return 0;
44285 }
44286
44287@@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44288 error = complete_walk(nd);
44289 if (error)
44290 return ERR_PTR(error);
44291+#ifdef CONFIG_GRKERNSEC
44292+ if (nd->flags & LOOKUP_RCU) {
44293+ error = -ECHILD;
44294+ goto exit;
44295+ }
44296+#endif
44297+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44298+ error = -ENOENT;
44299+ goto exit;
44300+ }
44301 audit_inode(pathname, nd->path.dentry);
44302 if (open_flag & O_CREAT) {
44303 error = -EISDIR;
44304@@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44305 error = complete_walk(nd);
44306 if (error)
44307 return ERR_PTR(error);
44308+#ifdef CONFIG_GRKERNSEC
44309+ if (nd->flags & LOOKUP_RCU) {
44310+ error = -ECHILD;
44311+ goto exit;
44312+ }
44313+#endif
44314+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44315+ error = -ENOENT;
44316+ goto exit;
44317+ }
44318 audit_inode(pathname, dir);
44319 goto ok;
44320 }
44321@@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44322 error = complete_walk(nd);
44323 if (error)
44324 return ERR_PTR(-ECHILD);
44325+#ifdef CONFIG_GRKERNSEC
44326+ if (nd->flags & LOOKUP_RCU) {
44327+ error = -ECHILD;
44328+ goto exit;
44329+ }
44330+#endif
44331+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44332+ error = -ENOENT;
44333+ goto exit;
44334+ }
44335
44336 error = -ENOTDIR;
44337 if (nd->flags & LOOKUP_DIRECTORY) {
44338@@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44339 /* Negative dentry, just create the file */
44340 if (!dentry->d_inode) {
44341 int mode = op->mode;
44342+
44343+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44344+ error = -EACCES;
44345+ goto exit_mutex_unlock;
44346+ }
44347+
44348 if (!IS_POSIXACL(dir->d_inode))
44349 mode &= ~current_umask();
44350 /*
44351@@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44352 error = vfs_create(dir->d_inode, dentry, mode, nd);
44353 if (error)
44354 goto exit_mutex_unlock;
44355+ else
44356+ gr_handle_create(path->dentry, path->mnt);
44357 mutex_unlock(&dir->d_inode->i_mutex);
44358 dput(nd->path.dentry);
44359 nd->path.dentry = dentry;
44360@@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44361 /*
44362 * It already exists.
44363 */
44364+
44365+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44366+ error = -ENOENT;
44367+ goto exit_mutex_unlock;
44368+ }
44369+
44370+ /* only check if O_CREAT is specified, all other checks need to go
44371+ into may_open */
44372+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44373+ error = -EACCES;
44374+ goto exit_mutex_unlock;
44375+ }
44376+
44377 mutex_unlock(&dir->d_inode->i_mutex);
44378 audit_inode(pathname, path->dentry);
44379
44380@@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44381 *path = nd.path;
44382 return dentry;
44383 eexist:
44384+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44385+ dput(dentry);
44386+ dentry = ERR_PTR(-ENOENT);
44387+ goto fail;
44388+ }
44389 dput(dentry);
44390 dentry = ERR_PTR(-EEXIST);
44391 fail:
44392@@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44393 }
44394 EXPORT_SYMBOL(user_path_create);
44395
44396+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44397+{
44398+ char *tmp = getname(pathname);
44399+ struct dentry *res;
44400+ if (IS_ERR(tmp))
44401+ return ERR_CAST(tmp);
44402+ res = kern_path_create(dfd, tmp, path, is_dir);
44403+ if (IS_ERR(res))
44404+ putname(tmp);
44405+ else
44406+ *to = tmp;
44407+ return res;
44408+}
44409+
44410 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44411 {
44412 int error = may_create(dir, dentry);
44413@@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44414 error = mnt_want_write(path.mnt);
44415 if (error)
44416 goto out_dput;
44417+
44418+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44419+ error = -EPERM;
44420+ goto out_drop_write;
44421+ }
44422+
44423+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44424+ error = -EACCES;
44425+ goto out_drop_write;
44426+ }
44427+
44428 error = security_path_mknod(&path, dentry, mode, dev);
44429 if (error)
44430 goto out_drop_write;
44431@@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44432 }
44433 out_drop_write:
44434 mnt_drop_write(path.mnt);
44435+
44436+ if (!error)
44437+ gr_handle_create(dentry, path.mnt);
44438 out_dput:
44439 dput(dentry);
44440 mutex_unlock(&path.dentry->d_inode->i_mutex);
44441@@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44442 error = mnt_want_write(path.mnt);
44443 if (error)
44444 goto out_dput;
44445+
44446+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44447+ error = -EACCES;
44448+ goto out_drop_write;
44449+ }
44450+
44451 error = security_path_mkdir(&path, dentry, mode);
44452 if (error)
44453 goto out_drop_write;
44454 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44455 out_drop_write:
44456 mnt_drop_write(path.mnt);
44457+
44458+ if (!error)
44459+ gr_handle_create(dentry, path.mnt);
44460 out_dput:
44461 dput(dentry);
44462 mutex_unlock(&path.dentry->d_inode->i_mutex);
44463@@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44464 char * name;
44465 struct dentry *dentry;
44466 struct nameidata nd;
44467+ ino_t saved_ino = 0;
44468+ dev_t saved_dev = 0;
44469
44470 error = user_path_parent(dfd, pathname, &nd, &name);
44471 if (error)
44472@@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44473 error = -ENOENT;
44474 goto exit3;
44475 }
44476+
44477+ saved_ino = dentry->d_inode->i_ino;
44478+ saved_dev = gr_get_dev_from_dentry(dentry);
44479+
44480+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44481+ error = -EACCES;
44482+ goto exit3;
44483+ }
44484+
44485 error = mnt_want_write(nd.path.mnt);
44486 if (error)
44487 goto exit3;
44488@@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44489 if (error)
44490 goto exit4;
44491 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44492+ if (!error && (saved_dev || saved_ino))
44493+ gr_handle_delete(saved_ino, saved_dev);
44494 exit4:
44495 mnt_drop_write(nd.path.mnt);
44496 exit3:
44497@@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44498 struct dentry *dentry;
44499 struct nameidata nd;
44500 struct inode *inode = NULL;
44501+ ino_t saved_ino = 0;
44502+ dev_t saved_dev = 0;
44503
44504 error = user_path_parent(dfd, pathname, &nd, &name);
44505 if (error)
44506@@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44507 if (!inode)
44508 goto slashes;
44509 ihold(inode);
44510+
44511+ if (inode->i_nlink <= 1) {
44512+ saved_ino = inode->i_ino;
44513+ saved_dev = gr_get_dev_from_dentry(dentry);
44514+ }
44515+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44516+ error = -EACCES;
44517+ goto exit2;
44518+ }
44519+
44520 error = mnt_want_write(nd.path.mnt);
44521 if (error)
44522 goto exit2;
44523@@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44524 if (error)
44525 goto exit3;
44526 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44527+ if (!error && (saved_ino || saved_dev))
44528+ gr_handle_delete(saved_ino, saved_dev);
44529 exit3:
44530 mnt_drop_write(nd.path.mnt);
44531 exit2:
44532@@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44533 error = mnt_want_write(path.mnt);
44534 if (error)
44535 goto out_dput;
44536+
44537+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44538+ error = -EACCES;
44539+ goto out_drop_write;
44540+ }
44541+
44542 error = security_path_symlink(&path, dentry, from);
44543 if (error)
44544 goto out_drop_write;
44545 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44546+ if (!error)
44547+ gr_handle_create(dentry, path.mnt);
44548 out_drop_write:
44549 mnt_drop_write(path.mnt);
44550 out_dput:
44551@@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44552 {
44553 struct dentry *new_dentry;
44554 struct path old_path, new_path;
44555+ char *to = NULL;
44556 int how = 0;
44557 int error;
44558
44559@@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44560 if (error)
44561 return error;
44562
44563- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44564+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44565 error = PTR_ERR(new_dentry);
44566 if (IS_ERR(new_dentry))
44567 goto out;
44568@@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44569 error = mnt_want_write(new_path.mnt);
44570 if (error)
44571 goto out_dput;
44572+
44573+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44574+ old_path.dentry->d_inode,
44575+ old_path.dentry->d_inode->i_mode, to)) {
44576+ error = -EACCES;
44577+ goto out_drop_write;
44578+ }
44579+
44580+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44581+ old_path.dentry, old_path.mnt, to)) {
44582+ error = -EACCES;
44583+ goto out_drop_write;
44584+ }
44585+
44586 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44587 if (error)
44588 goto out_drop_write;
44589 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44590+ if (!error)
44591+ gr_handle_create(new_dentry, new_path.mnt);
44592 out_drop_write:
44593 mnt_drop_write(new_path.mnt);
44594 out_dput:
44595+ putname(to);
44596 dput(new_dentry);
44597 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44598 path_put(&new_path);
44599@@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44600 if (new_dentry == trap)
44601 goto exit5;
44602
44603+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44604+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44605+ to);
44606+ if (error)
44607+ goto exit5;
44608+
44609 error = mnt_want_write(oldnd.path.mnt);
44610 if (error)
44611 goto exit5;
44612@@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44613 goto exit6;
44614 error = vfs_rename(old_dir->d_inode, old_dentry,
44615 new_dir->d_inode, new_dentry);
44616+ if (!error)
44617+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44618+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44619 exit6:
44620 mnt_drop_write(oldnd.path.mnt);
44621 exit5:
44622@@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44623
44624 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44625 {
44626+ char tmpbuf[64];
44627+ const char *newlink;
44628 int len;
44629
44630 len = PTR_ERR(link);
44631@@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44632 len = strlen(link);
44633 if (len > (unsigned) buflen)
44634 len = buflen;
44635- if (copy_to_user(buffer, link, len))
44636+
44637+ if (len < sizeof(tmpbuf)) {
44638+ memcpy(tmpbuf, link, len);
44639+ newlink = tmpbuf;
44640+ } else
44641+ newlink = link;
44642+
44643+ if (copy_to_user(buffer, newlink, len))
44644 len = -EFAULT;
44645 out:
44646 return len;
44647diff --git a/fs/namespace.c b/fs/namespace.c
44648index cfc6d44..b4632a5 100644
44649--- a/fs/namespace.c
44650+++ b/fs/namespace.c
44651@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44652 if (!(sb->s_flags & MS_RDONLY))
44653 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44654 up_write(&sb->s_umount);
44655+
44656+ gr_log_remount(mnt->mnt_devname, retval);
44657+
44658 return retval;
44659 }
44660
44661@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44662 br_write_unlock(vfsmount_lock);
44663 up_write(&namespace_sem);
44664 release_mounts(&umount_list);
44665+
44666+ gr_log_unmount(mnt->mnt_devname, retval);
44667+
44668 return retval;
44669 }
44670
44671@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44672 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44673 MS_STRICTATIME);
44674
44675+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44676+ retval = -EPERM;
44677+ goto dput_out;
44678+ }
44679+
44680+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44681+ retval = -EPERM;
44682+ goto dput_out;
44683+ }
44684+
44685 if (flags & MS_REMOUNT)
44686 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44687 data_page);
44688@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44689 dev_name, data_page);
44690 dput_out:
44691 path_put(&path);
44692+
44693+ gr_log_mount(dev_name, dir_name, retval);
44694+
44695 return retval;
44696 }
44697
44698@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44699 if (error)
44700 goto out2;
44701
44702+ if (gr_handle_chroot_pivot()) {
44703+ error = -EPERM;
44704+ goto out2;
44705+ }
44706+
44707 get_fs_root(current->fs, &root);
44708 error = lock_mount(&old);
44709 if (error)
44710diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44711index 3db6b82..a57597e 100644
44712--- a/fs/nfs/blocklayout/blocklayout.c
44713+++ b/fs/nfs/blocklayout/blocklayout.c
44714@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44715 */
44716 struct parallel_io {
44717 struct kref refcnt;
44718- struct rpc_call_ops call_ops;
44719+ rpc_call_ops_no_const call_ops;
44720 void (*pnfs_callback) (void *data);
44721 void *data;
44722 };
44723diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44724index 50a15fa..ca113f9 100644
44725--- a/fs/nfs/inode.c
44726+++ b/fs/nfs/inode.c
44727@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44728 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44729 nfsi->attrtimeo_timestamp = jiffies;
44730
44731- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44732+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44733 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44734 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44735 else
44736@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44737 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44738 }
44739
44740-static atomic_long_t nfs_attr_generation_counter;
44741+static atomic_long_unchecked_t nfs_attr_generation_counter;
44742
44743 static unsigned long nfs_read_attr_generation_counter(void)
44744 {
44745- return atomic_long_read(&nfs_attr_generation_counter);
44746+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44747 }
44748
44749 unsigned long nfs_inc_attr_generation_counter(void)
44750 {
44751- return atomic_long_inc_return(&nfs_attr_generation_counter);
44752+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44753 }
44754
44755 void nfs_fattr_init(struct nfs_fattr *fattr)
44756diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44757index 7a2e442..8e544cc 100644
44758--- a/fs/nfsd/vfs.c
44759+++ b/fs/nfsd/vfs.c
44760@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44761 } else {
44762 oldfs = get_fs();
44763 set_fs(KERNEL_DS);
44764- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44765+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44766 set_fs(oldfs);
44767 }
44768
44769@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44770
44771 /* Write the data. */
44772 oldfs = get_fs(); set_fs(KERNEL_DS);
44773- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44774+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44775 set_fs(oldfs);
44776 if (host_err < 0)
44777 goto out_nfserr;
44778@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44779 */
44780
44781 oldfs = get_fs(); set_fs(KERNEL_DS);
44782- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44783+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44784 set_fs(oldfs);
44785
44786 if (host_err < 0)
44787diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44788index 9fde1c0..14e8827 100644
44789--- a/fs/notify/fanotify/fanotify_user.c
44790+++ b/fs/notify/fanotify/fanotify_user.c
44791@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44792 goto out_close_fd;
44793
44794 ret = -EFAULT;
44795- if (copy_to_user(buf, &fanotify_event_metadata,
44796+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44797+ copy_to_user(buf, &fanotify_event_metadata,
44798 fanotify_event_metadata.event_len))
44799 goto out_kill_access_response;
44800
44801diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44802index ee18815..7aa5d01 100644
44803--- a/fs/notify/notification.c
44804+++ b/fs/notify/notification.c
44805@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
44806 * get set to 0 so it will never get 'freed'
44807 */
44808 static struct fsnotify_event *q_overflow_event;
44809-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44810+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44811
44812 /**
44813 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44814@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44815 */
44816 u32 fsnotify_get_cookie(void)
44817 {
44818- return atomic_inc_return(&fsnotify_sync_cookie);
44819+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44820 }
44821 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44822
44823diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
44824index 99e3610..02c1068 100644
44825--- a/fs/ntfs/dir.c
44826+++ b/fs/ntfs/dir.c
44827@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44828 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44829 ~(s64)(ndir->itype.index.block_size - 1)));
44830 /* Bounds checks. */
44831- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44832+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44833 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44834 "inode 0x%lx or driver bug.", vdir->i_ino);
44835 goto err_out;
44836diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
44837index c587e2d..3641eaa 100644
44838--- a/fs/ntfs/file.c
44839+++ b/fs/ntfs/file.c
44840@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
44841 #endif /* NTFS_RW */
44842 };
44843
44844-const struct file_operations ntfs_empty_file_ops = {};
44845+const struct file_operations ntfs_empty_file_ops __read_only;
44846
44847-const struct inode_operations ntfs_empty_inode_ops = {};
44848+const struct inode_operations ntfs_empty_inode_ops __read_only;
44849diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
44850index 210c352..a174f83 100644
44851--- a/fs/ocfs2/localalloc.c
44852+++ b/fs/ocfs2/localalloc.c
44853@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
44854 goto bail;
44855 }
44856
44857- atomic_inc(&osb->alloc_stats.moves);
44858+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44859
44860 bail:
44861 if (handle)
44862diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
44863index d355e6e..578d905 100644
44864--- a/fs/ocfs2/ocfs2.h
44865+++ b/fs/ocfs2/ocfs2.h
44866@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44867
44868 struct ocfs2_alloc_stats
44869 {
44870- atomic_t moves;
44871- atomic_t local_data;
44872- atomic_t bitmap_data;
44873- atomic_t bg_allocs;
44874- atomic_t bg_extends;
44875+ atomic_unchecked_t moves;
44876+ atomic_unchecked_t local_data;
44877+ atomic_unchecked_t bitmap_data;
44878+ atomic_unchecked_t bg_allocs;
44879+ atomic_unchecked_t bg_extends;
44880 };
44881
44882 enum ocfs2_local_alloc_state
44883diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
44884index ba5d97e..c77db25 100644
44885--- a/fs/ocfs2/suballoc.c
44886+++ b/fs/ocfs2/suballoc.c
44887@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
44888 mlog_errno(status);
44889 goto bail;
44890 }
44891- atomic_inc(&osb->alloc_stats.bg_extends);
44892+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44893
44894 /* You should never ask for this much metadata */
44895 BUG_ON(bits_wanted >
44896@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
44897 mlog_errno(status);
44898 goto bail;
44899 }
44900- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44901+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44902
44903 *suballoc_loc = res.sr_bg_blkno;
44904 *suballoc_bit_start = res.sr_bit_offset;
44905@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
44906 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44907 res->sr_bits);
44908
44909- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44910+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44911
44912 BUG_ON(res->sr_bits != 1);
44913
44914@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
44915 mlog_errno(status);
44916 goto bail;
44917 }
44918- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44919+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44920
44921 BUG_ON(res.sr_bits != 1);
44922
44923@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44924 cluster_start,
44925 num_clusters);
44926 if (!status)
44927- atomic_inc(&osb->alloc_stats.local_data);
44928+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44929 } else {
44930 if (min_clusters > (osb->bitmap_cpg - 1)) {
44931 /* The only paths asking for contiguousness
44932@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44933 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44934 res.sr_bg_blkno,
44935 res.sr_bit_offset);
44936- atomic_inc(&osb->alloc_stats.bitmap_data);
44937+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44938 *num_clusters = res.sr_bits;
44939 }
44940 }
44941diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
44942index 4994f8b..eaab8eb 100644
44943--- a/fs/ocfs2/super.c
44944+++ b/fs/ocfs2/super.c
44945@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
44946 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44947 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44948 "Stats",
44949- atomic_read(&osb->alloc_stats.bitmap_data),
44950- atomic_read(&osb->alloc_stats.local_data),
44951- atomic_read(&osb->alloc_stats.bg_allocs),
44952- atomic_read(&osb->alloc_stats.moves),
44953- atomic_read(&osb->alloc_stats.bg_extends));
44954+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44955+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44956+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44957+ atomic_read_unchecked(&osb->alloc_stats.moves),
44958+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44959
44960 out += snprintf(buf + out, len - out,
44961 "%10s => State: %u Descriptor: %llu Size: %u bits "
44962@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
44963 spin_lock_init(&osb->osb_xattr_lock);
44964 ocfs2_init_steal_slots(osb);
44965
44966- atomic_set(&osb->alloc_stats.moves, 0);
44967- atomic_set(&osb->alloc_stats.local_data, 0);
44968- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44969- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44970- atomic_set(&osb->alloc_stats.bg_extends, 0);
44971+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44972+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44973+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44974+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44975+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44976
44977 /* Copy the blockcheck stats from the superblock probe */
44978 osb->osb_ecc_stats = *stats;
44979diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
44980index 5d22872..523db20 100644
44981--- a/fs/ocfs2/symlink.c
44982+++ b/fs/ocfs2/symlink.c
44983@@ -142,7 +142,7 @@ bail:
44984
44985 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44986 {
44987- char *link = nd_get_link(nd);
44988+ const char *link = nd_get_link(nd);
44989 if (!IS_ERR(link))
44990 kfree(link);
44991 }
44992diff --git a/fs/open.c b/fs/open.c
44993index 22c41b5..695cb17 100644
44994--- a/fs/open.c
44995+++ b/fs/open.c
44996@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
44997 error = locks_verify_truncate(inode, NULL, length);
44998 if (!error)
44999 error = security_path_truncate(&path);
45000+
45001+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45002+ error = -EACCES;
45003+
45004 if (!error)
45005 error = do_truncate(path.dentry, length, 0, NULL);
45006
45007@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45008 if (__mnt_is_readonly(path.mnt))
45009 res = -EROFS;
45010
45011+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45012+ res = -EACCES;
45013+
45014 out_path_release:
45015 path_put(&path);
45016 out:
45017@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45018 if (error)
45019 goto dput_and_out;
45020
45021+ gr_log_chdir(path.dentry, path.mnt);
45022+
45023 set_fs_pwd(current->fs, &path);
45024
45025 dput_and_out:
45026@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45027 goto out_putf;
45028
45029 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45030+
45031+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45032+ error = -EPERM;
45033+
45034+ if (!error)
45035+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45036+
45037 if (!error)
45038 set_fs_pwd(current->fs, &file->f_path);
45039 out_putf:
45040@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45041 if (error)
45042 goto dput_and_out;
45043
45044+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45045+ goto dput_and_out;
45046+
45047 set_fs_root(current->fs, &path);
45048+
45049+ gr_handle_chroot_chdir(&path);
45050+
45051 error = 0;
45052 dput_and_out:
45053 path_put(&path);
45054@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45055 if (error)
45056 return error;
45057 mutex_lock(&inode->i_mutex);
45058+
45059+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45060+ error = -EACCES;
45061+ goto out_unlock;
45062+ }
45063+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45064+ error = -EACCES;
45065+ goto out_unlock;
45066+ }
45067+
45068 error = security_path_chmod(path->dentry, path->mnt, mode);
45069 if (error)
45070 goto out_unlock;
45071@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45072 int error;
45073 struct iattr newattrs;
45074
45075+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45076+ return -EACCES;
45077+
45078 newattrs.ia_valid = ATTR_CTIME;
45079 if (user != (uid_t) -1) {
45080 newattrs.ia_valid |= ATTR_UID;
45081diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45082index 6296b40..417c00f 100644
45083--- a/fs/partitions/efi.c
45084+++ b/fs/partitions/efi.c
45085@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45086 if (!gpt)
45087 return NULL;
45088
45089+ if (!le32_to_cpu(gpt->num_partition_entries))
45090+ return NULL;
45091+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45092+ if (!pte)
45093+ return NULL;
45094+
45095 count = le32_to_cpu(gpt->num_partition_entries) *
45096 le32_to_cpu(gpt->sizeof_partition_entry);
45097- if (!count)
45098- return NULL;
45099- pte = kzalloc(count, GFP_KERNEL);
45100- if (!pte)
45101- return NULL;
45102-
45103 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45104 (u8 *) pte,
45105 count) < count) {
45106diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45107index bd8ae78..539d250 100644
45108--- a/fs/partitions/ldm.c
45109+++ b/fs/partitions/ldm.c
45110@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45111 goto found;
45112 }
45113
45114- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45115+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45116 if (!f) {
45117 ldm_crit ("Out of memory.");
45118 return false;
45119diff --git a/fs/pipe.c b/fs/pipe.c
45120index 4065f07..68c0706 100644
45121--- a/fs/pipe.c
45122+++ b/fs/pipe.c
45123@@ -420,9 +420,9 @@ redo:
45124 }
45125 if (bufs) /* More to do? */
45126 continue;
45127- if (!pipe->writers)
45128+ if (!atomic_read(&pipe->writers))
45129 break;
45130- if (!pipe->waiting_writers) {
45131+ if (!atomic_read(&pipe->waiting_writers)) {
45132 /* syscall merging: Usually we must not sleep
45133 * if O_NONBLOCK is set, or if we got some data.
45134 * But if a writer sleeps in kernel space, then
45135@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45136 mutex_lock(&inode->i_mutex);
45137 pipe = inode->i_pipe;
45138
45139- if (!pipe->readers) {
45140+ if (!atomic_read(&pipe->readers)) {
45141 send_sig(SIGPIPE, current, 0);
45142 ret = -EPIPE;
45143 goto out;
45144@@ -530,7 +530,7 @@ redo1:
45145 for (;;) {
45146 int bufs;
45147
45148- if (!pipe->readers) {
45149+ if (!atomic_read(&pipe->readers)) {
45150 send_sig(SIGPIPE, current, 0);
45151 if (!ret)
45152 ret = -EPIPE;
45153@@ -616,9 +616,9 @@ redo2:
45154 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45155 do_wakeup = 0;
45156 }
45157- pipe->waiting_writers++;
45158+ atomic_inc(&pipe->waiting_writers);
45159 pipe_wait(pipe);
45160- pipe->waiting_writers--;
45161+ atomic_dec(&pipe->waiting_writers);
45162 }
45163 out:
45164 mutex_unlock(&inode->i_mutex);
45165@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45166 mask = 0;
45167 if (filp->f_mode & FMODE_READ) {
45168 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45169- if (!pipe->writers && filp->f_version != pipe->w_counter)
45170+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45171 mask |= POLLHUP;
45172 }
45173
45174@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45175 * Most Unices do not set POLLERR for FIFOs but on Linux they
45176 * behave exactly like pipes for poll().
45177 */
45178- if (!pipe->readers)
45179+ if (!atomic_read(&pipe->readers))
45180 mask |= POLLERR;
45181 }
45182
45183@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45184
45185 mutex_lock(&inode->i_mutex);
45186 pipe = inode->i_pipe;
45187- pipe->readers -= decr;
45188- pipe->writers -= decw;
45189+ atomic_sub(decr, &pipe->readers);
45190+ atomic_sub(decw, &pipe->writers);
45191
45192- if (!pipe->readers && !pipe->writers) {
45193+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45194 free_pipe_info(inode);
45195 } else {
45196 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45197@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45198
45199 if (inode->i_pipe) {
45200 ret = 0;
45201- inode->i_pipe->readers++;
45202+ atomic_inc(&inode->i_pipe->readers);
45203 }
45204
45205 mutex_unlock(&inode->i_mutex);
45206@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45207
45208 if (inode->i_pipe) {
45209 ret = 0;
45210- inode->i_pipe->writers++;
45211+ atomic_inc(&inode->i_pipe->writers);
45212 }
45213
45214 mutex_unlock(&inode->i_mutex);
45215@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45216 if (inode->i_pipe) {
45217 ret = 0;
45218 if (filp->f_mode & FMODE_READ)
45219- inode->i_pipe->readers++;
45220+ atomic_inc(&inode->i_pipe->readers);
45221 if (filp->f_mode & FMODE_WRITE)
45222- inode->i_pipe->writers++;
45223+ atomic_inc(&inode->i_pipe->writers);
45224 }
45225
45226 mutex_unlock(&inode->i_mutex);
45227@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45228 inode->i_pipe = NULL;
45229 }
45230
45231-static struct vfsmount *pipe_mnt __read_mostly;
45232+struct vfsmount *pipe_mnt __read_mostly;
45233
45234 /*
45235 * pipefs_dname() is called from d_path().
45236@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45237 goto fail_iput;
45238 inode->i_pipe = pipe;
45239
45240- pipe->readers = pipe->writers = 1;
45241+ atomic_set(&pipe->readers, 1);
45242+ atomic_set(&pipe->writers, 1);
45243 inode->i_fop = &rdwr_pipefifo_fops;
45244
45245 /*
45246diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45247index 15af622..0e9f4467 100644
45248--- a/fs/proc/Kconfig
45249+++ b/fs/proc/Kconfig
45250@@ -30,12 +30,12 @@ config PROC_FS
45251
45252 config PROC_KCORE
45253 bool "/proc/kcore support" if !ARM
45254- depends on PROC_FS && MMU
45255+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45256
45257 config PROC_VMCORE
45258 bool "/proc/vmcore support"
45259- depends on PROC_FS && CRASH_DUMP
45260- default y
45261+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45262+ default n
45263 help
45264 Exports the dump image of crashed kernel in ELF format.
45265
45266@@ -59,8 +59,8 @@ config PROC_SYSCTL
45267 limited in memory.
45268
45269 config PROC_PAGE_MONITOR
45270- default y
45271- depends on PROC_FS && MMU
45272+ default n
45273+ depends on PROC_FS && MMU && !GRKERNSEC
45274 bool "Enable /proc page monitoring" if EXPERT
45275 help
45276 Various /proc files exist to monitor process memory utilization:
45277diff --git a/fs/proc/array.c b/fs/proc/array.c
45278index 3a1dafd..1456746 100644
45279--- a/fs/proc/array.c
45280+++ b/fs/proc/array.c
45281@@ -60,6 +60,7 @@
45282 #include <linux/tty.h>
45283 #include <linux/string.h>
45284 #include <linux/mman.h>
45285+#include <linux/grsecurity.h>
45286 #include <linux/proc_fs.h>
45287 #include <linux/ioport.h>
45288 #include <linux/uaccess.h>
45289@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45290 seq_putc(m, '\n');
45291 }
45292
45293+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45294+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45295+{
45296+ if (p->mm)
45297+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45298+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45299+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45300+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45301+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45302+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45303+ else
45304+ seq_printf(m, "PaX:\t-----\n");
45305+}
45306+#endif
45307+
45308 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45309 struct pid *pid, struct task_struct *task)
45310 {
45311@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45312 task_cpus_allowed(m, task);
45313 cpuset_task_status_allowed(m, task);
45314 task_context_switch_counts(m, task);
45315+
45316+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45317+ task_pax(m, task);
45318+#endif
45319+
45320+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45321+ task_grsec_rbac(m, task);
45322+#endif
45323+
45324 return 0;
45325 }
45326
45327+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45328+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45329+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45330+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45331+#endif
45332+
45333 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45334 struct pid *pid, struct task_struct *task, int whole)
45335 {
45336@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45337 char tcomm[sizeof(task->comm)];
45338 unsigned long flags;
45339
45340+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45341+ if (current->exec_id != m->exec_id) {
45342+ gr_log_badprocpid("stat");
45343+ return 0;
45344+ }
45345+#endif
45346+
45347 state = *get_task_state(task);
45348 vsize = eip = esp = 0;
45349 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45350@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45351 gtime = task->gtime;
45352 }
45353
45354+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45355+ if (PAX_RAND_FLAGS(mm)) {
45356+ eip = 0;
45357+ esp = 0;
45358+ wchan = 0;
45359+ }
45360+#endif
45361+#ifdef CONFIG_GRKERNSEC_HIDESYM
45362+ wchan = 0;
45363+ eip =0;
45364+ esp =0;
45365+#endif
45366+
45367 /* scale priority and nice values from timeslices to -20..20 */
45368 /* to make it look like a "normal" Unix priority/nice value */
45369 priority = task_prio(task);
45370@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45371 vsize,
45372 mm ? get_mm_rss(mm) : 0,
45373 rsslim,
45374+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45375+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45376+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45377+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45378+#else
45379 mm ? (permitted ? mm->start_code : 1) : 0,
45380 mm ? (permitted ? mm->end_code : 1) : 0,
45381 (permitted && mm) ? mm->start_stack : 0,
45382+#endif
45383 esp,
45384 eip,
45385 /* The signal information here is obsolete.
45386@@ -535,6 +592,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45387 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45388 struct mm_struct *mm = get_task_mm(task);
45389
45390+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45391+ if (current->exec_id != m->exec_id) {
45392+ gr_log_badprocpid("statm");
45393+ return 0;
45394+ }
45395+#endif
45396+
45397 if (mm) {
45398 size = task_statm(mm, &shared, &text, &data, &resident);
45399 mmput(mm);
45400@@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45401
45402 return 0;
45403 }
45404+
45405+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45406+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45407+{
45408+ u32 curr_ip = 0;
45409+ unsigned long flags;
45410+
45411+ if (lock_task_sighand(task, &flags)) {
45412+ curr_ip = task->signal->curr_ip;
45413+ unlock_task_sighand(task, &flags);
45414+ }
45415+
45416+ return sprintf(buffer, "%pI4\n", &curr_ip);
45417+}
45418+#endif
45419diff --git a/fs/proc/base.c b/fs/proc/base.c
45420index 1ace83d..357b933 100644
45421--- a/fs/proc/base.c
45422+++ b/fs/proc/base.c
45423@@ -107,6 +107,22 @@ struct pid_entry {
45424 union proc_op op;
45425 };
45426
45427+struct getdents_callback {
45428+ struct linux_dirent __user * current_dir;
45429+ struct linux_dirent __user * previous;
45430+ struct file * file;
45431+ int count;
45432+ int error;
45433+};
45434+
45435+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45436+ loff_t offset, u64 ino, unsigned int d_type)
45437+{
45438+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45439+ buf->error = -EINVAL;
45440+ return 0;
45441+}
45442+
45443 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45444 .name = (NAME), \
45445 .len = sizeof(NAME) - 1, \
45446@@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45447 return result;
45448 }
45449
45450-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45451-{
45452- struct mm_struct *mm;
45453- int err;
45454-
45455- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45456- if (err)
45457- return ERR_PTR(err);
45458-
45459- mm = get_task_mm(task);
45460- if (mm && mm != current->mm &&
45461- !ptrace_may_access(task, mode)) {
45462- mmput(mm);
45463- mm = ERR_PTR(-EACCES);
45464- }
45465- mutex_unlock(&task->signal->cred_guard_mutex);
45466-
45467- return mm;
45468-}
45469-
45470 struct mm_struct *mm_for_maps(struct task_struct *task)
45471 {
45472 return mm_access(task, PTRACE_MODE_READ);
45473@@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45474 if (!mm->arg_end)
45475 goto out_mm; /* Shh! No looking before we're done */
45476
45477+ if (gr_acl_handle_procpidmem(task))
45478+ goto out_mm;
45479+
45480 len = mm->arg_end - mm->arg_start;
45481
45482 if (len > PAGE_SIZE)
45483@@ -256,12 +255,28 @@ out:
45484 return res;
45485 }
45486
45487+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45488+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45489+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45490+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45491+#endif
45492+
45493 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45494 {
45495 struct mm_struct *mm = mm_for_maps(task);
45496 int res = PTR_ERR(mm);
45497 if (mm && !IS_ERR(mm)) {
45498 unsigned int nwords = 0;
45499+
45500+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45501+ /* allow if we're currently ptracing this task */
45502+ if (PAX_RAND_FLAGS(mm) &&
45503+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45504+ mmput(mm);
45505+ return 0;
45506+ }
45507+#endif
45508+
45509 do {
45510 nwords += 2;
45511 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45512@@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45513 }
45514
45515
45516-#ifdef CONFIG_KALLSYMS
45517+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45518 /*
45519 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45520 * Returns the resolved symbol. If that fails, simply return the address.
45521@@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45522 mutex_unlock(&task->signal->cred_guard_mutex);
45523 }
45524
45525-#ifdef CONFIG_STACKTRACE
45526+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45527
45528 #define MAX_STACK_TRACE_DEPTH 64
45529
45530@@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45531 return count;
45532 }
45533
45534-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45535+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45536 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45537 {
45538 long nr;
45539@@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45540 /************************************************************************/
45541
45542 /* permission checks */
45543-static int proc_fd_access_allowed(struct inode *inode)
45544+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45545 {
45546 struct task_struct *task;
45547 int allowed = 0;
45548@@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45549 */
45550 task = get_proc_task(inode);
45551 if (task) {
45552- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45553+ if (log)
45554+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45555+ else
45556+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45557 put_task_struct(task);
45558 }
45559 return allowed;
45560@@ -797,6 +815,11 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45561 ssize_t copied;
45562 char *page;
45563
45564+#ifdef CONFIG_GRKERNSEC
45565+ if (write)
45566+ return -EPERM;
45567+#endif
45568+
45569 if (!mm)
45570 return 0;
45571
45572@@ -897,6 +920,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45573 if (!task)
45574 goto out_no_task;
45575
45576+ if (gr_acl_handle_procpidmem(task))
45577+ goto out;
45578+
45579 ret = -ENOMEM;
45580 page = (char *)__get_free_page(GFP_TEMPORARY);
45581 if (!page)
45582@@ -1519,7 +1545,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45583 path_put(&nd->path);
45584
45585 /* Are we allowed to snoop on the tasks file descriptors? */
45586- if (!proc_fd_access_allowed(inode))
45587+ if (!proc_fd_access_allowed(inode,0))
45588 goto out;
45589
45590 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45591@@ -1558,8 +1584,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45592 struct path path;
45593
45594 /* Are we allowed to snoop on the tasks file descriptors? */
45595- if (!proc_fd_access_allowed(inode))
45596- goto out;
45597+ /* logging this is needed for learning on chromium to work properly,
45598+ but we don't want to flood the logs from 'ps' which does a readlink
45599+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45600+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45601+ */
45602+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45603+ if (!proc_fd_access_allowed(inode,0))
45604+ goto out;
45605+ } else {
45606+ if (!proc_fd_access_allowed(inode,1))
45607+ goto out;
45608+ }
45609
45610 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45611 if (error)
45612@@ -1624,7 +1660,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45613 rcu_read_lock();
45614 cred = __task_cred(task);
45615 inode->i_uid = cred->euid;
45616+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45617+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45618+#else
45619 inode->i_gid = cred->egid;
45620+#endif
45621 rcu_read_unlock();
45622 }
45623 security_task_to_inode(task, inode);
45624@@ -1642,6 +1682,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45625 struct inode *inode = dentry->d_inode;
45626 struct task_struct *task;
45627 const struct cred *cred;
45628+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45629+ const struct cred *tmpcred = current_cred();
45630+#endif
45631
45632 generic_fillattr(inode, stat);
45633
45634@@ -1649,13 +1692,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45635 stat->uid = 0;
45636 stat->gid = 0;
45637 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45638+
45639+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45640+ rcu_read_unlock();
45641+ return -ENOENT;
45642+ }
45643+
45644 if (task) {
45645+ cred = __task_cred(task);
45646+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45647+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45648+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45649+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45650+#endif
45651+ ) {
45652+#endif
45653 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45654+#ifdef CONFIG_GRKERNSEC_PROC_USER
45655+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45656+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45657+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45658+#endif
45659 task_dumpable(task)) {
45660- cred = __task_cred(task);
45661 stat->uid = cred->euid;
45662+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45663+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45664+#else
45665 stat->gid = cred->egid;
45666+#endif
45667 }
45668+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45669+ } else {
45670+ rcu_read_unlock();
45671+ return -ENOENT;
45672+ }
45673+#endif
45674 }
45675 rcu_read_unlock();
45676 return 0;
45677@@ -1692,11 +1763,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45678
45679 if (task) {
45680 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45681+#ifdef CONFIG_GRKERNSEC_PROC_USER
45682+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45683+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45684+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45685+#endif
45686 task_dumpable(task)) {
45687 rcu_read_lock();
45688 cred = __task_cred(task);
45689 inode->i_uid = cred->euid;
45690+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45691+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45692+#else
45693 inode->i_gid = cred->egid;
45694+#endif
45695 rcu_read_unlock();
45696 } else {
45697 inode->i_uid = 0;
45698@@ -1814,7 +1894,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45699 int fd = proc_fd(inode);
45700
45701 if (task) {
45702- files = get_files_struct(task);
45703+ if (!gr_acl_handle_procpidmem(task))
45704+ files = get_files_struct(task);
45705 put_task_struct(task);
45706 }
45707 if (files) {
45708@@ -2082,11 +2163,21 @@ static const struct file_operations proc_fd_operations = {
45709 */
45710 static int proc_fd_permission(struct inode *inode, int mask)
45711 {
45712+ struct task_struct *task;
45713 int rv = generic_permission(inode, mask);
45714- if (rv == 0)
45715- return 0;
45716+
45717 if (task_pid(current) == proc_pid(inode))
45718 rv = 0;
45719+
45720+ task = get_proc_task(inode);
45721+ if (task == NULL)
45722+ return rv;
45723+
45724+ if (gr_acl_handle_procpidmem(task))
45725+ rv = -EACCES;
45726+
45727+ put_task_struct(task);
45728+
45729 return rv;
45730 }
45731
45732@@ -2196,6 +2287,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45733 if (!task)
45734 goto out_no_task;
45735
45736+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45737+ goto out;
45738+
45739 /*
45740 * Yes, it does not scale. And it should not. Don't add
45741 * new entries into /proc/<tgid>/ without very good reasons.
45742@@ -2240,6 +2334,9 @@ static int proc_pident_readdir(struct file *filp,
45743 if (!task)
45744 goto out_no_task;
45745
45746+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45747+ goto out;
45748+
45749 ret = 0;
45750 i = filp->f_pos;
45751 switch (i) {
45752@@ -2510,7 +2607,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45753 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45754 void *cookie)
45755 {
45756- char *s = nd_get_link(nd);
45757+ const char *s = nd_get_link(nd);
45758 if (!IS_ERR(s))
45759 __putname(s);
45760 }
45761@@ -2708,7 +2805,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45762 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45763 #endif
45764 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45765-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45766+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45767 INF("syscall", S_IRUGO, proc_pid_syscall),
45768 #endif
45769 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45770@@ -2733,10 +2830,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45771 #ifdef CONFIG_SECURITY
45772 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45773 #endif
45774-#ifdef CONFIG_KALLSYMS
45775+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45776 INF("wchan", S_IRUGO, proc_pid_wchan),
45777 #endif
45778-#ifdef CONFIG_STACKTRACE
45779+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45780 ONE("stack", S_IRUGO, proc_pid_stack),
45781 #endif
45782 #ifdef CONFIG_SCHEDSTATS
45783@@ -2770,6 +2867,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45784 #ifdef CONFIG_HARDWALL
45785 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45786 #endif
45787+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45788+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45789+#endif
45790 };
45791
45792 static int proc_tgid_base_readdir(struct file * filp,
45793@@ -2895,7 +2995,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45794 if (!inode)
45795 goto out;
45796
45797+#ifdef CONFIG_GRKERNSEC_PROC_USER
45798+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45799+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45800+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45801+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45802+#else
45803 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45804+#endif
45805 inode->i_op = &proc_tgid_base_inode_operations;
45806 inode->i_fop = &proc_tgid_base_operations;
45807 inode->i_flags|=S_IMMUTABLE;
45808@@ -2937,7 +3044,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45809 if (!task)
45810 goto out;
45811
45812+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45813+ goto out_put_task;
45814+
45815 result = proc_pid_instantiate(dir, dentry, task, NULL);
45816+out_put_task:
45817 put_task_struct(task);
45818 out:
45819 return result;
45820@@ -3002,6 +3113,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45821 {
45822 unsigned int nr;
45823 struct task_struct *reaper;
45824+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45825+ const struct cred *tmpcred = current_cred();
45826+ const struct cred *itercred;
45827+#endif
45828+ filldir_t __filldir = filldir;
45829 struct tgid_iter iter;
45830 struct pid_namespace *ns;
45831
45832@@ -3025,8 +3141,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45833 for (iter = next_tgid(ns, iter);
45834 iter.task;
45835 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45836+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45837+ rcu_read_lock();
45838+ itercred = __task_cred(iter.task);
45839+#endif
45840+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45841+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45842+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45843+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45844+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45845+#endif
45846+ )
45847+#endif
45848+ )
45849+ __filldir = &gr_fake_filldir;
45850+ else
45851+ __filldir = filldir;
45852+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45853+ rcu_read_unlock();
45854+#endif
45855 filp->f_pos = iter.tgid + TGID_OFFSET;
45856- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45857+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45858 put_task_struct(iter.task);
45859 goto out;
45860 }
45861@@ -3054,7 +3189,7 @@ static const struct pid_entry tid_base_stuff[] = {
45862 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45863 #endif
45864 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45865-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45866+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45867 INF("syscall", S_IRUGO, proc_pid_syscall),
45868 #endif
45869 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45870@@ -3078,10 +3213,10 @@ static const struct pid_entry tid_base_stuff[] = {
45871 #ifdef CONFIG_SECURITY
45872 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45873 #endif
45874-#ifdef CONFIG_KALLSYMS
45875+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45876 INF("wchan", S_IRUGO, proc_pid_wchan),
45877 #endif
45878-#ifdef CONFIG_STACKTRACE
45879+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45880 ONE("stack", S_IRUGO, proc_pid_stack),
45881 #endif
45882 #ifdef CONFIG_SCHEDSTATS
45883diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
45884index 82676e3..5f8518a 100644
45885--- a/fs/proc/cmdline.c
45886+++ b/fs/proc/cmdline.c
45887@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
45888
45889 static int __init proc_cmdline_init(void)
45890 {
45891+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45892+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45893+#else
45894 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45895+#endif
45896 return 0;
45897 }
45898 module_init(proc_cmdline_init);
45899diff --git a/fs/proc/devices.c b/fs/proc/devices.c
45900index b143471..bb105e5 100644
45901--- a/fs/proc/devices.c
45902+++ b/fs/proc/devices.c
45903@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
45904
45905 static int __init proc_devices_init(void)
45906 {
45907+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45908+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45909+#else
45910 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45911+#endif
45912 return 0;
45913 }
45914 module_init(proc_devices_init);
45915diff --git a/fs/proc/inode.c b/fs/proc/inode.c
45916index 7737c54..7172574 100644
45917--- a/fs/proc/inode.c
45918+++ b/fs/proc/inode.c
45919@@ -18,12 +18,18 @@
45920 #include <linux/module.h>
45921 #include <linux/sysctl.h>
45922 #include <linux/slab.h>
45923+#include <linux/grsecurity.h>
45924
45925 #include <asm/system.h>
45926 #include <asm/uaccess.h>
45927
45928 #include "internal.h"
45929
45930+#ifdef CONFIG_PROC_SYSCTL
45931+extern const struct inode_operations proc_sys_inode_operations;
45932+extern const struct inode_operations proc_sys_dir_operations;
45933+#endif
45934+
45935 static void proc_evict_inode(struct inode *inode)
45936 {
45937 struct proc_dir_entry *de;
45938@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
45939 ns_ops = PROC_I(inode)->ns_ops;
45940 if (ns_ops && ns_ops->put)
45941 ns_ops->put(PROC_I(inode)->ns);
45942+
45943+#ifdef CONFIG_PROC_SYSCTL
45944+ if (inode->i_op == &proc_sys_inode_operations ||
45945+ inode->i_op == &proc_sys_dir_operations)
45946+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45947+#endif
45948+
45949 }
45950
45951 static struct kmem_cache * proc_inode_cachep;
45952@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
45953 if (de->mode) {
45954 inode->i_mode = de->mode;
45955 inode->i_uid = de->uid;
45956+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45957+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45958+#else
45959 inode->i_gid = de->gid;
45960+#endif
45961 }
45962 if (de->size)
45963 inode->i_size = de->size;
45964diff --git a/fs/proc/internal.h b/fs/proc/internal.h
45965index 7838e5c..ff92cbc 100644
45966--- a/fs/proc/internal.h
45967+++ b/fs/proc/internal.h
45968@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45969 struct pid *pid, struct task_struct *task);
45970 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45971 struct pid *pid, struct task_struct *task);
45972+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45973+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45974+#endif
45975 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45976
45977 extern const struct file_operations proc_maps_operations;
45978diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
45979index d245cb2..f4e8498 100644
45980--- a/fs/proc/kcore.c
45981+++ b/fs/proc/kcore.c
45982@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45983 * the addresses in the elf_phdr on our list.
45984 */
45985 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45986- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45987+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45988+ if (tsz > buflen)
45989 tsz = buflen;
45990-
45991+
45992 while (buflen) {
45993 struct kcore_list *m;
45994
45995@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45996 kfree(elf_buf);
45997 } else {
45998 if (kern_addr_valid(start)) {
45999- unsigned long n;
46000+ char *elf_buf;
46001+ mm_segment_t oldfs;
46002
46003- n = copy_to_user(buffer, (char *)start, tsz);
46004- /*
46005- * We cannot distingush between fault on source
46006- * and fault on destination. When this happens
46007- * we clear too and hope it will trigger the
46008- * EFAULT again.
46009- */
46010- if (n) {
46011- if (clear_user(buffer + tsz - n,
46012- n))
46013+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46014+ if (!elf_buf)
46015+ return -ENOMEM;
46016+ oldfs = get_fs();
46017+ set_fs(KERNEL_DS);
46018+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46019+ set_fs(oldfs);
46020+ if (copy_to_user(buffer, elf_buf, tsz)) {
46021+ kfree(elf_buf);
46022 return -EFAULT;
46023+ }
46024 }
46025+ set_fs(oldfs);
46026+ kfree(elf_buf);
46027 } else {
46028 if (clear_user(buffer, tsz))
46029 return -EFAULT;
46030@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46031
46032 static int open_kcore(struct inode *inode, struct file *filp)
46033 {
46034+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46035+ return -EPERM;
46036+#endif
46037 if (!capable(CAP_SYS_RAWIO))
46038 return -EPERM;
46039 if (kcore_need_update)
46040diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46041index 80e4645..53e5fcf 100644
46042--- a/fs/proc/meminfo.c
46043+++ b/fs/proc/meminfo.c
46044@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46045 vmi.used >> 10,
46046 vmi.largest_chunk >> 10
46047 #ifdef CONFIG_MEMORY_FAILURE
46048- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46049+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46050 #endif
46051 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46052 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46053diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46054index b1822dd..df622cb 100644
46055--- a/fs/proc/nommu.c
46056+++ b/fs/proc/nommu.c
46057@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46058 if (len < 1)
46059 len = 1;
46060 seq_printf(m, "%*c", len, ' ');
46061- seq_path(m, &file->f_path, "");
46062+ seq_path(m, &file->f_path, "\n\\");
46063 }
46064
46065 seq_putc(m, '\n');
46066diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46067index f738024..876984a 100644
46068--- a/fs/proc/proc_net.c
46069+++ b/fs/proc/proc_net.c
46070@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46071 struct task_struct *task;
46072 struct nsproxy *ns;
46073 struct net *net = NULL;
46074+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46075+ const struct cred *cred = current_cred();
46076+#endif
46077+
46078+#ifdef CONFIG_GRKERNSEC_PROC_USER
46079+ if (cred->fsuid)
46080+ return net;
46081+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46082+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46083+ return net;
46084+#endif
46085
46086 rcu_read_lock();
46087 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46088diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46089index a6b6217..1e0579d 100644
46090--- a/fs/proc/proc_sysctl.c
46091+++ b/fs/proc/proc_sysctl.c
46092@@ -9,11 +9,13 @@
46093 #include <linux/namei.h>
46094 #include "internal.h"
46095
46096+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46097+
46098 static const struct dentry_operations proc_sys_dentry_operations;
46099 static const struct file_operations proc_sys_file_operations;
46100-static const struct inode_operations proc_sys_inode_operations;
46101+const struct inode_operations proc_sys_inode_operations;
46102 static const struct file_operations proc_sys_dir_file_operations;
46103-static const struct inode_operations proc_sys_dir_operations;
46104+const struct inode_operations proc_sys_dir_operations;
46105
46106 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46107 {
46108@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46109
46110 err = NULL;
46111 d_set_d_op(dentry, &proc_sys_dentry_operations);
46112+
46113+ gr_handle_proc_create(dentry, inode);
46114+
46115 d_add(dentry, inode);
46116
46117+ if (gr_handle_sysctl(p, MAY_EXEC))
46118+ err = ERR_PTR(-ENOENT);
46119+
46120 out:
46121 sysctl_head_finish(head);
46122 return err;
46123@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46124 if (!table->proc_handler)
46125 goto out;
46126
46127+#ifdef CONFIG_GRKERNSEC
46128+ error = -EPERM;
46129+ if (write && !capable(CAP_SYS_ADMIN))
46130+ goto out;
46131+#endif
46132+
46133 /* careful: calling conventions are nasty here */
46134 res = count;
46135 error = table->proc_handler(table, write, buf, &res, ppos);
46136@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46137 return -ENOMEM;
46138 } else {
46139 d_set_d_op(child, &proc_sys_dentry_operations);
46140+
46141+ gr_handle_proc_create(child, inode);
46142+
46143 d_add(child, inode);
46144 }
46145 } else {
46146@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46147 if (*pos < file->f_pos)
46148 continue;
46149
46150+ if (gr_handle_sysctl(table, 0))
46151+ continue;
46152+
46153 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46154 if (res)
46155 return res;
46156@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46157 if (IS_ERR(head))
46158 return PTR_ERR(head);
46159
46160+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46161+ return -ENOENT;
46162+
46163 generic_fillattr(inode, stat);
46164 if (table)
46165 stat->mode = (stat->mode & S_IFMT) | table->mode;
46166@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46167 .llseek = generic_file_llseek,
46168 };
46169
46170-static const struct inode_operations proc_sys_inode_operations = {
46171+const struct inode_operations proc_sys_inode_operations = {
46172 .permission = proc_sys_permission,
46173 .setattr = proc_sys_setattr,
46174 .getattr = proc_sys_getattr,
46175 };
46176
46177-static const struct inode_operations proc_sys_dir_operations = {
46178+const struct inode_operations proc_sys_dir_operations = {
46179 .lookup = proc_sys_lookup,
46180 .permission = proc_sys_permission,
46181 .setattr = proc_sys_setattr,
46182diff --git a/fs/proc/root.c b/fs/proc/root.c
46183index 03102d9..4ae347e 100644
46184--- a/fs/proc/root.c
46185+++ b/fs/proc/root.c
46186@@ -121,7 +121,15 @@ void __init proc_root_init(void)
46187 #ifdef CONFIG_PROC_DEVICETREE
46188 proc_device_tree_init();
46189 #endif
46190+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46191+#ifdef CONFIG_GRKERNSEC_PROC_USER
46192+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46193+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46194+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46195+#endif
46196+#else
46197 proc_mkdir("bus", NULL);
46198+#endif
46199 proc_sys_init();
46200 }
46201
46202diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46203index 7dcd2a2..b2f410e 100644
46204--- a/fs/proc/task_mmu.c
46205+++ b/fs/proc/task_mmu.c
46206@@ -11,6 +11,7 @@
46207 #include <linux/rmap.h>
46208 #include <linux/swap.h>
46209 #include <linux/swapops.h>
46210+#include <linux/grsecurity.h>
46211
46212 #include <asm/elf.h>
46213 #include <asm/uaccess.h>
46214@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46215 "VmExe:\t%8lu kB\n"
46216 "VmLib:\t%8lu kB\n"
46217 "VmPTE:\t%8lu kB\n"
46218- "VmSwap:\t%8lu kB\n",
46219- hiwater_vm << (PAGE_SHIFT-10),
46220+ "VmSwap:\t%8lu kB\n"
46221+
46222+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46223+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46224+#endif
46225+
46226+ ,hiwater_vm << (PAGE_SHIFT-10),
46227 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46228 mm->locked_vm << (PAGE_SHIFT-10),
46229 mm->pinned_vm << (PAGE_SHIFT-10),
46230@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46231 data << (PAGE_SHIFT-10),
46232 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46233 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46234- swap << (PAGE_SHIFT-10));
46235+ swap << (PAGE_SHIFT-10)
46236+
46237+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46238+ , mm->context.user_cs_base, mm->context.user_cs_limit
46239+#endif
46240+
46241+ );
46242 }
46243
46244 unsigned long task_vsize(struct mm_struct *mm)
46245@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46246 return ret;
46247 }
46248
46249+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46250+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46251+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46252+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46253+#endif
46254+
46255 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46256 {
46257 struct mm_struct *mm = vma->vm_mm;
46258@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46259 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46260 }
46261
46262- /* We don't show the stack guard page in /proc/maps */
46263+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46264+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46265+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46266+#else
46267 start = vma->vm_start;
46268- if (stack_guard_page_start(vma, start))
46269- start += PAGE_SIZE;
46270 end = vma->vm_end;
46271- if (stack_guard_page_end(vma, end))
46272- end -= PAGE_SIZE;
46273+#endif
46274
46275 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46276 start,
46277@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46278 flags & VM_WRITE ? 'w' : '-',
46279 flags & VM_EXEC ? 'x' : '-',
46280 flags & VM_MAYSHARE ? 's' : 'p',
46281+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46282+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46283+#else
46284 pgoff,
46285+#endif
46286 MAJOR(dev), MINOR(dev), ino, &len);
46287
46288 /*
46289@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46290 */
46291 if (file) {
46292 pad_len_spaces(m, len);
46293- seq_path(m, &file->f_path, "\n");
46294+ seq_path(m, &file->f_path, "\n\\");
46295 } else {
46296 const char *name = arch_vma_name(vma);
46297 if (!name) {
46298@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46299 if (vma->vm_start <= mm->brk &&
46300 vma->vm_end >= mm->start_brk) {
46301 name = "[heap]";
46302- } else if (vma->vm_start <= mm->start_stack &&
46303- vma->vm_end >= mm->start_stack) {
46304+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46305+ (vma->vm_start <= mm->start_stack &&
46306+ vma->vm_end >= mm->start_stack)) {
46307 name = "[stack]";
46308 }
46309 } else {
46310@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46311 struct proc_maps_private *priv = m->private;
46312 struct task_struct *task = priv->task;
46313
46314+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46315+ if (current->exec_id != m->exec_id) {
46316+ gr_log_badprocpid("maps");
46317+ return 0;
46318+ }
46319+#endif
46320+
46321 show_map_vma(m, vma);
46322
46323 if (m->count < m->size) /* vma is copied successfully */
46324@@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46325 .private = &mss,
46326 };
46327
46328+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46329+ if (current->exec_id != m->exec_id) {
46330+ gr_log_badprocpid("smaps");
46331+ return 0;
46332+ }
46333+#endif
46334 memset(&mss, 0, sizeof mss);
46335- mss.vma = vma;
46336- /* mmap_sem is held in m_start */
46337- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46338- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46339-
46340+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46341+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46342+#endif
46343+ mss.vma = vma;
46344+ /* mmap_sem is held in m_start */
46345+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46346+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46347+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46348+ }
46349+#endif
46350 show_map_vma(m, vma);
46351
46352 seq_printf(m,
46353@@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46354 "KernelPageSize: %8lu kB\n"
46355 "MMUPageSize: %8lu kB\n"
46356 "Locked: %8lu kB\n",
46357+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46358+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46359+#else
46360 (vma->vm_end - vma->vm_start) >> 10,
46361+#endif
46362 mss.resident >> 10,
46363 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46364 mss.shared_clean >> 10,
46365@@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46366 int n;
46367 char buffer[50];
46368
46369+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46370+ if (current->exec_id != m->exec_id) {
46371+ gr_log_badprocpid("numa_maps");
46372+ return 0;
46373+ }
46374+#endif
46375+
46376 if (!mm)
46377 return 0;
46378
46379@@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46380 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46381 mpol_cond_put(pol);
46382
46383+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46384+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46385+#else
46386 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46387+#endif
46388
46389 if (file) {
46390 seq_printf(m, " file=");
46391- seq_path(m, &file->f_path, "\n\t= ");
46392+ seq_path(m, &file->f_path, "\n\t\\= ");
46393 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46394 seq_printf(m, " heap");
46395 } else if (vma->vm_start <= mm->start_stack &&
46396diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46397index 980de54..2a4db5f 100644
46398--- a/fs/proc/task_nommu.c
46399+++ b/fs/proc/task_nommu.c
46400@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46401 else
46402 bytes += kobjsize(mm);
46403
46404- if (current->fs && current->fs->users > 1)
46405+ if (current->fs && atomic_read(&current->fs->users) > 1)
46406 sbytes += kobjsize(current->fs);
46407 else
46408 bytes += kobjsize(current->fs);
46409@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46410
46411 if (file) {
46412 pad_len_spaces(m, len);
46413- seq_path(m, &file->f_path, "");
46414+ seq_path(m, &file->f_path, "\n\\");
46415 } else if (mm) {
46416 if (vma->vm_start <= mm->start_stack &&
46417 vma->vm_end >= mm->start_stack) {
46418diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46419index d67908b..d13f6a6 100644
46420--- a/fs/quota/netlink.c
46421+++ b/fs/quota/netlink.c
46422@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46423 void quota_send_warning(short type, unsigned int id, dev_t dev,
46424 const char warntype)
46425 {
46426- static atomic_t seq;
46427+ static atomic_unchecked_t seq;
46428 struct sk_buff *skb;
46429 void *msg_head;
46430 int ret;
46431@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46432 "VFS: Not enough memory to send quota warning.\n");
46433 return;
46434 }
46435- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46436+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46437 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46438 if (!msg_head) {
46439 printk(KERN_ERR
46440diff --git a/fs/readdir.c b/fs/readdir.c
46441index 356f715..c918d38 100644
46442--- a/fs/readdir.c
46443+++ b/fs/readdir.c
46444@@ -17,6 +17,7 @@
46445 #include <linux/security.h>
46446 #include <linux/syscalls.h>
46447 #include <linux/unistd.h>
46448+#include <linux/namei.h>
46449
46450 #include <asm/uaccess.h>
46451
46452@@ -67,6 +68,7 @@ struct old_linux_dirent {
46453
46454 struct readdir_callback {
46455 struct old_linux_dirent __user * dirent;
46456+ struct file * file;
46457 int result;
46458 };
46459
46460@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46461 buf->result = -EOVERFLOW;
46462 return -EOVERFLOW;
46463 }
46464+
46465+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46466+ return 0;
46467+
46468 buf->result++;
46469 dirent = buf->dirent;
46470 if (!access_ok(VERIFY_WRITE, dirent,
46471@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46472
46473 buf.result = 0;
46474 buf.dirent = dirent;
46475+ buf.file = file;
46476
46477 error = vfs_readdir(file, fillonedir, &buf);
46478 if (buf.result)
46479@@ -142,6 +149,7 @@ struct linux_dirent {
46480 struct getdents_callback {
46481 struct linux_dirent __user * current_dir;
46482 struct linux_dirent __user * previous;
46483+ struct file * file;
46484 int count;
46485 int error;
46486 };
46487@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46488 buf->error = -EOVERFLOW;
46489 return -EOVERFLOW;
46490 }
46491+
46492+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46493+ return 0;
46494+
46495 dirent = buf->previous;
46496 if (dirent) {
46497 if (__put_user(offset, &dirent->d_off))
46498@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46499 buf.previous = NULL;
46500 buf.count = count;
46501 buf.error = 0;
46502+ buf.file = file;
46503
46504 error = vfs_readdir(file, filldir, &buf);
46505 if (error >= 0)
46506@@ -229,6 +242,7 @@ out:
46507 struct getdents_callback64 {
46508 struct linux_dirent64 __user * current_dir;
46509 struct linux_dirent64 __user * previous;
46510+ struct file *file;
46511 int count;
46512 int error;
46513 };
46514@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46515 buf->error = -EINVAL; /* only used if we fail.. */
46516 if (reclen > buf->count)
46517 return -EINVAL;
46518+
46519+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46520+ return 0;
46521+
46522 dirent = buf->previous;
46523 if (dirent) {
46524 if (__put_user(offset, &dirent->d_off))
46525@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46526
46527 buf.current_dir = dirent;
46528 buf.previous = NULL;
46529+ buf.file = file;
46530 buf.count = count;
46531 buf.error = 0;
46532
46533@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46534 error = buf.error;
46535 lastdirent = buf.previous;
46536 if (lastdirent) {
46537- typeof(lastdirent->d_off) d_off = file->f_pos;
46538+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46539 if (__put_user(d_off, &lastdirent->d_off))
46540 error = -EFAULT;
46541 else
46542diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46543index 60c0804..d814f98 100644
46544--- a/fs/reiserfs/do_balan.c
46545+++ b/fs/reiserfs/do_balan.c
46546@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46547 return;
46548 }
46549
46550- atomic_inc(&(fs_generation(tb->tb_sb)));
46551+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46552 do_balance_starts(tb);
46553
46554 /* balance leaf returns 0 except if combining L R and S into
46555diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46556index 7a99811..a7c96c4 100644
46557--- a/fs/reiserfs/procfs.c
46558+++ b/fs/reiserfs/procfs.c
46559@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46560 "SMALL_TAILS " : "NO_TAILS ",
46561 replay_only(sb) ? "REPLAY_ONLY " : "",
46562 convert_reiserfs(sb) ? "CONV " : "",
46563- atomic_read(&r->s_generation_counter),
46564+ atomic_read_unchecked(&r->s_generation_counter),
46565 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46566 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46567 SF(s_good_search_by_key_reada), SF(s_bmaps),
46568diff --git a/fs/select.c b/fs/select.c
46569index d33418f..2a5345e 100644
46570--- a/fs/select.c
46571+++ b/fs/select.c
46572@@ -20,6 +20,7 @@
46573 #include <linux/module.h>
46574 #include <linux/slab.h>
46575 #include <linux/poll.h>
46576+#include <linux/security.h>
46577 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46578 #include <linux/file.h>
46579 #include <linux/fdtable.h>
46580@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46581 struct poll_list *walk = head;
46582 unsigned long todo = nfds;
46583
46584+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46585 if (nfds > rlimit(RLIMIT_NOFILE))
46586 return -EINVAL;
46587
46588diff --git a/fs/seq_file.c b/fs/seq_file.c
46589index dba43c3..9fb8511 100644
46590--- a/fs/seq_file.c
46591+++ b/fs/seq_file.c
46592@@ -9,6 +9,7 @@
46593 #include <linux/module.h>
46594 #include <linux/seq_file.h>
46595 #include <linux/slab.h>
46596+#include <linux/sched.h>
46597
46598 #include <asm/uaccess.h>
46599 #include <asm/page.h>
46600@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46601 memset(p, 0, sizeof(*p));
46602 mutex_init(&p->lock);
46603 p->op = op;
46604+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46605+ p->exec_id = current->exec_id;
46606+#endif
46607
46608 /*
46609 * Wrappers around seq_open(e.g. swaps_open) need to be
46610@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46611 return 0;
46612 }
46613 if (!m->buf) {
46614- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46615+ m->size = PAGE_SIZE;
46616+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46617 if (!m->buf)
46618 return -ENOMEM;
46619 }
46620@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46621 Eoverflow:
46622 m->op->stop(m, p);
46623 kfree(m->buf);
46624- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46625+ m->size <<= 1;
46626+ m->buf = kmalloc(m->size, GFP_KERNEL);
46627 return !m->buf ? -ENOMEM : -EAGAIN;
46628 }
46629
46630@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46631 m->version = file->f_version;
46632 /* grab buffer if we didn't have one */
46633 if (!m->buf) {
46634- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46635+ m->size = PAGE_SIZE;
46636+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46637 if (!m->buf)
46638 goto Enomem;
46639 }
46640@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46641 goto Fill;
46642 m->op->stop(m, p);
46643 kfree(m->buf);
46644- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46645+ m->size <<= 1;
46646+ m->buf = kmalloc(m->size, GFP_KERNEL);
46647 if (!m->buf)
46648 goto Enomem;
46649 m->count = 0;
46650@@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46651 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46652 void *data)
46653 {
46654- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46655+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46656 int res = -ENOMEM;
46657
46658 if (op) {
46659diff --git a/fs/splice.c b/fs/splice.c
46660index fa2defa..8601650 100644
46661--- a/fs/splice.c
46662+++ b/fs/splice.c
46663@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46664 pipe_lock(pipe);
46665
46666 for (;;) {
46667- if (!pipe->readers) {
46668+ if (!atomic_read(&pipe->readers)) {
46669 send_sig(SIGPIPE, current, 0);
46670 if (!ret)
46671 ret = -EPIPE;
46672@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46673 do_wakeup = 0;
46674 }
46675
46676- pipe->waiting_writers++;
46677+ atomic_inc(&pipe->waiting_writers);
46678 pipe_wait(pipe);
46679- pipe->waiting_writers--;
46680+ atomic_dec(&pipe->waiting_writers);
46681 }
46682
46683 pipe_unlock(pipe);
46684@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46685 old_fs = get_fs();
46686 set_fs(get_ds());
46687 /* The cast to a user pointer is valid due to the set_fs() */
46688- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46689+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46690 set_fs(old_fs);
46691
46692 return res;
46693@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46694 old_fs = get_fs();
46695 set_fs(get_ds());
46696 /* The cast to a user pointer is valid due to the set_fs() */
46697- res = vfs_write(file, (const char __user *)buf, count, &pos);
46698+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46699 set_fs(old_fs);
46700
46701 return res;
46702@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46703 goto err;
46704
46705 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46706- vec[i].iov_base = (void __user *) page_address(page);
46707+ vec[i].iov_base = (void __force_user *) page_address(page);
46708 vec[i].iov_len = this_len;
46709 spd.pages[i] = page;
46710 spd.nr_pages++;
46711@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46712 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46713 {
46714 while (!pipe->nrbufs) {
46715- if (!pipe->writers)
46716+ if (!atomic_read(&pipe->writers))
46717 return 0;
46718
46719- if (!pipe->waiting_writers && sd->num_spliced)
46720+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46721 return 0;
46722
46723 if (sd->flags & SPLICE_F_NONBLOCK)
46724@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46725 * out of the pipe right after the splice_to_pipe(). So set
46726 * PIPE_READERS appropriately.
46727 */
46728- pipe->readers = 1;
46729+ atomic_set(&pipe->readers, 1);
46730
46731 current->splice_pipe = pipe;
46732 }
46733@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46734 ret = -ERESTARTSYS;
46735 break;
46736 }
46737- if (!pipe->writers)
46738+ if (!atomic_read(&pipe->writers))
46739 break;
46740- if (!pipe->waiting_writers) {
46741+ if (!atomic_read(&pipe->waiting_writers)) {
46742 if (flags & SPLICE_F_NONBLOCK) {
46743 ret = -EAGAIN;
46744 break;
46745@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46746 pipe_lock(pipe);
46747
46748 while (pipe->nrbufs >= pipe->buffers) {
46749- if (!pipe->readers) {
46750+ if (!atomic_read(&pipe->readers)) {
46751 send_sig(SIGPIPE, current, 0);
46752 ret = -EPIPE;
46753 break;
46754@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46755 ret = -ERESTARTSYS;
46756 break;
46757 }
46758- pipe->waiting_writers++;
46759+ atomic_inc(&pipe->waiting_writers);
46760 pipe_wait(pipe);
46761- pipe->waiting_writers--;
46762+ atomic_dec(&pipe->waiting_writers);
46763 }
46764
46765 pipe_unlock(pipe);
46766@@ -1819,14 +1819,14 @@ retry:
46767 pipe_double_lock(ipipe, opipe);
46768
46769 do {
46770- if (!opipe->readers) {
46771+ if (!atomic_read(&opipe->readers)) {
46772 send_sig(SIGPIPE, current, 0);
46773 if (!ret)
46774 ret = -EPIPE;
46775 break;
46776 }
46777
46778- if (!ipipe->nrbufs && !ipipe->writers)
46779+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46780 break;
46781
46782 /*
46783@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46784 pipe_double_lock(ipipe, opipe);
46785
46786 do {
46787- if (!opipe->readers) {
46788+ if (!atomic_read(&opipe->readers)) {
46789 send_sig(SIGPIPE, current, 0);
46790 if (!ret)
46791 ret = -EPIPE;
46792@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46793 * return EAGAIN if we have the potential of some data in the
46794 * future, otherwise just return 0
46795 */
46796- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46797+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46798 ret = -EAGAIN;
46799
46800 pipe_unlock(ipipe);
46801diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
46802index 7fdf6a7..e6cd8ad 100644
46803--- a/fs/sysfs/dir.c
46804+++ b/fs/sysfs/dir.c
46805@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
46806 struct sysfs_dirent *sd;
46807 int rc;
46808
46809+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46810+ const char *parent_name = parent_sd->s_name;
46811+
46812+ mode = S_IFDIR | S_IRWXU;
46813+
46814+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
46815+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
46816+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
46817+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
46818+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
46819+#endif
46820+
46821 /* allocate */
46822 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
46823 if (!sd)
46824diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46825index 779789a..f58193c 100644
46826--- a/fs/sysfs/file.c
46827+++ b/fs/sysfs/file.c
46828@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46829
46830 struct sysfs_open_dirent {
46831 atomic_t refcnt;
46832- atomic_t event;
46833+ atomic_unchecked_t event;
46834 wait_queue_head_t poll;
46835 struct list_head buffers; /* goes through sysfs_buffer.list */
46836 };
46837@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46838 if (!sysfs_get_active(attr_sd))
46839 return -ENODEV;
46840
46841- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46842+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46843 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46844
46845 sysfs_put_active(attr_sd);
46846@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46847 return -ENOMEM;
46848
46849 atomic_set(&new_od->refcnt, 0);
46850- atomic_set(&new_od->event, 1);
46851+ atomic_set_unchecked(&new_od->event, 1);
46852 init_waitqueue_head(&new_od->poll);
46853 INIT_LIST_HEAD(&new_od->buffers);
46854 goto retry;
46855@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46856
46857 sysfs_put_active(attr_sd);
46858
46859- if (buffer->event != atomic_read(&od->event))
46860+ if (buffer->event != atomic_read_unchecked(&od->event))
46861 goto trigger;
46862
46863 return DEFAULT_POLLMASK;
46864@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46865
46866 od = sd->s_attr.open;
46867 if (od) {
46868- atomic_inc(&od->event);
46869+ atomic_inc_unchecked(&od->event);
46870 wake_up_interruptible(&od->poll);
46871 }
46872
46873diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46874index a7ac78f..02158e1 100644
46875--- a/fs/sysfs/symlink.c
46876+++ b/fs/sysfs/symlink.c
46877@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46878
46879 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46880 {
46881- char *page = nd_get_link(nd);
46882+ const char *page = nd_get_link(nd);
46883 if (!IS_ERR(page))
46884 free_page((unsigned long)page);
46885 }
46886diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46887index c175b4d..8f36a16 100644
46888--- a/fs/udf/misc.c
46889+++ b/fs/udf/misc.c
46890@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46891
46892 u8 udf_tag_checksum(const struct tag *t)
46893 {
46894- u8 *data = (u8 *)t;
46895+ const u8 *data = (const u8 *)t;
46896 u8 checksum = 0;
46897 int i;
46898 for (i = 0; i < sizeof(struct tag); ++i)
46899diff --git a/fs/utimes.c b/fs/utimes.c
46900index ba653f3..06ea4b1 100644
46901--- a/fs/utimes.c
46902+++ b/fs/utimes.c
46903@@ -1,6 +1,7 @@
46904 #include <linux/compiler.h>
46905 #include <linux/file.h>
46906 #include <linux/fs.h>
46907+#include <linux/security.h>
46908 #include <linux/linkage.h>
46909 #include <linux/mount.h>
46910 #include <linux/namei.h>
46911@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
46912 goto mnt_drop_write_and_out;
46913 }
46914 }
46915+
46916+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46917+ error = -EACCES;
46918+ goto mnt_drop_write_and_out;
46919+ }
46920+
46921 mutex_lock(&inode->i_mutex);
46922 error = notify_change(path->dentry, &newattrs);
46923 mutex_unlock(&inode->i_mutex);
46924diff --git a/fs/xattr.c b/fs/xattr.c
46925index 67583de..c5aad14 100644
46926--- a/fs/xattr.c
46927+++ b/fs/xattr.c
46928@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46929 * Extended attribute SET operations
46930 */
46931 static long
46932-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46933+setxattr(struct path *path, const char __user *name, const void __user *value,
46934 size_t size, int flags)
46935 {
46936 int error;
46937@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
46938 return PTR_ERR(kvalue);
46939 }
46940
46941- error = vfs_setxattr(d, kname, kvalue, size, flags);
46942+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46943+ error = -EACCES;
46944+ goto out;
46945+ }
46946+
46947+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46948+out:
46949 kfree(kvalue);
46950 return error;
46951 }
46952@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
46953 return error;
46954 error = mnt_want_write(path.mnt);
46955 if (!error) {
46956- error = setxattr(path.dentry, name, value, size, flags);
46957+ error = setxattr(&path, name, value, size, flags);
46958 mnt_drop_write(path.mnt);
46959 }
46960 path_put(&path);
46961@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
46962 return error;
46963 error = mnt_want_write(path.mnt);
46964 if (!error) {
46965- error = setxattr(path.dentry, name, value, size, flags);
46966+ error = setxattr(&path, name, value, size, flags);
46967 mnt_drop_write(path.mnt);
46968 }
46969 path_put(&path);
46970@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
46971 const void __user *,value, size_t, size, int, flags)
46972 {
46973 struct file *f;
46974- struct dentry *dentry;
46975 int error = -EBADF;
46976
46977 f = fget(fd);
46978 if (!f)
46979 return error;
46980- dentry = f->f_path.dentry;
46981- audit_inode(NULL, dentry);
46982+ audit_inode(NULL, f->f_path.dentry);
46983 error = mnt_want_write_file(f);
46984 if (!error) {
46985- error = setxattr(dentry, name, value, size, flags);
46986+ error = setxattr(&f->f_path, name, value, size, flags);
46987 mnt_drop_write(f->f_path.mnt);
46988 }
46989 fput(f);
46990diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
46991index 8d5a506..7f62712 100644
46992--- a/fs/xattr_acl.c
46993+++ b/fs/xattr_acl.c
46994@@ -17,8 +17,8 @@
46995 struct posix_acl *
46996 posix_acl_from_xattr(const void *value, size_t size)
46997 {
46998- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46999- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47000+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47001+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47002 int count;
47003 struct posix_acl *acl;
47004 struct posix_acl_entry *acl_e;
47005diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47006index d0ab788..827999b 100644
47007--- a/fs/xfs/xfs_bmap.c
47008+++ b/fs/xfs/xfs_bmap.c
47009@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47010 int nmap,
47011 int ret_nmap);
47012 #else
47013-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47014+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47015 #endif /* DEBUG */
47016
47017 STATIC int
47018diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47019index 79d05e8..e3e5861 100644
47020--- a/fs/xfs/xfs_dir2_sf.c
47021+++ b/fs/xfs/xfs_dir2_sf.c
47022@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47023 }
47024
47025 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47026- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47027+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47028+ char name[sfep->namelen];
47029+ memcpy(name, sfep->name, sfep->namelen);
47030+ if (filldir(dirent, name, sfep->namelen,
47031+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47032+ *offset = off & 0x7fffffff;
47033+ return 0;
47034+ }
47035+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47036 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47037 *offset = off & 0x7fffffff;
47038 return 0;
47039diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47040index d99a905..9f88202 100644
47041--- a/fs/xfs/xfs_ioctl.c
47042+++ b/fs/xfs/xfs_ioctl.c
47043@@ -128,7 +128,7 @@ xfs_find_handle(
47044 }
47045
47046 error = -EFAULT;
47047- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47048+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47049 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47050 goto out_put;
47051
47052diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47053index 23ce927..e274cc1 100644
47054--- a/fs/xfs/xfs_iops.c
47055+++ b/fs/xfs/xfs_iops.c
47056@@ -447,7 +447,7 @@ xfs_vn_put_link(
47057 struct nameidata *nd,
47058 void *p)
47059 {
47060- char *s = nd_get_link(nd);
47061+ const char *s = nd_get_link(nd);
47062
47063 if (!IS_ERR(s))
47064 kfree(s);
47065diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47066new file mode 100644
47067index 0000000..41df561
47068--- /dev/null
47069+++ b/grsecurity/Kconfig
47070@@ -0,0 +1,1075 @@
47071+#
47072+# grecurity configuration
47073+#
47074+
47075+menu "Grsecurity"
47076+
47077+config GRKERNSEC
47078+ bool "Grsecurity"
47079+ select CRYPTO
47080+ select CRYPTO_SHA256
47081+ help
47082+ If you say Y here, you will be able to configure many features
47083+ that will enhance the security of your system. It is highly
47084+ recommended that you say Y here and read through the help
47085+ for each option so that you fully understand the features and
47086+ can evaluate their usefulness for your machine.
47087+
47088+choice
47089+ prompt "Security Level"
47090+ depends on GRKERNSEC
47091+ default GRKERNSEC_CUSTOM
47092+
47093+config GRKERNSEC_LOW
47094+ bool "Low"
47095+ select GRKERNSEC_LINK
47096+ select GRKERNSEC_FIFO
47097+ select GRKERNSEC_RANDNET
47098+ select GRKERNSEC_DMESG
47099+ select GRKERNSEC_CHROOT
47100+ select GRKERNSEC_CHROOT_CHDIR
47101+
47102+ help
47103+ If you choose this option, several of the grsecurity options will
47104+ be enabled that will give you greater protection against a number
47105+ of attacks, while assuring that none of your software will have any
47106+ conflicts with the additional security measures. If you run a lot
47107+ of unusual software, or you are having problems with the higher
47108+ security levels, you should say Y here. With this option, the
47109+ following features are enabled:
47110+
47111+ - Linking restrictions
47112+ - FIFO restrictions
47113+ - Restricted dmesg
47114+ - Enforced chdir("/") on chroot
47115+ - Runtime module disabling
47116+
47117+config GRKERNSEC_MEDIUM
47118+ bool "Medium"
47119+ select PAX
47120+ select PAX_EI_PAX
47121+ select PAX_PT_PAX_FLAGS
47122+ select PAX_HAVE_ACL_FLAGS
47123+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47124+ select GRKERNSEC_CHROOT
47125+ select GRKERNSEC_CHROOT_SYSCTL
47126+ select GRKERNSEC_LINK
47127+ select GRKERNSEC_FIFO
47128+ select GRKERNSEC_DMESG
47129+ select GRKERNSEC_RANDNET
47130+ select GRKERNSEC_FORKFAIL
47131+ select GRKERNSEC_TIME
47132+ select GRKERNSEC_SIGNAL
47133+ select GRKERNSEC_CHROOT
47134+ select GRKERNSEC_CHROOT_UNIX
47135+ select GRKERNSEC_CHROOT_MOUNT
47136+ select GRKERNSEC_CHROOT_PIVOT
47137+ select GRKERNSEC_CHROOT_DOUBLE
47138+ select GRKERNSEC_CHROOT_CHDIR
47139+ select GRKERNSEC_CHROOT_MKNOD
47140+ select GRKERNSEC_PROC
47141+ select GRKERNSEC_PROC_USERGROUP
47142+ select PAX_RANDUSTACK
47143+ select PAX_ASLR
47144+ select PAX_RANDMMAP
47145+ select PAX_REFCOUNT if (X86 || SPARC64)
47146+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47147+
47148+ help
47149+ If you say Y here, several features in addition to those included
47150+ in the low additional security level will be enabled. These
47151+ features provide even more security to your system, though in rare
47152+ cases they may be incompatible with very old or poorly written
47153+ software. If you enable this option, make sure that your auth
47154+ service (identd) is running as gid 1001. With this option,
47155+ the following features (in addition to those provided in the
47156+ low additional security level) will be enabled:
47157+
47158+ - Failed fork logging
47159+ - Time change logging
47160+ - Signal logging
47161+ - Deny mounts in chroot
47162+ - Deny double chrooting
47163+ - Deny sysctl writes in chroot
47164+ - Deny mknod in chroot
47165+ - Deny access to abstract AF_UNIX sockets out of chroot
47166+ - Deny pivot_root in chroot
47167+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47168+ - /proc restrictions with special GID set to 10 (usually wheel)
47169+ - Address Space Layout Randomization (ASLR)
47170+ - Prevent exploitation of most refcount overflows
47171+ - Bounds checking of copying between the kernel and userland
47172+
47173+config GRKERNSEC_HIGH
47174+ bool "High"
47175+ select GRKERNSEC_LINK
47176+ select GRKERNSEC_FIFO
47177+ select GRKERNSEC_DMESG
47178+ select GRKERNSEC_FORKFAIL
47179+ select GRKERNSEC_TIME
47180+ select GRKERNSEC_SIGNAL
47181+ select GRKERNSEC_CHROOT
47182+ select GRKERNSEC_CHROOT_SHMAT
47183+ select GRKERNSEC_CHROOT_UNIX
47184+ select GRKERNSEC_CHROOT_MOUNT
47185+ select GRKERNSEC_CHROOT_FCHDIR
47186+ select GRKERNSEC_CHROOT_PIVOT
47187+ select GRKERNSEC_CHROOT_DOUBLE
47188+ select GRKERNSEC_CHROOT_CHDIR
47189+ select GRKERNSEC_CHROOT_MKNOD
47190+ select GRKERNSEC_CHROOT_CAPS
47191+ select GRKERNSEC_CHROOT_SYSCTL
47192+ select GRKERNSEC_CHROOT_FINDTASK
47193+ select GRKERNSEC_SYSFS_RESTRICT
47194+ select GRKERNSEC_PROC
47195+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47196+ select GRKERNSEC_HIDESYM
47197+ select GRKERNSEC_BRUTE
47198+ select GRKERNSEC_PROC_USERGROUP
47199+ select GRKERNSEC_KMEM
47200+ select GRKERNSEC_RESLOG
47201+ select GRKERNSEC_RANDNET
47202+ select GRKERNSEC_PROC_ADD
47203+ select GRKERNSEC_CHROOT_CHMOD
47204+ select GRKERNSEC_CHROOT_NICE
47205+ select GRKERNSEC_SETXID
47206+ select GRKERNSEC_AUDIT_MOUNT
47207+ select GRKERNSEC_MODHARDEN if (MODULES)
47208+ select GRKERNSEC_HARDEN_PTRACE
47209+ select GRKERNSEC_PTRACE_READEXEC
47210+ select GRKERNSEC_VM86 if (X86_32)
47211+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47212+ select PAX
47213+ select PAX_RANDUSTACK
47214+ select PAX_ASLR
47215+ select PAX_RANDMMAP
47216+ select PAX_NOEXEC
47217+ select PAX_MPROTECT
47218+ select PAX_EI_PAX
47219+ select PAX_PT_PAX_FLAGS
47220+ select PAX_HAVE_ACL_FLAGS
47221+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47222+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
47223+ select PAX_RANDKSTACK if (X86_TSC && X86)
47224+ select PAX_SEGMEXEC if (X86_32)
47225+ select PAX_PAGEEXEC
47226+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47227+ select PAX_EMUTRAMP if (PARISC)
47228+ select PAX_EMUSIGRT if (PARISC)
47229+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47230+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47231+ select PAX_REFCOUNT if (X86 || SPARC64)
47232+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47233+ help
47234+ If you say Y here, many of the features of grsecurity will be
47235+ enabled, which will protect you against many kinds of attacks
47236+ against your system. The heightened security comes at a cost
47237+ of an increased chance of incompatibilities with rare software
47238+ on your machine. Since this security level enables PaX, you should
47239+ view <http://pax.grsecurity.net> and read about the PaX
47240+ project. While you are there, download chpax and run it on
47241+ binaries that cause problems with PaX. Also remember that
47242+ since the /proc restrictions are enabled, you must run your
47243+ identd as gid 1001. This security level enables the following
47244+ features in addition to those listed in the low and medium
47245+ security levels:
47246+
47247+ - Additional /proc restrictions
47248+ - Chmod restrictions in chroot
47249+ - No signals, ptrace, or viewing of processes outside of chroot
47250+ - Capability restrictions in chroot
47251+ - Deny fchdir out of chroot
47252+ - Priority restrictions in chroot
47253+ - Segmentation-based implementation of PaX
47254+ - Mprotect restrictions
47255+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47256+ - Kernel stack randomization
47257+ - Mount/unmount/remount logging
47258+ - Kernel symbol hiding
47259+ - Hardening of module auto-loading
47260+ - Ptrace restrictions
47261+ - Restricted vm86 mode
47262+ - Restricted sysfs/debugfs
47263+ - Active kernel exploit response
47264+
47265+config GRKERNSEC_CUSTOM
47266+ bool "Custom"
47267+ help
47268+ If you say Y here, you will be able to configure every grsecurity
47269+ option, which allows you to enable many more features that aren't
47270+ covered in the basic security levels. These additional features
47271+ include TPE, socket restrictions, and the sysctl system for
47272+ grsecurity. It is advised that you read through the help for
47273+ each option to determine its usefulness in your situation.
47274+
47275+endchoice
47276+
47277+menu "Memory Protections"
47278+depends on GRKERNSEC
47279+
47280+config GRKERNSEC_KMEM
47281+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47282+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47283+ help
47284+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47285+ be written to or read from to modify or leak the contents of the running
47286+ kernel. /dev/port will also not be allowed to be opened. If you have module
47287+ support disabled, enabling this will close up four ways that are
47288+ currently used to insert malicious code into the running kernel.
47289+ Even with all these features enabled, we still highly recommend that
47290+ you use the RBAC system, as it is still possible for an attacker to
47291+ modify the running kernel through privileged I/O granted by ioperm/iopl.
47292+ If you are not using XFree86, you may be able to stop this additional
47293+ case by enabling the 'Disable privileged I/O' option. Though nothing
47294+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47295+ but only to video memory, which is the only writing we allow in this
47296+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47297+ not be allowed to mprotect it with PROT_WRITE later.
47298+ It is highly recommended that you say Y here if you meet all the
47299+ conditions above.
47300+
47301+config GRKERNSEC_VM86
47302+ bool "Restrict VM86 mode"
47303+ depends on X86_32
47304+
47305+ help
47306+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47307+ make use of a special execution mode on 32bit x86 processors called
47308+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47309+ video cards and will still work with this option enabled. The purpose
47310+ of the option is to prevent exploitation of emulation errors in
47311+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47312+ Nearly all users should be able to enable this option.
47313+
47314+config GRKERNSEC_IO
47315+ bool "Disable privileged I/O"
47316+ depends on X86
47317+ select RTC_CLASS
47318+ select RTC_INTF_DEV
47319+ select RTC_DRV_CMOS
47320+
47321+ help
47322+ If you say Y here, all ioperm and iopl calls will return an error.
47323+ Ioperm and iopl can be used to modify the running kernel.
47324+ Unfortunately, some programs need this access to operate properly,
47325+ the most notable of which are XFree86 and hwclock. hwclock can be
47326+ remedied by having RTC support in the kernel, so real-time
47327+ clock support is enabled if this option is enabled, to ensure
47328+ that hwclock operates correctly. XFree86 still will not
47329+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47330+ IF YOU USE XFree86. If you use XFree86 and you still want to
47331+ protect your kernel against modification, use the RBAC system.
47332+
47333+config GRKERNSEC_PROC_MEMMAP
47334+ bool "Harden ASLR against information leaks and entropy reduction"
47335+ default y if (PAX_NOEXEC || PAX_ASLR)
47336+ depends on PAX_NOEXEC || PAX_ASLR
47337+ help
47338+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47339+ give no information about the addresses of its mappings if
47340+ PaX features that rely on random addresses are enabled on the task.
47341+ In addition to sanitizing this information and disabling other
47342+ dangerous sources of information, this option causes reads of sensitive
47343+ /proc/<pid> entries where the file descriptor was opened in a different
47344+ task than the one performing the read. Such attempts are logged.
47345+ Finally, this option limits argv/env strings for suid/sgid binaries
47346+ to 1MB to prevent a complete exhaustion of the stack entropy provided
47347+ by ASLR.
47348+ If you use PaX it is essential that you say Y here as it closes up
47349+ several holes that make full ASLR useless for suid/sgid binaries.
47350+
47351+config GRKERNSEC_BRUTE
47352+ bool "Deter exploit bruteforcing"
47353+ help
47354+ If you say Y here, attempts to bruteforce exploits against forking
47355+ daemons such as apache or sshd, as well as against suid/sgid binaries
47356+ will be deterred. When a child of a forking daemon is killed by PaX
47357+ or crashes due to an illegal instruction or other suspicious signal,
47358+ the parent process will be delayed 30 seconds upon every subsequent
47359+ fork until the administrator is able to assess the situation and
47360+ restart the daemon.
47361+ In the suid/sgid case, the attempt is logged, the user has all their
47362+ processes terminated, and they are prevented from executing any further
47363+ processes for 15 minutes.
47364+ It is recommended that you also enable signal logging in the auditing
47365+ section so that logs are generated when a process triggers a suspicious
47366+ signal.
47367+ If the sysctl option is enabled, a sysctl option with name
47368+ "deter_bruteforce" is created.
47369+
47370+
47371+config GRKERNSEC_MODHARDEN
47372+ bool "Harden module auto-loading"
47373+ depends on MODULES
47374+ help
47375+ If you say Y here, module auto-loading in response to use of some
47376+ feature implemented by an unloaded module will be restricted to
47377+ root users. Enabling this option helps defend against attacks
47378+ by unprivileged users who abuse the auto-loading behavior to
47379+ cause a vulnerable module to load that is then exploited.
47380+
47381+ If this option prevents a legitimate use of auto-loading for a
47382+ non-root user, the administrator can execute modprobe manually
47383+ with the exact name of the module mentioned in the alert log.
47384+ Alternatively, the administrator can add the module to the list
47385+ of modules loaded at boot by modifying init scripts.
47386+
47387+ Modification of init scripts will most likely be needed on
47388+ Ubuntu servers with encrypted home directory support enabled,
47389+ as the first non-root user logging in will cause the ecb(aes),
47390+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47391+
47392+config GRKERNSEC_HIDESYM
47393+ bool "Hide kernel symbols"
47394+ help
47395+ If you say Y here, getting information on loaded modules, and
47396+ displaying all kernel symbols through a syscall will be restricted
47397+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47398+ /proc/kallsyms will be restricted to the root user. The RBAC
47399+ system can hide that entry even from root.
47400+
47401+ This option also prevents leaking of kernel addresses through
47402+ several /proc entries.
47403+
47404+ Note that this option is only effective provided the following
47405+ conditions are met:
47406+ 1) The kernel using grsecurity is not precompiled by some distribution
47407+ 2) You have also enabled GRKERNSEC_DMESG
47408+ 3) You are using the RBAC system and hiding other files such as your
47409+ kernel image and System.map. Alternatively, enabling this option
47410+ causes the permissions on /boot, /lib/modules, and the kernel
47411+ source directory to change at compile time to prevent
47412+ reading by non-root users.
47413+ If the above conditions are met, this option will aid in providing a
47414+ useful protection against local kernel exploitation of overflows
47415+ and arbitrary read/write vulnerabilities.
47416+
47417+config GRKERNSEC_KERN_LOCKOUT
47418+ bool "Active kernel exploit response"
47419+ depends on X86 || ARM || PPC || SPARC
47420+ help
47421+ If you say Y here, when a PaX alert is triggered due to suspicious
47422+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47423+ or an OOPs occurs due to bad memory accesses, instead of just
47424+ terminating the offending process (and potentially allowing
47425+ a subsequent exploit from the same user), we will take one of two
47426+ actions:
47427+ If the user was root, we will panic the system
47428+ If the user was non-root, we will log the attempt, terminate
47429+ all processes owned by the user, then prevent them from creating
47430+ any new processes until the system is restarted
47431+ This deters repeated kernel exploitation/bruteforcing attempts
47432+ and is useful for later forensics.
47433+
47434+endmenu
47435+menu "Role Based Access Control Options"
47436+depends on GRKERNSEC
47437+
47438+config GRKERNSEC_RBAC_DEBUG
47439+ bool
47440+
47441+config GRKERNSEC_NO_RBAC
47442+ bool "Disable RBAC system"
47443+ help
47444+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47445+ preventing the RBAC system from being enabled. You should only say Y
47446+ here if you have no intention of using the RBAC system, so as to prevent
47447+ an attacker with root access from misusing the RBAC system to hide files
47448+ and processes when loadable module support and /dev/[k]mem have been
47449+ locked down.
47450+
47451+config GRKERNSEC_ACL_HIDEKERN
47452+ bool "Hide kernel processes"
47453+ help
47454+ If you say Y here, all kernel threads will be hidden to all
47455+ processes but those whose subject has the "view hidden processes"
47456+ flag.
47457+
47458+config GRKERNSEC_ACL_MAXTRIES
47459+ int "Maximum tries before password lockout"
47460+ default 3
47461+ help
47462+ This option enforces the maximum number of times a user can attempt
47463+ to authorize themselves with the grsecurity RBAC system before being
47464+ denied the ability to attempt authorization again for a specified time.
47465+ The lower the number, the harder it will be to brute-force a password.
47466+
47467+config GRKERNSEC_ACL_TIMEOUT
47468+ int "Time to wait after max password tries, in seconds"
47469+ default 30
47470+ help
47471+ This option specifies the time the user must wait after attempting to
47472+ authorize to the RBAC system with the maximum number of invalid
47473+ passwords. The higher the number, the harder it will be to brute-force
47474+ a password.
47475+
47476+endmenu
47477+menu "Filesystem Protections"
47478+depends on GRKERNSEC
47479+
47480+config GRKERNSEC_PROC
47481+ bool "Proc restrictions"
47482+ help
47483+ If you say Y here, the permissions of the /proc filesystem
47484+ will be altered to enhance system security and privacy. You MUST
47485+ choose either a user only restriction or a user and group restriction.
47486+ Depending upon the option you choose, you can either restrict users to
47487+ see only the processes they themselves run, or choose a group that can
47488+ view all processes and files normally restricted to root if you choose
47489+ the "restrict to user only" option. NOTE: If you're running identd as
47490+ a non-root user, you will have to run it as the group you specify here.
47491+
47492+config GRKERNSEC_PROC_USER
47493+ bool "Restrict /proc to user only"
47494+ depends on GRKERNSEC_PROC
47495+ help
47496+ If you say Y here, non-root users will only be able to view their own
47497+ processes, and will be restricted from viewing network-related information
47498+ and kernel symbol and module information.
47499+
47500+config GRKERNSEC_PROC_USERGROUP
47501+ bool "Allow special group"
47502+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47503+ help
47504+ If you say Y here, you will be able to select a group that will be
47505+ able to view all processes and network-related information. If you've
47506+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47507+ remain hidden. This option is useful if you want to run identd as
47508+ a non-root user.
47509+
47510+config GRKERNSEC_PROC_GID
47511+ int "GID for special group"
47512+ depends on GRKERNSEC_PROC_USERGROUP
47513+ default 1001
47514+
47515+config GRKERNSEC_PROC_ADD
47516+ bool "Additional restrictions"
47517+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47518+ help
47519+ If you say Y here, additional restrictions will be placed on
47520+ /proc that keep normal users from viewing device information and
47521+ slabinfo information that could be useful for exploits.
47522+
47523+config GRKERNSEC_LINK
47524+ bool "Linking restrictions"
47525+ help
47526+ If you say Y here, /tmp race exploits will be prevented, since users
47527+ will no longer be able to follow symlinks owned by other users in
47528+ world-writable +t directories (e.g. /tmp), unless the owner of the
47529+ symlink is the owner of the directory. Users will also not be
47530+ able to hardlink to files they do not own. If the sysctl option is
47531+ enabled, a sysctl option with name "linking_restrictions" is created.
47532+
47533+config GRKERNSEC_FIFO
47534+ bool "FIFO restrictions"
47535+ help
47536+ If you say Y here, users will not be able to write to FIFOs they don't
47537+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47538+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
47539+ option is enabled, a sysctl option with name "fifo_restrictions" is
47540+ created.
47541+
47542+config GRKERNSEC_SYSFS_RESTRICT
47543+ bool "Sysfs/debugfs restriction"
47544+ depends on SYSFS
47545+ help
47546+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47547+ any filesystem normally mounted under it (e.g. debugfs) will be
47548+ mostly accessible only by root. These filesystems generally provide access
47549+ to hardware and debug information that isn't appropriate for unprivileged
47550+ users of the system. Sysfs and debugfs have also become a large source
47551+ of new vulnerabilities, ranging from infoleaks to local compromise.
47552+ There has been very little oversight with an eye toward security involved
47553+ in adding new exporters of information to these filesystems, so their
47554+ use is discouraged.
47555+ For reasons of compatibility, a few directories have been whitelisted
47556+ for access by non-root users:
47557+ /sys/fs/selinux
47558+ /sys/fs/fuse
47559+ /sys/devices/system/cpu
47560+
47561+config GRKERNSEC_ROFS
47562+ bool "Runtime read-only mount protection"
47563+ help
47564+ If you say Y here, a sysctl option with name "romount_protect" will
47565+ be created. By setting this option to 1 at runtime, filesystems
47566+ will be protected in the following ways:
47567+ * No new writable mounts will be allowed
47568+ * Existing read-only mounts won't be able to be remounted read/write
47569+ * Write operations will be denied on all block devices
47570+ This option acts independently of grsec_lock: once it is set to 1,
47571+ it cannot be turned off. Therefore, please be mindful of the resulting
47572+ behavior if this option is enabled in an init script on a read-only
47573+ filesystem. This feature is mainly intended for secure embedded systems.
47574+
47575+config GRKERNSEC_CHROOT
47576+ bool "Chroot jail restrictions"
47577+ help
47578+ If you say Y here, you will be able to choose several options that will
47579+ make breaking out of a chrooted jail much more difficult. If you
47580+ encounter no software incompatibilities with the following options, it
47581+ is recommended that you enable each one.
47582+
47583+config GRKERNSEC_CHROOT_MOUNT
47584+ bool "Deny mounts"
47585+ depends on GRKERNSEC_CHROOT
47586+ help
47587+ If you say Y here, processes inside a chroot will not be able to
47588+ mount or remount filesystems. If the sysctl option is enabled, a
47589+ sysctl option with name "chroot_deny_mount" is created.
47590+
47591+config GRKERNSEC_CHROOT_DOUBLE
47592+ bool "Deny double-chroots"
47593+ depends on GRKERNSEC_CHROOT
47594+ help
47595+ If you say Y here, processes inside a chroot will not be able to chroot
47596+ again outside the chroot. This is a widely used method of breaking
47597+ out of a chroot jail and should not be allowed. If the sysctl
47598+ option is enabled, a sysctl option with name
47599+ "chroot_deny_chroot" is created.
47600+
47601+config GRKERNSEC_CHROOT_PIVOT
47602+ bool "Deny pivot_root in chroot"
47603+ depends on GRKERNSEC_CHROOT
47604+ help
47605+ If you say Y here, processes inside a chroot will not be able to use
47606+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47607+ works similar to chroot in that it changes the root filesystem. This
47608+ function could be misused in a chrooted process to attempt to break out
47609+ of the chroot, and therefore should not be allowed. If the sysctl
47610+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47611+ created.
47612+
47613+config GRKERNSEC_CHROOT_CHDIR
47614+ bool "Enforce chdir(\"/\") on all chroots"
47615+ depends on GRKERNSEC_CHROOT
47616+ help
47617+ If you say Y here, the current working directory of all newly-chrooted
47618+ applications will be set to the root directory of the chroot.
47619+ The man page on chroot(2) states:
47620+ Note that this call does not change the current working
47621+ directory, so that `.' can be outside the tree rooted at
47622+ `/'. In particular, the super-user can escape from a
47623+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47624+
47625+ It is recommended that you say Y here, since it's not known to break
47626+ any software. If the sysctl option is enabled, a sysctl option with
47627+ name "chroot_enforce_chdir" is created.
47628+
47629+config GRKERNSEC_CHROOT_CHMOD
47630+ bool "Deny (f)chmod +s"
47631+ depends on GRKERNSEC_CHROOT
47632+ help
47633+ If you say Y here, processes inside a chroot will not be able to chmod
47634+ or fchmod files to make them have suid or sgid bits. This protects
47635+ against another published method of breaking a chroot. If the sysctl
47636+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47637+ created.
47638+
47639+config GRKERNSEC_CHROOT_FCHDIR
47640+ bool "Deny fchdir out of chroot"
47641+ depends on GRKERNSEC_CHROOT
47642+ help
47643+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47644+ to a file descriptor of the chrooting process that points to a directory
47645+ outside the filesystem will be stopped. If the sysctl option
47646+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47647+
47648+config GRKERNSEC_CHROOT_MKNOD
47649+ bool "Deny mknod"
47650+ depends on GRKERNSEC_CHROOT
47651+ help
47652+ If you say Y here, processes inside a chroot will not be allowed to
47653+ mknod. The problem with using mknod inside a chroot is that it
47654+ would allow an attacker to create a device entry that is the same
47655+ as one on the physical root of your system, which could range from
47656+ anything from the console device to a device for your harddrive (which
47657+ they could then use to wipe the drive or steal data). It is recommended
47658+ that you say Y here, unless you run into software incompatibilities.
47659+ If the sysctl option is enabled, a sysctl option with name
47660+ "chroot_deny_mknod" is created.
47661+
47662+config GRKERNSEC_CHROOT_SHMAT
47663+ bool "Deny shmat() out of chroot"
47664+ depends on GRKERNSEC_CHROOT
47665+ help
47666+ If you say Y here, processes inside a chroot will not be able to attach
47667+ to shared memory segments that were created outside of the chroot jail.
47668+ It is recommended that you say Y here. If the sysctl option is enabled,
47669+ a sysctl option with name "chroot_deny_shmat" is created.
47670+
47671+config GRKERNSEC_CHROOT_UNIX
47672+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47673+ depends on GRKERNSEC_CHROOT
47674+ help
47675+ If you say Y here, processes inside a chroot will not be able to
47676+ connect to abstract (meaning not belonging to a filesystem) Unix
47677+ domain sockets that were bound outside of a chroot. It is recommended
47678+ that you say Y here. If the sysctl option is enabled, a sysctl option
47679+ with name "chroot_deny_unix" is created.
47680+
47681+config GRKERNSEC_CHROOT_FINDTASK
47682+ bool "Protect outside processes"
47683+ depends on GRKERNSEC_CHROOT
47684+ help
47685+ If you say Y here, processes inside a chroot will not be able to
47686+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47687+ getsid, or view any process outside of the chroot. If the sysctl
47688+ option is enabled, a sysctl option with name "chroot_findtask" is
47689+ created.
47690+
47691+config GRKERNSEC_CHROOT_NICE
47692+ bool "Restrict priority changes"
47693+ depends on GRKERNSEC_CHROOT
47694+ help
47695+ If you say Y here, processes inside a chroot will not be able to raise
47696+ the priority of processes in the chroot, or alter the priority of
47697+ processes outside the chroot. This provides more security than simply
47698+ removing CAP_SYS_NICE from the process' capability set. If the
47699+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47700+ is created.
47701+
47702+config GRKERNSEC_CHROOT_SYSCTL
47703+ bool "Deny sysctl writes"
47704+ depends on GRKERNSEC_CHROOT
47705+ help
47706+ If you say Y here, an attacker in a chroot will not be able to
47707+ write to sysctl entries, either by sysctl(2) or through a /proc
47708+ interface. It is strongly recommended that you say Y here. If the
47709+ sysctl option is enabled, a sysctl option with name
47710+ "chroot_deny_sysctl" is created.
47711+
47712+config GRKERNSEC_CHROOT_CAPS
47713+ bool "Capability restrictions"
47714+ depends on GRKERNSEC_CHROOT
47715+ help
47716+ If you say Y here, the capabilities on all processes within a
47717+ chroot jail will be lowered to stop module insertion, raw i/o,
47718+ system and net admin tasks, rebooting the system, modifying immutable
47719+ files, modifying IPC owned by another, and changing the system time.
47720+ This is left an option because it can break some apps. Disable this
47721+ if your chrooted apps are having problems performing those kinds of
47722+ tasks. If the sysctl option is enabled, a sysctl option with
47723+ name "chroot_caps" is created.
47724+
47725+endmenu
47726+menu "Kernel Auditing"
47727+depends on GRKERNSEC
47728+
47729+config GRKERNSEC_AUDIT_GROUP
47730+ bool "Single group for auditing"
47731+ help
47732+ If you say Y here, the exec, chdir, and (un)mount logging features
47733+ will only operate on a group you specify. This option is recommended
47734+ if you only want to watch certain users instead of having a large
47735+ amount of logs from the entire system. If the sysctl option is enabled,
47736+ a sysctl option with name "audit_group" is created.
47737+
47738+config GRKERNSEC_AUDIT_GID
47739+ int "GID for auditing"
47740+ depends on GRKERNSEC_AUDIT_GROUP
47741+ default 1007
47742+
47743+config GRKERNSEC_EXECLOG
47744+ bool "Exec logging"
47745+ help
47746+ If you say Y here, all execve() calls will be logged (since the
47747+ other exec*() calls are frontends to execve(), all execution
47748+ will be logged). Useful for shell-servers that like to keep track
47749+ of their users. If the sysctl option is enabled, a sysctl option with
47750+ name "exec_logging" is created.
47751+ WARNING: This option when enabled will produce a LOT of logs, especially
47752+ on an active system.
47753+
47754+config GRKERNSEC_RESLOG
47755+ bool "Resource logging"
47756+ help
47757+ If you say Y here, all attempts to overstep resource limits will
47758+ be logged with the resource name, the requested size, and the current
47759+ limit. It is highly recommended that you say Y here. If the sysctl
47760+ option is enabled, a sysctl option with name "resource_logging" is
47761+ created. If the RBAC system is enabled, the sysctl value is ignored.
47762+
47763+config GRKERNSEC_CHROOT_EXECLOG
47764+ bool "Log execs within chroot"
47765+ help
47766+ If you say Y here, all executions inside a chroot jail will be logged
47767+ to syslog. This can cause a large amount of logs if certain
47768+ applications (eg. djb's daemontools) are installed on the system, and
47769+ is therefore left as an option. If the sysctl option is enabled, a
47770+ sysctl option with name "chroot_execlog" is created.
47771+
47772+config GRKERNSEC_AUDIT_PTRACE
47773+ bool "Ptrace logging"
47774+ help
47775+ If you say Y here, all attempts to attach to a process via ptrace
47776+ will be logged. If the sysctl option is enabled, a sysctl option
47777+ with name "audit_ptrace" is created.
47778+
47779+config GRKERNSEC_AUDIT_CHDIR
47780+ bool "Chdir logging"
47781+ help
47782+ If you say Y here, all chdir() calls will be logged. If the sysctl
47783+ option is enabled, a sysctl option with name "audit_chdir" is created.
47784+
47785+config GRKERNSEC_AUDIT_MOUNT
47786+ bool "(Un)Mount logging"
47787+ help
47788+ If you say Y here, all mounts and unmounts will be logged. If the
47789+ sysctl option is enabled, a sysctl option with name "audit_mount" is
47790+ created.
47791+
47792+config GRKERNSEC_SIGNAL
47793+ bool "Signal logging"
47794+ help
47795+ If you say Y here, certain important signals will be logged, such as
47796+ SIGSEGV, which will as a result inform you when an error in a program
47797+ occurred, which in some cases could mean a possible exploit attempt.
47798+ If the sysctl option is enabled, a sysctl option with name
47799+ "signal_logging" is created.
47800+
47801+config GRKERNSEC_FORKFAIL
47802+ bool "Fork failure logging"
47803+ help
47804+ If you say Y here, all failed fork() attempts will be logged.
47805+ This could suggest a fork bomb, or someone attempting to overstep
47806+ their process limit. If the sysctl option is enabled, a sysctl option
47807+ with name "forkfail_logging" is created.
47808+
47809+config GRKERNSEC_TIME
47810+ bool "Time change logging"
47811+ help
47812+ If you say Y here, any changes of the system clock will be logged.
47813+ If the sysctl option is enabled, a sysctl option with name
47814+ "timechange_logging" is created.
47815+
47816+config GRKERNSEC_PROC_IPADDR
47817+ bool "/proc/<pid>/ipaddr support"
47818+ help
47819+ If you say Y here, a new entry will be added to each /proc/<pid>
47820+ directory that contains the IP address of the person using the task.
47821+ The IP is carried across local TCP and AF_UNIX stream sockets.
47822+ This information can be useful for IDS/IPSes to perform remote response
47823+ to a local attack. The entry is readable by only the owner of the
47824+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47825+ the RBAC system), and thus does not create privacy concerns.
47826+
47827+config GRKERNSEC_RWXMAP_LOG
47828+ bool 'Denied RWX mmap/mprotect logging'
47829+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47830+ help
47831+ If you say Y here, calls to mmap() and mprotect() with explicit
47832+ usage of PROT_WRITE and PROT_EXEC together will be logged when
47833+ denied by the PAX_MPROTECT feature. If the sysctl option is
47834+ enabled, a sysctl option with name "rwxmap_logging" is created.
47835+
47836+config GRKERNSEC_AUDIT_TEXTREL
47837+ bool 'ELF text relocations logging (READ HELP)'
47838+ depends on PAX_MPROTECT
47839+ help
47840+ If you say Y here, text relocations will be logged with the filename
47841+ of the offending library or binary. The purpose of the feature is
47842+ to help Linux distribution developers get rid of libraries and
47843+ binaries that need text relocations which hinder the future progress
47844+ of PaX. Only Linux distribution developers should say Y here, and
47845+ never on a production machine, as this option creates an information
47846+ leak that could aid an attacker in defeating the randomization of
47847+ a single memory region. If the sysctl option is enabled, a sysctl
47848+ option with name "audit_textrel" is created.
47849+
47850+endmenu
47851+
47852+menu "Executable Protections"
47853+depends on GRKERNSEC
47854+
47855+config GRKERNSEC_DMESG
47856+ bool "Dmesg(8) restriction"
47857+ help
47858+ If you say Y here, non-root users will not be able to use dmesg(8)
47859+ to view up to the last 4kb of messages in the kernel's log buffer.
47860+ The kernel's log buffer often contains kernel addresses and other
47861+ identifying information useful to an attacker in fingerprinting a
47862+ system for a targeted exploit.
47863+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
47864+ created.
47865+
47866+config GRKERNSEC_HARDEN_PTRACE
47867+ bool "Deter ptrace-based process snooping"
47868+ help
47869+ If you say Y here, TTY sniffers and other malicious monitoring
47870+ programs implemented through ptrace will be defeated. If you
47871+ have been using the RBAC system, this option has already been
47872+ enabled for several years for all users, with the ability to make
47873+ fine-grained exceptions.
47874+
47875+ This option only affects the ability of non-root users to ptrace
47876+ processes that are not a descendant of the ptracing process.
47877+ This means that strace ./binary and gdb ./binary will still work,
47878+ but attaching to arbitrary processes will not. If the sysctl
47879+ option is enabled, a sysctl option with name "harden_ptrace" is
47880+ created.
47881+
47882+config GRKERNSEC_PTRACE_READEXEC
47883+ bool "Require read access to ptrace sensitive binaries"
47884+ help
47885+ If you say Y here, unprivileged users will not be able to ptrace unreadable
47886+ binaries. This option is useful in environments that
47887+ remove the read bits (e.g. file mode 4711) from suid binaries to
47888+ prevent infoleaking of their contents. This option adds
47889+ consistency to the use of that file mode, as the binary could normally
47890+ be read out when run without privileges while ptracing.
47891+
47892+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47893+ is created.
47894+
47895+config GRKERNSEC_SETXID
47896+ bool "Enforce consistent multithreaded privileges"
47897+ help
47898+ If you say Y here, a change from a root uid to a non-root uid
47899+ in a multithreaded application will cause the resulting uids,
47900+ gids, supplementary groups, and capabilities in that thread
47901+ to be propagated to the other threads of the process. In most
47902+ cases this is unnecessary, as glibc will emulate this behavior
47903+ on behalf of the application. Other libcs do not act in the
47904+ same way, allowing the other threads of the process to continue
47905+ running with root privileges. If the sysctl option is enabled,
47906+ a sysctl option with name "consistent_setxid" is created.
47907+
47908+config GRKERNSEC_TPE
47909+ bool "Trusted Path Execution (TPE)"
47910+ help
47911+ If you say Y here, you will be able to choose a gid to add to the
47912+ supplementary groups of users you want to mark as "untrusted."
47913+ These users will not be able to execute any files that are not in
47914+ root-owned directories writable only by root. If the sysctl option
47915+ is enabled, a sysctl option with name "tpe" is created.
47916+
47917+config GRKERNSEC_TPE_ALL
47918+ bool "Partially restrict all non-root users"
47919+ depends on GRKERNSEC_TPE
47920+ help
47921+ If you say Y here, all non-root users will be covered under
47922+ a weaker TPE restriction. This is separate from, and in addition to,
47923+ the main TPE options that you have selected elsewhere. Thus, if a
47924+ "trusted" GID is chosen, this restriction applies to even that GID.
47925+ Under this restriction, all non-root users will only be allowed to
47926+ execute files in directories they own that are not group or
47927+ world-writable, or in directories owned by root and writable only by
47928+ root. If the sysctl option is enabled, a sysctl option with name
47929+ "tpe_restrict_all" is created.
47930+
47931+config GRKERNSEC_TPE_INVERT
47932+ bool "Invert GID option"
47933+ depends on GRKERNSEC_TPE
47934+ help
47935+ If you say Y here, the group you specify in the TPE configuration will
47936+ decide what group TPE restrictions will be *disabled* for. This
47937+ option is useful if you want TPE restrictions to be applied to most
47938+ users on the system. If the sysctl option is enabled, a sysctl option
47939+ with name "tpe_invert" is created. Unlike other sysctl options, this
47940+ entry will default to on for backward-compatibility.
47941+
47942+config GRKERNSEC_TPE_GID
47943+ int "GID for untrusted users"
47944+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
47945+ default 1005
47946+ help
47947+ Setting this GID determines what group TPE restrictions will be
47948+ *enabled* for. If the sysctl option is enabled, a sysctl option
47949+ with name "tpe_gid" is created.
47950+
47951+config GRKERNSEC_TPE_GID
47952+ int "GID for trusted users"
47953+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
47954+ default 1005
47955+ help
47956+ Setting this GID determines what group TPE restrictions will be
47957+ *disabled* for. If the sysctl option is enabled, a sysctl option
47958+ with name "tpe_gid" is created.
47959+
47960+endmenu
47961+menu "Network Protections"
47962+depends on GRKERNSEC
47963+
47964+config GRKERNSEC_RANDNET
47965+ bool "Larger entropy pools"
47966+ help
47967+ If you say Y here, the entropy pools used for many features of Linux
47968+ and grsecurity will be doubled in size. Since several grsecurity
47969+ features use additional randomness, it is recommended that you say Y
47970+ here. Saying Y here has a similar effect as modifying
47971+ /proc/sys/kernel/random/poolsize.
47972+
47973+config GRKERNSEC_BLACKHOLE
47974+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
47975+ depends on NET
47976+ help
47977+ If you say Y here, neither TCP resets nor ICMP
47978+ destination-unreachable packets will be sent in response to packets
47979+ sent to ports for which no associated listening process exists.
47980+ This feature supports both IPv4 and IPv6 and exempts the
47981+ loopback interface from blackholing. Enabling this feature
47982+ makes a host more resilient to DoS attacks and reduces network
47983+ visibility against scanners.
47984+
47985+ The blackhole feature as-implemented is equivalent to the FreeBSD
47986+ blackhole feature, as it prevents RST responses to all packets, not
47987+ just SYNs. Under most application behavior this causes no
47988+ problems, but applications (like haproxy) may not close certain
47989+ connections in a way that cleanly terminates them on the remote
47990+ end, leaving the remote host in LAST_ACK state. Because of this
47991+ side-effect and to prevent intentional LAST_ACK DoSes, this
47992+ feature also adds automatic mitigation against such attacks.
47993+ The mitigation drastically reduces the amount of time a socket
47994+ can spend in LAST_ACK state. If you're using haproxy and not
47995+ all servers it connects to have this option enabled, consider
47996+ disabling this feature on the haproxy host.
47997+
47998+ If the sysctl option is enabled, two sysctl options with names
47999+ "ip_blackhole" and "lastack_retries" will be created.
48000+ While "ip_blackhole" takes the standard zero/non-zero on/off
48001+ toggle, "lastack_retries" uses the same kinds of values as
48002+ "tcp_retries1" and "tcp_retries2". The default value of 4
48003+ prevents a socket from lasting more than 45 seconds in LAST_ACK
48004+ state.
48005+
48006+config GRKERNSEC_SOCKET
48007+ bool "Socket restrictions"
48008+ depends on NET
48009+ help
48010+ If you say Y here, you will be able to choose from several options.
48011+ If you assign a GID on your system and add it to the supplementary
48012+ groups of users you want to restrict socket access to, this patch
48013+ will perform up to three things, based on the option(s) you choose.
48014+
48015+config GRKERNSEC_SOCKET_ALL
48016+ bool "Deny any sockets to group"
48017+ depends on GRKERNSEC_SOCKET
48018+ help
48019+ If you say Y here, you will be able to choose a GID whose users will
48020+ be unable to connect to other hosts from your machine or run server
48021+ applications from your machine. If the sysctl option is enabled, a
48022+ sysctl option with name "socket_all" is created.
48023+
48024+config GRKERNSEC_SOCKET_ALL_GID
48025+ int "GID to deny all sockets for"
48026+ depends on GRKERNSEC_SOCKET_ALL
48027+ default 1004
48028+ help
48029+ Here you can choose the GID to disable socket access for. Remember to
48030+ add the users you want socket access disabled for to the GID
48031+ specified here. If the sysctl option is enabled, a sysctl option
48032+ with name "socket_all_gid" is created.
48033+
48034+config GRKERNSEC_SOCKET_CLIENT
48035+ bool "Deny client sockets to group"
48036+ depends on GRKERNSEC_SOCKET
48037+ help
48038+ If you say Y here, you will be able to choose a GID whose users will
48039+ be unable to connect to other hosts from your machine, but will be
48040+ able to run servers. If this option is enabled, all users in the group
48041+ you specify will have to use passive mode when initiating ftp transfers
48042+ from the shell on your machine. If the sysctl option is enabled, a
48043+ sysctl option with name "socket_client" is created.
48044+
48045+config GRKERNSEC_SOCKET_CLIENT_GID
48046+ int "GID to deny client sockets for"
48047+ depends on GRKERNSEC_SOCKET_CLIENT
48048+ default 1003
48049+ help
48050+ Here you can choose the GID to disable client socket access for.
48051+ Remember to add the users you want client socket access disabled for to
48052+ the GID specified here. If the sysctl option is enabled, a sysctl
48053+ option with name "socket_client_gid" is created.
48054+
48055+config GRKERNSEC_SOCKET_SERVER
48056+ bool "Deny server sockets to group"
48057+ depends on GRKERNSEC_SOCKET
48058+ help
48059+ If you say Y here, you will be able to choose a GID of whose users will
48060+ be unable to run server applications from your machine. If the sysctl
48061+ option is enabled, a sysctl option with name "socket_server" is created.
48062+
48063+config GRKERNSEC_SOCKET_SERVER_GID
48064+ int "GID to deny server sockets for"
48065+ depends on GRKERNSEC_SOCKET_SERVER
48066+ default 1002
48067+ help
48068+ Here you can choose the GID to disable server socket access for.
48069+ Remember to add the users you want server socket access disabled for to
48070+ the GID specified here. If the sysctl option is enabled, a sysctl
48071+ option with name "socket_server_gid" is created.
48072+
48073+endmenu
48074+menu "Sysctl support"
48075+depends on GRKERNSEC && SYSCTL
48076+
48077+config GRKERNSEC_SYSCTL
48078+ bool "Sysctl support"
48079+ help
48080+ If you say Y here, you will be able to change the options that
48081+ grsecurity runs with at bootup, without having to recompile your
48082+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48083+ to enable (1) or disable (0) various features. All the sysctl entries
48084+ are mutable until the "grsec_lock" entry is set to a non-zero value.
48085+ All features enabled in the kernel configuration are disabled at boot
48086+ if you do not say Y to the "Turn on features by default" option.
48087+ All options should be set at startup, and the grsec_lock entry should
48088+ be set to a non-zero value after all the options are set.
48089+ *THIS IS EXTREMELY IMPORTANT*
48090+
48091+config GRKERNSEC_SYSCTL_DISTRO
48092+ bool "Extra sysctl support for distro makers (READ HELP)"
48093+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48094+ help
48095+ If you say Y here, additional sysctl options will be created
48096+ for features that affect processes running as root. Therefore,
48097+ it is critical when using this option that the grsec_lock entry be
48098+ enabled after boot. Only distros with prebuilt kernel packages
48099+ with this option enabled that can ensure grsec_lock is enabled
48100+ after boot should use this option.
48101+ *Failure to set grsec_lock after boot makes all grsec features
48102+ this option covers useless*
48103+
48104+ Currently this option creates the following sysctl entries:
48105+ "Disable Privileged I/O": "disable_priv_io"
48106+
48107+config GRKERNSEC_SYSCTL_ON
48108+ bool "Turn on features by default"
48109+ depends on GRKERNSEC_SYSCTL
48110+ help
48111+ If you say Y here, instead of having all features enabled in the
48112+ kernel configuration disabled at boot time, the features will be
48113+ enabled at boot time. It is recommended you say Y here unless
48114+ there is some reason you would want all sysctl-tunable features to
48115+ be disabled by default. As mentioned elsewhere, it is important
48116+ to enable the grsec_lock entry once you have finished modifying
48117+ the sysctl entries.
48118+
48119+endmenu
48120+menu "Logging Options"
48121+depends on GRKERNSEC
48122+
48123+config GRKERNSEC_FLOODTIME
48124+ int "Seconds in between log messages (minimum)"
48125+ default 10
48126+ help
48127+ This option allows you to enforce the number of seconds between
48128+ grsecurity log messages. The default should be suitable for most
48129+ people, however, if you choose to change it, choose a value small enough
48130+ to allow informative logs to be produced, but large enough to
48131+ prevent flooding.
48132+
48133+config GRKERNSEC_FLOODBURST
48134+ int "Number of messages in a burst (maximum)"
48135+ default 6
48136+ help
48137+ This option allows you to choose the maximum number of messages allowed
48138+ within the flood time interval you chose in a separate option. The
48139+ default should be suitable for most people, however if you find that
48140+ many of your logs are being interpreted as flooding, you may want to
48141+ raise this value.
48142+
48143+endmenu
48144+
48145+endmenu
48146diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48147new file mode 100644
48148index 0000000..1b9afa9
48149--- /dev/null
48150+++ b/grsecurity/Makefile
48151@@ -0,0 +1,38 @@
48152+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48153+# during 2001-2009 it has been completely redesigned by Brad Spengler
48154+# into an RBAC system
48155+#
48156+# All code in this directory and various hooks inserted throughout the kernel
48157+# are copyright Brad Spengler - Open Source Security, Inc., and released
48158+# under the GPL v2 or higher
48159+
48160+KBUILD_CFLAGS += -Werror
48161+
48162+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48163+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
48164+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48165+
48166+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48167+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48168+ gracl_learn.o grsec_log.o
48169+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48170+
48171+ifdef CONFIG_NET
48172+obj-y += grsec_sock.o
48173+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48174+endif
48175+
48176+ifndef CONFIG_GRKERNSEC
48177+obj-y += grsec_disabled.o
48178+endif
48179+
48180+ifdef CONFIG_GRKERNSEC_HIDESYM
48181+extra-y := grsec_hidesym.o
48182+$(obj)/grsec_hidesym.o:
48183+ @-chmod -f 500 /boot
48184+ @-chmod -f 500 /lib/modules
48185+ @-chmod -f 500 /lib64/modules
48186+ @-chmod -f 500 /lib32/modules
48187+ @-chmod -f 700 .
48188+ @echo ' grsec: protected kernel image paths'
48189+endif
48190diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48191new file mode 100644
48192index 0000000..cf294ac
48193--- /dev/null
48194+++ b/grsecurity/gracl.c
48195@@ -0,0 +1,4163 @@
48196+#include <linux/kernel.h>
48197+#include <linux/module.h>
48198+#include <linux/sched.h>
48199+#include <linux/mm.h>
48200+#include <linux/file.h>
48201+#include <linux/fs.h>
48202+#include <linux/namei.h>
48203+#include <linux/mount.h>
48204+#include <linux/tty.h>
48205+#include <linux/proc_fs.h>
48206+#include <linux/lglock.h>
48207+#include <linux/slab.h>
48208+#include <linux/vmalloc.h>
48209+#include <linux/types.h>
48210+#include <linux/sysctl.h>
48211+#include <linux/netdevice.h>
48212+#include <linux/ptrace.h>
48213+#include <linux/gracl.h>
48214+#include <linux/gralloc.h>
48215+#include <linux/security.h>
48216+#include <linux/grinternal.h>
48217+#include <linux/pid_namespace.h>
48218+#include <linux/fdtable.h>
48219+#include <linux/percpu.h>
48220+
48221+#include <asm/uaccess.h>
48222+#include <asm/errno.h>
48223+#include <asm/mman.h>
48224+
48225+static struct acl_role_db acl_role_set;
48226+static struct name_db name_set;
48227+static struct inodev_db inodev_set;
48228+
48229+/* for keeping track of userspace pointers used for subjects, so we
48230+ can share references in the kernel as well
48231+*/
48232+
48233+static struct path real_root;
48234+
48235+static struct acl_subj_map_db subj_map_set;
48236+
48237+static struct acl_role_label *default_role;
48238+
48239+static struct acl_role_label *role_list;
48240+
48241+static u16 acl_sp_role_value;
48242+
48243+extern char *gr_shared_page[4];
48244+static DEFINE_MUTEX(gr_dev_mutex);
48245+DEFINE_RWLOCK(gr_inode_lock);
48246+
48247+struct gr_arg *gr_usermode;
48248+
48249+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48250+
48251+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48252+extern void gr_clear_learn_entries(void);
48253+
48254+#ifdef CONFIG_GRKERNSEC_RESLOG
48255+extern void gr_log_resource(const struct task_struct *task,
48256+ const int res, const unsigned long wanted, const int gt);
48257+#endif
48258+
48259+unsigned char *gr_system_salt;
48260+unsigned char *gr_system_sum;
48261+
48262+static struct sprole_pw **acl_special_roles = NULL;
48263+static __u16 num_sprole_pws = 0;
48264+
48265+static struct acl_role_label *kernel_role = NULL;
48266+
48267+static unsigned int gr_auth_attempts = 0;
48268+static unsigned long gr_auth_expires = 0UL;
48269+
48270+#ifdef CONFIG_NET
48271+extern struct vfsmount *sock_mnt;
48272+#endif
48273+
48274+extern struct vfsmount *pipe_mnt;
48275+extern struct vfsmount *shm_mnt;
48276+#ifdef CONFIG_HUGETLBFS
48277+extern struct vfsmount *hugetlbfs_vfsmount;
48278+#endif
48279+
48280+static struct acl_object_label *fakefs_obj_rw;
48281+static struct acl_object_label *fakefs_obj_rwx;
48282+
48283+extern int gr_init_uidset(void);
48284+extern void gr_free_uidset(void);
48285+extern void gr_remove_uid(uid_t uid);
48286+extern int gr_find_uid(uid_t uid);
48287+
48288+DECLARE_BRLOCK(vfsmount_lock);
48289+
48290+__inline__ int
48291+gr_acl_is_enabled(void)
48292+{
48293+ return (gr_status & GR_READY);
48294+}
48295+
48296+#ifdef CONFIG_BTRFS_FS
48297+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48298+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48299+#endif
48300+
48301+static inline dev_t __get_dev(const struct dentry *dentry)
48302+{
48303+#ifdef CONFIG_BTRFS_FS
48304+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48305+ return get_btrfs_dev_from_inode(dentry->d_inode);
48306+ else
48307+#endif
48308+ return dentry->d_inode->i_sb->s_dev;
48309+}
48310+
48311+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48312+{
48313+ return __get_dev(dentry);
48314+}
48315+
48316+static char gr_task_roletype_to_char(struct task_struct *task)
48317+{
48318+ switch (task->role->roletype &
48319+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48320+ GR_ROLE_SPECIAL)) {
48321+ case GR_ROLE_DEFAULT:
48322+ return 'D';
48323+ case GR_ROLE_USER:
48324+ return 'U';
48325+ case GR_ROLE_GROUP:
48326+ return 'G';
48327+ case GR_ROLE_SPECIAL:
48328+ return 'S';
48329+ }
48330+
48331+ return 'X';
48332+}
48333+
48334+char gr_roletype_to_char(void)
48335+{
48336+ return gr_task_roletype_to_char(current);
48337+}
48338+
48339+__inline__ int
48340+gr_acl_tpe_check(void)
48341+{
48342+ if (unlikely(!(gr_status & GR_READY)))
48343+ return 0;
48344+ if (current->role->roletype & GR_ROLE_TPE)
48345+ return 1;
48346+ else
48347+ return 0;
48348+}
48349+
48350+int
48351+gr_handle_rawio(const struct inode *inode)
48352+{
48353+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48354+ if (inode && S_ISBLK(inode->i_mode) &&
48355+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48356+ !capable(CAP_SYS_RAWIO))
48357+ return 1;
48358+#endif
48359+ return 0;
48360+}
48361+
48362+static int
48363+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48364+{
48365+ if (likely(lena != lenb))
48366+ return 0;
48367+
48368+ return !memcmp(a, b, lena);
48369+}
48370+
48371+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48372+{
48373+ *buflen -= namelen;
48374+ if (*buflen < 0)
48375+ return -ENAMETOOLONG;
48376+ *buffer -= namelen;
48377+ memcpy(*buffer, str, namelen);
48378+ return 0;
48379+}
48380+
48381+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48382+{
48383+ return prepend(buffer, buflen, name->name, name->len);
48384+}
48385+
48386+static int prepend_path(const struct path *path, struct path *root,
48387+ char **buffer, int *buflen)
48388+{
48389+ struct dentry *dentry = path->dentry;
48390+ struct vfsmount *vfsmnt = path->mnt;
48391+ bool slash = false;
48392+ int error = 0;
48393+
48394+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48395+ struct dentry * parent;
48396+
48397+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48398+ /* Global root? */
48399+ if (vfsmnt->mnt_parent == vfsmnt) {
48400+ goto out;
48401+ }
48402+ dentry = vfsmnt->mnt_mountpoint;
48403+ vfsmnt = vfsmnt->mnt_parent;
48404+ continue;
48405+ }
48406+ parent = dentry->d_parent;
48407+ prefetch(parent);
48408+ spin_lock(&dentry->d_lock);
48409+ error = prepend_name(buffer, buflen, &dentry->d_name);
48410+ spin_unlock(&dentry->d_lock);
48411+ if (!error)
48412+ error = prepend(buffer, buflen, "/", 1);
48413+ if (error)
48414+ break;
48415+
48416+ slash = true;
48417+ dentry = parent;
48418+ }
48419+
48420+out:
48421+ if (!error && !slash)
48422+ error = prepend(buffer, buflen, "/", 1);
48423+
48424+ return error;
48425+}
48426+
48427+/* this must be called with vfsmount_lock and rename_lock held */
48428+
48429+static char *__our_d_path(const struct path *path, struct path *root,
48430+ char *buf, int buflen)
48431+{
48432+ char *res = buf + buflen;
48433+ int error;
48434+
48435+ prepend(&res, &buflen, "\0", 1);
48436+ error = prepend_path(path, root, &res, &buflen);
48437+ if (error)
48438+ return ERR_PTR(error);
48439+
48440+ return res;
48441+}
48442+
48443+static char *
48444+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48445+{
48446+ char *retval;
48447+
48448+ retval = __our_d_path(path, root, buf, buflen);
48449+ if (unlikely(IS_ERR(retval)))
48450+ retval = strcpy(buf, "<path too long>");
48451+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48452+ retval[1] = '\0';
48453+
48454+ return retval;
48455+}
48456+
48457+static char *
48458+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48459+ char *buf, int buflen)
48460+{
48461+ struct path path;
48462+ char *res;
48463+
48464+ path.dentry = (struct dentry *)dentry;
48465+ path.mnt = (struct vfsmount *)vfsmnt;
48466+
48467+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48468+ by the RBAC system */
48469+ res = gen_full_path(&path, &real_root, buf, buflen);
48470+
48471+ return res;
48472+}
48473+
48474+static char *
48475+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48476+ char *buf, int buflen)
48477+{
48478+ char *res;
48479+ struct path path;
48480+ struct path root;
48481+ struct task_struct *reaper = &init_task;
48482+
48483+ path.dentry = (struct dentry *)dentry;
48484+ path.mnt = (struct vfsmount *)vfsmnt;
48485+
48486+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48487+ get_fs_root(reaper->fs, &root);
48488+
48489+ write_seqlock(&rename_lock);
48490+ br_read_lock(vfsmount_lock);
48491+ res = gen_full_path(&path, &root, buf, buflen);
48492+ br_read_unlock(vfsmount_lock);
48493+ write_sequnlock(&rename_lock);
48494+
48495+ path_put(&root);
48496+ return res;
48497+}
48498+
48499+static char *
48500+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48501+{
48502+ char *ret;
48503+ write_seqlock(&rename_lock);
48504+ br_read_lock(vfsmount_lock);
48505+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48506+ PAGE_SIZE);
48507+ br_read_unlock(vfsmount_lock);
48508+ write_sequnlock(&rename_lock);
48509+ return ret;
48510+}
48511+
48512+static char *
48513+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48514+{
48515+ char *ret;
48516+ char *buf;
48517+ int buflen;
48518+
48519+ write_seqlock(&rename_lock);
48520+ br_read_lock(vfsmount_lock);
48521+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48522+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48523+ buflen = (int)(ret - buf);
48524+ if (buflen >= 5)
48525+ prepend(&ret, &buflen, "/proc", 5);
48526+ else
48527+ ret = strcpy(buf, "<path too long>");
48528+ br_read_unlock(vfsmount_lock);
48529+ write_sequnlock(&rename_lock);
48530+ return ret;
48531+}
48532+
48533+char *
48534+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48535+{
48536+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48537+ PAGE_SIZE);
48538+}
48539+
48540+char *
48541+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48542+{
48543+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48544+ PAGE_SIZE);
48545+}
48546+
48547+char *
48548+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48549+{
48550+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48551+ PAGE_SIZE);
48552+}
48553+
48554+char *
48555+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48556+{
48557+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48558+ PAGE_SIZE);
48559+}
48560+
48561+char *
48562+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48563+{
48564+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48565+ PAGE_SIZE);
48566+}
48567+
48568+__inline__ __u32
48569+to_gr_audit(const __u32 reqmode)
48570+{
48571+ /* masks off auditable permission flags, then shifts them to create
48572+ auditing flags, and adds the special case of append auditing if
48573+ we're requesting write */
48574+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48575+}
48576+
48577+struct acl_subject_label *
48578+lookup_subject_map(const struct acl_subject_label *userp)
48579+{
48580+ unsigned int index = shash(userp, subj_map_set.s_size);
48581+ struct subject_map *match;
48582+
48583+ match = subj_map_set.s_hash[index];
48584+
48585+ while (match && match->user != userp)
48586+ match = match->next;
48587+
48588+ if (match != NULL)
48589+ return match->kernel;
48590+ else
48591+ return NULL;
48592+}
48593+
48594+static void
48595+insert_subj_map_entry(struct subject_map *subjmap)
48596+{
48597+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48598+ struct subject_map **curr;
48599+
48600+ subjmap->prev = NULL;
48601+
48602+ curr = &subj_map_set.s_hash[index];
48603+ if (*curr != NULL)
48604+ (*curr)->prev = subjmap;
48605+
48606+ subjmap->next = *curr;
48607+ *curr = subjmap;
48608+
48609+ return;
48610+}
48611+
48612+static struct acl_role_label *
48613+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48614+ const gid_t gid)
48615+{
48616+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48617+ struct acl_role_label *match;
48618+ struct role_allowed_ip *ipp;
48619+ unsigned int x;
48620+ u32 curr_ip = task->signal->curr_ip;
48621+
48622+ task->signal->saved_ip = curr_ip;
48623+
48624+ match = acl_role_set.r_hash[index];
48625+
48626+ while (match) {
48627+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48628+ for (x = 0; x < match->domain_child_num; x++) {
48629+ if (match->domain_children[x] == uid)
48630+ goto found;
48631+ }
48632+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48633+ break;
48634+ match = match->next;
48635+ }
48636+found:
48637+ if (match == NULL) {
48638+ try_group:
48639+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48640+ match = acl_role_set.r_hash[index];
48641+
48642+ while (match) {
48643+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48644+ for (x = 0; x < match->domain_child_num; x++) {
48645+ if (match->domain_children[x] == gid)
48646+ goto found2;
48647+ }
48648+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48649+ break;
48650+ match = match->next;
48651+ }
48652+found2:
48653+ if (match == NULL)
48654+ match = default_role;
48655+ if (match->allowed_ips == NULL)
48656+ return match;
48657+ else {
48658+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48659+ if (likely
48660+ ((ntohl(curr_ip) & ipp->netmask) ==
48661+ (ntohl(ipp->addr) & ipp->netmask)))
48662+ return match;
48663+ }
48664+ match = default_role;
48665+ }
48666+ } else if (match->allowed_ips == NULL) {
48667+ return match;
48668+ } else {
48669+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48670+ if (likely
48671+ ((ntohl(curr_ip) & ipp->netmask) ==
48672+ (ntohl(ipp->addr) & ipp->netmask)))
48673+ return match;
48674+ }
48675+ goto try_group;
48676+ }
48677+
48678+ return match;
48679+}
48680+
48681+struct acl_subject_label *
48682+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48683+ const struct acl_role_label *role)
48684+{
48685+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48686+ struct acl_subject_label *match;
48687+
48688+ match = role->subj_hash[index];
48689+
48690+ while (match && (match->inode != ino || match->device != dev ||
48691+ (match->mode & GR_DELETED))) {
48692+ match = match->next;
48693+ }
48694+
48695+ if (match && !(match->mode & GR_DELETED))
48696+ return match;
48697+ else
48698+ return NULL;
48699+}
48700+
48701+struct acl_subject_label *
48702+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48703+ const struct acl_role_label *role)
48704+{
48705+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48706+ struct acl_subject_label *match;
48707+
48708+ match = role->subj_hash[index];
48709+
48710+ while (match && (match->inode != ino || match->device != dev ||
48711+ !(match->mode & GR_DELETED))) {
48712+ match = match->next;
48713+ }
48714+
48715+ if (match && (match->mode & GR_DELETED))
48716+ return match;
48717+ else
48718+ return NULL;
48719+}
48720+
48721+static struct acl_object_label *
48722+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48723+ const struct acl_subject_label *subj)
48724+{
48725+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48726+ struct acl_object_label *match;
48727+
48728+ match = subj->obj_hash[index];
48729+
48730+ while (match && (match->inode != ino || match->device != dev ||
48731+ (match->mode & GR_DELETED))) {
48732+ match = match->next;
48733+ }
48734+
48735+ if (match && !(match->mode & GR_DELETED))
48736+ return match;
48737+ else
48738+ return NULL;
48739+}
48740+
48741+static struct acl_object_label *
48742+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48743+ const struct acl_subject_label *subj)
48744+{
48745+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48746+ struct acl_object_label *match;
48747+
48748+ match = subj->obj_hash[index];
48749+
48750+ while (match && (match->inode != ino || match->device != dev ||
48751+ !(match->mode & GR_DELETED))) {
48752+ match = match->next;
48753+ }
48754+
48755+ if (match && (match->mode & GR_DELETED))
48756+ return match;
48757+
48758+ match = subj->obj_hash[index];
48759+
48760+ while (match && (match->inode != ino || match->device != dev ||
48761+ (match->mode & GR_DELETED))) {
48762+ match = match->next;
48763+ }
48764+
48765+ if (match && !(match->mode & GR_DELETED))
48766+ return match;
48767+ else
48768+ return NULL;
48769+}
48770+
48771+static struct name_entry *
48772+lookup_name_entry(const char *name)
48773+{
48774+ unsigned int len = strlen(name);
48775+ unsigned int key = full_name_hash(name, len);
48776+ unsigned int index = key % name_set.n_size;
48777+ struct name_entry *match;
48778+
48779+ match = name_set.n_hash[index];
48780+
48781+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48782+ match = match->next;
48783+
48784+ return match;
48785+}
48786+
48787+static struct name_entry *
48788+lookup_name_entry_create(const char *name)
48789+{
48790+ unsigned int len = strlen(name);
48791+ unsigned int key = full_name_hash(name, len);
48792+ unsigned int index = key % name_set.n_size;
48793+ struct name_entry *match;
48794+
48795+ match = name_set.n_hash[index];
48796+
48797+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48798+ !match->deleted))
48799+ match = match->next;
48800+
48801+ if (match && match->deleted)
48802+ return match;
48803+
48804+ match = name_set.n_hash[index];
48805+
48806+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48807+ match->deleted))
48808+ match = match->next;
48809+
48810+ if (match && !match->deleted)
48811+ return match;
48812+ else
48813+ return NULL;
48814+}
48815+
48816+static struct inodev_entry *
48817+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48818+{
48819+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48820+ struct inodev_entry *match;
48821+
48822+ match = inodev_set.i_hash[index];
48823+
48824+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48825+ match = match->next;
48826+
48827+ return match;
48828+}
48829+
48830+static void
48831+insert_inodev_entry(struct inodev_entry *entry)
48832+{
48833+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48834+ inodev_set.i_size);
48835+ struct inodev_entry **curr;
48836+
48837+ entry->prev = NULL;
48838+
48839+ curr = &inodev_set.i_hash[index];
48840+ if (*curr != NULL)
48841+ (*curr)->prev = entry;
48842+
48843+ entry->next = *curr;
48844+ *curr = entry;
48845+
48846+ return;
48847+}
48848+
48849+static void
48850+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48851+{
48852+ unsigned int index =
48853+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48854+ struct acl_role_label **curr;
48855+ struct acl_role_label *tmp;
48856+
48857+ curr = &acl_role_set.r_hash[index];
48858+
48859+ /* if role was already inserted due to domains and already has
48860+ a role in the same bucket as it attached, then we need to
48861+ combine these two buckets
48862+ */
48863+ if (role->next) {
48864+ tmp = role->next;
48865+ while (tmp->next)
48866+ tmp = tmp->next;
48867+ tmp->next = *curr;
48868+ } else
48869+ role->next = *curr;
48870+ *curr = role;
48871+
48872+ return;
48873+}
48874+
48875+static void
48876+insert_acl_role_label(struct acl_role_label *role)
48877+{
48878+ int i;
48879+
48880+ if (role_list == NULL) {
48881+ role_list = role;
48882+ role->prev = NULL;
48883+ } else {
48884+ role->prev = role_list;
48885+ role_list = role;
48886+ }
48887+
48888+ /* used for hash chains */
48889+ role->next = NULL;
48890+
48891+ if (role->roletype & GR_ROLE_DOMAIN) {
48892+ for (i = 0; i < role->domain_child_num; i++)
48893+ __insert_acl_role_label(role, role->domain_children[i]);
48894+ } else
48895+ __insert_acl_role_label(role, role->uidgid);
48896+}
48897+
48898+static int
48899+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48900+{
48901+ struct name_entry **curr, *nentry;
48902+ struct inodev_entry *ientry;
48903+ unsigned int len = strlen(name);
48904+ unsigned int key = full_name_hash(name, len);
48905+ unsigned int index = key % name_set.n_size;
48906+
48907+ curr = &name_set.n_hash[index];
48908+
48909+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48910+ curr = &((*curr)->next);
48911+
48912+ if (*curr != NULL)
48913+ return 1;
48914+
48915+ nentry = acl_alloc(sizeof (struct name_entry));
48916+ if (nentry == NULL)
48917+ return 0;
48918+ ientry = acl_alloc(sizeof (struct inodev_entry));
48919+ if (ientry == NULL)
48920+ return 0;
48921+ ientry->nentry = nentry;
48922+
48923+ nentry->key = key;
48924+ nentry->name = name;
48925+ nentry->inode = inode;
48926+ nentry->device = device;
48927+ nentry->len = len;
48928+ nentry->deleted = deleted;
48929+
48930+ nentry->prev = NULL;
48931+ curr = &name_set.n_hash[index];
48932+ if (*curr != NULL)
48933+ (*curr)->prev = nentry;
48934+ nentry->next = *curr;
48935+ *curr = nentry;
48936+
48937+ /* insert us into the table searchable by inode/dev */
48938+ insert_inodev_entry(ientry);
48939+
48940+ return 1;
48941+}
48942+
48943+static void
48944+insert_acl_obj_label(struct acl_object_label *obj,
48945+ struct acl_subject_label *subj)
48946+{
48947+ unsigned int index =
48948+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48949+ struct acl_object_label **curr;
48950+
48951+
48952+ obj->prev = NULL;
48953+
48954+ curr = &subj->obj_hash[index];
48955+ if (*curr != NULL)
48956+ (*curr)->prev = obj;
48957+
48958+ obj->next = *curr;
48959+ *curr = obj;
48960+
48961+ return;
48962+}
48963+
48964+static void
48965+insert_acl_subj_label(struct acl_subject_label *obj,
48966+ struct acl_role_label *role)
48967+{
48968+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48969+ struct acl_subject_label **curr;
48970+
48971+ obj->prev = NULL;
48972+
48973+ curr = &role->subj_hash[index];
48974+ if (*curr != NULL)
48975+ (*curr)->prev = obj;
48976+
48977+ obj->next = *curr;
48978+ *curr = obj;
48979+
48980+ return;
48981+}
48982+
48983+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48984+
48985+static void *
48986+create_table(__u32 * len, int elementsize)
48987+{
48988+ unsigned int table_sizes[] = {
48989+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48990+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48991+ 4194301, 8388593, 16777213, 33554393, 67108859
48992+ };
48993+ void *newtable = NULL;
48994+ unsigned int pwr = 0;
48995+
48996+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48997+ table_sizes[pwr] <= *len)
48998+ pwr++;
48999+
49000+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49001+ return newtable;
49002+
49003+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49004+ newtable =
49005+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49006+ else
49007+ newtable = vmalloc(table_sizes[pwr] * elementsize);
49008+
49009+ *len = table_sizes[pwr];
49010+
49011+ return newtable;
49012+}
49013+
49014+static int
49015+init_variables(const struct gr_arg *arg)
49016+{
49017+ struct task_struct *reaper = &init_task;
49018+ unsigned int stacksize;
49019+
49020+ subj_map_set.s_size = arg->role_db.num_subjects;
49021+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49022+ name_set.n_size = arg->role_db.num_objects;
49023+ inodev_set.i_size = arg->role_db.num_objects;
49024+
49025+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
49026+ !name_set.n_size || !inodev_set.i_size)
49027+ return 1;
49028+
49029+ if (!gr_init_uidset())
49030+ return 1;
49031+
49032+ /* set up the stack that holds allocation info */
49033+
49034+ stacksize = arg->role_db.num_pointers + 5;
49035+
49036+ if (!acl_alloc_stack_init(stacksize))
49037+ return 1;
49038+
49039+ /* grab reference for the real root dentry and vfsmount */
49040+ get_fs_root(reaper->fs, &real_root);
49041+
49042+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49043+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49044+#endif
49045+
49046+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49047+ if (fakefs_obj_rw == NULL)
49048+ return 1;
49049+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49050+
49051+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49052+ if (fakefs_obj_rwx == NULL)
49053+ return 1;
49054+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49055+
49056+ subj_map_set.s_hash =
49057+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49058+ acl_role_set.r_hash =
49059+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49060+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49061+ inodev_set.i_hash =
49062+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49063+
49064+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49065+ !name_set.n_hash || !inodev_set.i_hash)
49066+ return 1;
49067+
49068+ memset(subj_map_set.s_hash, 0,
49069+ sizeof(struct subject_map *) * subj_map_set.s_size);
49070+ memset(acl_role_set.r_hash, 0,
49071+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
49072+ memset(name_set.n_hash, 0,
49073+ sizeof (struct name_entry *) * name_set.n_size);
49074+ memset(inodev_set.i_hash, 0,
49075+ sizeof (struct inodev_entry *) * inodev_set.i_size);
49076+
49077+ return 0;
49078+}
49079+
49080+/* free information not needed after startup
49081+ currently contains user->kernel pointer mappings for subjects
49082+*/
49083+
49084+static void
49085+free_init_variables(void)
49086+{
49087+ __u32 i;
49088+
49089+ if (subj_map_set.s_hash) {
49090+ for (i = 0; i < subj_map_set.s_size; i++) {
49091+ if (subj_map_set.s_hash[i]) {
49092+ kfree(subj_map_set.s_hash[i]);
49093+ subj_map_set.s_hash[i] = NULL;
49094+ }
49095+ }
49096+
49097+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49098+ PAGE_SIZE)
49099+ kfree(subj_map_set.s_hash);
49100+ else
49101+ vfree(subj_map_set.s_hash);
49102+ }
49103+
49104+ return;
49105+}
49106+
49107+static void
49108+free_variables(void)
49109+{
49110+ struct acl_subject_label *s;
49111+ struct acl_role_label *r;
49112+ struct task_struct *task, *task2;
49113+ unsigned int x;
49114+
49115+ gr_clear_learn_entries();
49116+
49117+ read_lock(&tasklist_lock);
49118+ do_each_thread(task2, task) {
49119+ task->acl_sp_role = 0;
49120+ task->acl_role_id = 0;
49121+ task->acl = NULL;
49122+ task->role = NULL;
49123+ } while_each_thread(task2, task);
49124+ read_unlock(&tasklist_lock);
49125+
49126+ /* release the reference to the real root dentry and vfsmount */
49127+ path_put(&real_root);
49128+
49129+ /* free all object hash tables */
49130+
49131+ FOR_EACH_ROLE_START(r)
49132+ if (r->subj_hash == NULL)
49133+ goto next_role;
49134+ FOR_EACH_SUBJECT_START(r, s, x)
49135+ if (s->obj_hash == NULL)
49136+ break;
49137+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49138+ kfree(s->obj_hash);
49139+ else
49140+ vfree(s->obj_hash);
49141+ FOR_EACH_SUBJECT_END(s, x)
49142+ FOR_EACH_NESTED_SUBJECT_START(r, s)
49143+ if (s->obj_hash == NULL)
49144+ break;
49145+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49146+ kfree(s->obj_hash);
49147+ else
49148+ vfree(s->obj_hash);
49149+ FOR_EACH_NESTED_SUBJECT_END(s)
49150+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49151+ kfree(r->subj_hash);
49152+ else
49153+ vfree(r->subj_hash);
49154+ r->subj_hash = NULL;
49155+next_role:
49156+ FOR_EACH_ROLE_END(r)
49157+
49158+ acl_free_all();
49159+
49160+ if (acl_role_set.r_hash) {
49161+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49162+ PAGE_SIZE)
49163+ kfree(acl_role_set.r_hash);
49164+ else
49165+ vfree(acl_role_set.r_hash);
49166+ }
49167+ if (name_set.n_hash) {
49168+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
49169+ PAGE_SIZE)
49170+ kfree(name_set.n_hash);
49171+ else
49172+ vfree(name_set.n_hash);
49173+ }
49174+
49175+ if (inodev_set.i_hash) {
49176+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49177+ PAGE_SIZE)
49178+ kfree(inodev_set.i_hash);
49179+ else
49180+ vfree(inodev_set.i_hash);
49181+ }
49182+
49183+ gr_free_uidset();
49184+
49185+ memset(&name_set, 0, sizeof (struct name_db));
49186+ memset(&inodev_set, 0, sizeof (struct inodev_db));
49187+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49188+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49189+
49190+ default_role = NULL;
49191+ role_list = NULL;
49192+
49193+ return;
49194+}
49195+
49196+static __u32
49197+count_user_objs(struct acl_object_label *userp)
49198+{
49199+ struct acl_object_label o_tmp;
49200+ __u32 num = 0;
49201+
49202+ while (userp) {
49203+ if (copy_from_user(&o_tmp, userp,
49204+ sizeof (struct acl_object_label)))
49205+ break;
49206+
49207+ userp = o_tmp.prev;
49208+ num++;
49209+ }
49210+
49211+ return num;
49212+}
49213+
49214+static struct acl_subject_label *
49215+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49216+
49217+static int
49218+copy_user_glob(struct acl_object_label *obj)
49219+{
49220+ struct acl_object_label *g_tmp, **guser;
49221+ unsigned int len;
49222+ char *tmp;
49223+
49224+ if (obj->globbed == NULL)
49225+ return 0;
49226+
49227+ guser = &obj->globbed;
49228+ while (*guser) {
49229+ g_tmp = (struct acl_object_label *)
49230+ acl_alloc(sizeof (struct acl_object_label));
49231+ if (g_tmp == NULL)
49232+ return -ENOMEM;
49233+
49234+ if (copy_from_user(g_tmp, *guser,
49235+ sizeof (struct acl_object_label)))
49236+ return -EFAULT;
49237+
49238+ len = strnlen_user(g_tmp->filename, PATH_MAX);
49239+
49240+ if (!len || len >= PATH_MAX)
49241+ return -EINVAL;
49242+
49243+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49244+ return -ENOMEM;
49245+
49246+ if (copy_from_user(tmp, g_tmp->filename, len))
49247+ return -EFAULT;
49248+ tmp[len-1] = '\0';
49249+ g_tmp->filename = tmp;
49250+
49251+ *guser = g_tmp;
49252+ guser = &(g_tmp->next);
49253+ }
49254+
49255+ return 0;
49256+}
49257+
49258+static int
49259+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49260+ struct acl_role_label *role)
49261+{
49262+ struct acl_object_label *o_tmp;
49263+ unsigned int len;
49264+ int ret;
49265+ char *tmp;
49266+
49267+ while (userp) {
49268+ if ((o_tmp = (struct acl_object_label *)
49269+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
49270+ return -ENOMEM;
49271+
49272+ if (copy_from_user(o_tmp, userp,
49273+ sizeof (struct acl_object_label)))
49274+ return -EFAULT;
49275+
49276+ userp = o_tmp->prev;
49277+
49278+ len = strnlen_user(o_tmp->filename, PATH_MAX);
49279+
49280+ if (!len || len >= PATH_MAX)
49281+ return -EINVAL;
49282+
49283+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49284+ return -ENOMEM;
49285+
49286+ if (copy_from_user(tmp, o_tmp->filename, len))
49287+ return -EFAULT;
49288+ tmp[len-1] = '\0';
49289+ o_tmp->filename = tmp;
49290+
49291+ insert_acl_obj_label(o_tmp, subj);
49292+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49293+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49294+ return -ENOMEM;
49295+
49296+ ret = copy_user_glob(o_tmp);
49297+ if (ret)
49298+ return ret;
49299+
49300+ if (o_tmp->nested) {
49301+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49302+ if (IS_ERR(o_tmp->nested))
49303+ return PTR_ERR(o_tmp->nested);
49304+
49305+ /* insert into nested subject list */
49306+ o_tmp->nested->next = role->hash->first;
49307+ role->hash->first = o_tmp->nested;
49308+ }
49309+ }
49310+
49311+ return 0;
49312+}
49313+
49314+static __u32
49315+count_user_subjs(struct acl_subject_label *userp)
49316+{
49317+ struct acl_subject_label s_tmp;
49318+ __u32 num = 0;
49319+
49320+ while (userp) {
49321+ if (copy_from_user(&s_tmp, userp,
49322+ sizeof (struct acl_subject_label)))
49323+ break;
49324+
49325+ userp = s_tmp.prev;
49326+ /* do not count nested subjects against this count, since
49327+ they are not included in the hash table, but are
49328+ attached to objects. We have already counted
49329+ the subjects in userspace for the allocation
49330+ stack
49331+ */
49332+ if (!(s_tmp.mode & GR_NESTED))
49333+ num++;
49334+ }
49335+
49336+ return num;
49337+}
49338+
49339+static int
49340+copy_user_allowedips(struct acl_role_label *rolep)
49341+{
49342+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49343+
49344+ ruserip = rolep->allowed_ips;
49345+
49346+ while (ruserip) {
49347+ rlast = rtmp;
49348+
49349+ if ((rtmp = (struct role_allowed_ip *)
49350+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49351+ return -ENOMEM;
49352+
49353+ if (copy_from_user(rtmp, ruserip,
49354+ sizeof (struct role_allowed_ip)))
49355+ return -EFAULT;
49356+
49357+ ruserip = rtmp->prev;
49358+
49359+ if (!rlast) {
49360+ rtmp->prev = NULL;
49361+ rolep->allowed_ips = rtmp;
49362+ } else {
49363+ rlast->next = rtmp;
49364+ rtmp->prev = rlast;
49365+ }
49366+
49367+ if (!ruserip)
49368+ rtmp->next = NULL;
49369+ }
49370+
49371+ return 0;
49372+}
49373+
49374+static int
49375+copy_user_transitions(struct acl_role_label *rolep)
49376+{
49377+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49378+
49379+ unsigned int len;
49380+ char *tmp;
49381+
49382+ rusertp = rolep->transitions;
49383+
49384+ while (rusertp) {
49385+ rlast = rtmp;
49386+
49387+ if ((rtmp = (struct role_transition *)
49388+ acl_alloc(sizeof (struct role_transition))) == NULL)
49389+ return -ENOMEM;
49390+
49391+ if (copy_from_user(rtmp, rusertp,
49392+ sizeof (struct role_transition)))
49393+ return -EFAULT;
49394+
49395+ rusertp = rtmp->prev;
49396+
49397+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49398+
49399+ if (!len || len >= GR_SPROLE_LEN)
49400+ return -EINVAL;
49401+
49402+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49403+ return -ENOMEM;
49404+
49405+ if (copy_from_user(tmp, rtmp->rolename, len))
49406+ return -EFAULT;
49407+ tmp[len-1] = '\0';
49408+ rtmp->rolename = tmp;
49409+
49410+ if (!rlast) {
49411+ rtmp->prev = NULL;
49412+ rolep->transitions = rtmp;
49413+ } else {
49414+ rlast->next = rtmp;
49415+ rtmp->prev = rlast;
49416+ }
49417+
49418+ if (!rusertp)
49419+ rtmp->next = NULL;
49420+ }
49421+
49422+ return 0;
49423+}
49424+
49425+static struct acl_subject_label *
49426+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49427+{
49428+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49429+ unsigned int len;
49430+ char *tmp;
49431+ __u32 num_objs;
49432+ struct acl_ip_label **i_tmp, *i_utmp2;
49433+ struct gr_hash_struct ghash;
49434+ struct subject_map *subjmap;
49435+ unsigned int i_num;
49436+ int err;
49437+
49438+ s_tmp = lookup_subject_map(userp);
49439+
49440+ /* we've already copied this subject into the kernel, just return
49441+ the reference to it, and don't copy it over again
49442+ */
49443+ if (s_tmp)
49444+ return(s_tmp);
49445+
49446+ if ((s_tmp = (struct acl_subject_label *)
49447+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49448+ return ERR_PTR(-ENOMEM);
49449+
49450+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49451+ if (subjmap == NULL)
49452+ return ERR_PTR(-ENOMEM);
49453+
49454+ subjmap->user = userp;
49455+ subjmap->kernel = s_tmp;
49456+ insert_subj_map_entry(subjmap);
49457+
49458+ if (copy_from_user(s_tmp, userp,
49459+ sizeof (struct acl_subject_label)))
49460+ return ERR_PTR(-EFAULT);
49461+
49462+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49463+
49464+ if (!len || len >= PATH_MAX)
49465+ return ERR_PTR(-EINVAL);
49466+
49467+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49468+ return ERR_PTR(-ENOMEM);
49469+
49470+ if (copy_from_user(tmp, s_tmp->filename, len))
49471+ return ERR_PTR(-EFAULT);
49472+ tmp[len-1] = '\0';
49473+ s_tmp->filename = tmp;
49474+
49475+ if (!strcmp(s_tmp->filename, "/"))
49476+ role->root_label = s_tmp;
49477+
49478+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49479+ return ERR_PTR(-EFAULT);
49480+
49481+ /* copy user and group transition tables */
49482+
49483+ if (s_tmp->user_trans_num) {
49484+ uid_t *uidlist;
49485+
49486+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49487+ if (uidlist == NULL)
49488+ return ERR_PTR(-ENOMEM);
49489+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49490+ return ERR_PTR(-EFAULT);
49491+
49492+ s_tmp->user_transitions = uidlist;
49493+ }
49494+
49495+ if (s_tmp->group_trans_num) {
49496+ gid_t *gidlist;
49497+
49498+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49499+ if (gidlist == NULL)
49500+ return ERR_PTR(-ENOMEM);
49501+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49502+ return ERR_PTR(-EFAULT);
49503+
49504+ s_tmp->group_transitions = gidlist;
49505+ }
49506+
49507+ /* set up object hash table */
49508+ num_objs = count_user_objs(ghash.first);
49509+
49510+ s_tmp->obj_hash_size = num_objs;
49511+ s_tmp->obj_hash =
49512+ (struct acl_object_label **)
49513+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49514+
49515+ if (!s_tmp->obj_hash)
49516+ return ERR_PTR(-ENOMEM);
49517+
49518+ memset(s_tmp->obj_hash, 0,
49519+ s_tmp->obj_hash_size *
49520+ sizeof (struct acl_object_label *));
49521+
49522+ /* add in objects */
49523+ err = copy_user_objs(ghash.first, s_tmp, role);
49524+
49525+ if (err)
49526+ return ERR_PTR(err);
49527+
49528+ /* set pointer for parent subject */
49529+ if (s_tmp->parent_subject) {
49530+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49531+
49532+ if (IS_ERR(s_tmp2))
49533+ return s_tmp2;
49534+
49535+ s_tmp->parent_subject = s_tmp2;
49536+ }
49537+
49538+ /* add in ip acls */
49539+
49540+ if (!s_tmp->ip_num) {
49541+ s_tmp->ips = NULL;
49542+ goto insert;
49543+ }
49544+
49545+ i_tmp =
49546+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49547+ sizeof (struct acl_ip_label *));
49548+
49549+ if (!i_tmp)
49550+ return ERR_PTR(-ENOMEM);
49551+
49552+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49553+ *(i_tmp + i_num) =
49554+ (struct acl_ip_label *)
49555+ acl_alloc(sizeof (struct acl_ip_label));
49556+ if (!*(i_tmp + i_num))
49557+ return ERR_PTR(-ENOMEM);
49558+
49559+ if (copy_from_user
49560+ (&i_utmp2, s_tmp->ips + i_num,
49561+ sizeof (struct acl_ip_label *)))
49562+ return ERR_PTR(-EFAULT);
49563+
49564+ if (copy_from_user
49565+ (*(i_tmp + i_num), i_utmp2,
49566+ sizeof (struct acl_ip_label)))
49567+ return ERR_PTR(-EFAULT);
49568+
49569+ if ((*(i_tmp + i_num))->iface == NULL)
49570+ continue;
49571+
49572+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49573+ if (!len || len >= IFNAMSIZ)
49574+ return ERR_PTR(-EINVAL);
49575+ tmp = acl_alloc(len);
49576+ if (tmp == NULL)
49577+ return ERR_PTR(-ENOMEM);
49578+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49579+ return ERR_PTR(-EFAULT);
49580+ (*(i_tmp + i_num))->iface = tmp;
49581+ }
49582+
49583+ s_tmp->ips = i_tmp;
49584+
49585+insert:
49586+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49587+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49588+ return ERR_PTR(-ENOMEM);
49589+
49590+ return s_tmp;
49591+}
49592+
49593+static int
49594+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49595+{
49596+ struct acl_subject_label s_pre;
49597+ struct acl_subject_label * ret;
49598+ int err;
49599+
49600+ while (userp) {
49601+ if (copy_from_user(&s_pre, userp,
49602+ sizeof (struct acl_subject_label)))
49603+ return -EFAULT;
49604+
49605+ /* do not add nested subjects here, add
49606+ while parsing objects
49607+ */
49608+
49609+ if (s_pre.mode & GR_NESTED) {
49610+ userp = s_pre.prev;
49611+ continue;
49612+ }
49613+
49614+ ret = do_copy_user_subj(userp, role);
49615+
49616+ err = PTR_ERR(ret);
49617+ if (IS_ERR(ret))
49618+ return err;
49619+
49620+ insert_acl_subj_label(ret, role);
49621+
49622+ userp = s_pre.prev;
49623+ }
49624+
49625+ return 0;
49626+}
49627+
49628+static int
49629+copy_user_acl(struct gr_arg *arg)
49630+{
49631+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49632+ struct sprole_pw *sptmp;
49633+ struct gr_hash_struct *ghash;
49634+ uid_t *domainlist;
49635+ unsigned int r_num;
49636+ unsigned int len;
49637+ char *tmp;
49638+ int err = 0;
49639+ __u16 i;
49640+ __u32 num_subjs;
49641+
49642+ /* we need a default and kernel role */
49643+ if (arg->role_db.num_roles < 2)
49644+ return -EINVAL;
49645+
49646+ /* copy special role authentication info from userspace */
49647+
49648+ num_sprole_pws = arg->num_sprole_pws;
49649+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49650+
49651+ if (!acl_special_roles) {
49652+ err = -ENOMEM;
49653+ goto cleanup;
49654+ }
49655+
49656+ for (i = 0; i < num_sprole_pws; i++) {
49657+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49658+ if (!sptmp) {
49659+ err = -ENOMEM;
49660+ goto cleanup;
49661+ }
49662+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49663+ sizeof (struct sprole_pw))) {
49664+ err = -EFAULT;
49665+ goto cleanup;
49666+ }
49667+
49668+ len =
49669+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49670+
49671+ if (!len || len >= GR_SPROLE_LEN) {
49672+ err = -EINVAL;
49673+ goto cleanup;
49674+ }
49675+
49676+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49677+ err = -ENOMEM;
49678+ goto cleanup;
49679+ }
49680+
49681+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49682+ err = -EFAULT;
49683+ goto cleanup;
49684+ }
49685+ tmp[len-1] = '\0';
49686+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49687+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49688+#endif
49689+ sptmp->rolename = tmp;
49690+ acl_special_roles[i] = sptmp;
49691+ }
49692+
49693+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49694+
49695+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49696+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49697+
49698+ if (!r_tmp) {
49699+ err = -ENOMEM;
49700+ goto cleanup;
49701+ }
49702+
49703+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49704+ sizeof (struct acl_role_label *))) {
49705+ err = -EFAULT;
49706+ goto cleanup;
49707+ }
49708+
49709+ if (copy_from_user(r_tmp, r_utmp2,
49710+ sizeof (struct acl_role_label))) {
49711+ err = -EFAULT;
49712+ goto cleanup;
49713+ }
49714+
49715+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49716+
49717+ if (!len || len >= PATH_MAX) {
49718+ err = -EINVAL;
49719+ goto cleanup;
49720+ }
49721+
49722+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49723+ err = -ENOMEM;
49724+ goto cleanup;
49725+ }
49726+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49727+ err = -EFAULT;
49728+ goto cleanup;
49729+ }
49730+ tmp[len-1] = '\0';
49731+ r_tmp->rolename = tmp;
49732+
49733+ if (!strcmp(r_tmp->rolename, "default")
49734+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49735+ default_role = r_tmp;
49736+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49737+ kernel_role = r_tmp;
49738+ }
49739+
49740+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49741+ err = -ENOMEM;
49742+ goto cleanup;
49743+ }
49744+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49745+ err = -EFAULT;
49746+ goto cleanup;
49747+ }
49748+
49749+ r_tmp->hash = ghash;
49750+
49751+ num_subjs = count_user_subjs(r_tmp->hash->first);
49752+
49753+ r_tmp->subj_hash_size = num_subjs;
49754+ r_tmp->subj_hash =
49755+ (struct acl_subject_label **)
49756+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49757+
49758+ if (!r_tmp->subj_hash) {
49759+ err = -ENOMEM;
49760+ goto cleanup;
49761+ }
49762+
49763+ err = copy_user_allowedips(r_tmp);
49764+ if (err)
49765+ goto cleanup;
49766+
49767+ /* copy domain info */
49768+ if (r_tmp->domain_children != NULL) {
49769+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49770+ if (domainlist == NULL) {
49771+ err = -ENOMEM;
49772+ goto cleanup;
49773+ }
49774+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49775+ err = -EFAULT;
49776+ goto cleanup;
49777+ }
49778+ r_tmp->domain_children = domainlist;
49779+ }
49780+
49781+ err = copy_user_transitions(r_tmp);
49782+ if (err)
49783+ goto cleanup;
49784+
49785+ memset(r_tmp->subj_hash, 0,
49786+ r_tmp->subj_hash_size *
49787+ sizeof (struct acl_subject_label *));
49788+
49789+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49790+
49791+ if (err)
49792+ goto cleanup;
49793+
49794+ /* set nested subject list to null */
49795+ r_tmp->hash->first = NULL;
49796+
49797+ insert_acl_role_label(r_tmp);
49798+ }
49799+
49800+ goto return_err;
49801+ cleanup:
49802+ free_variables();
49803+ return_err:
49804+ return err;
49805+
49806+}
49807+
49808+static int
49809+gracl_init(struct gr_arg *args)
49810+{
49811+ int error = 0;
49812+
49813+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49814+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49815+
49816+ if (init_variables(args)) {
49817+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49818+ error = -ENOMEM;
49819+ free_variables();
49820+ goto out;
49821+ }
49822+
49823+ error = copy_user_acl(args);
49824+ free_init_variables();
49825+ if (error) {
49826+ free_variables();
49827+ goto out;
49828+ }
49829+
49830+ if ((error = gr_set_acls(0))) {
49831+ free_variables();
49832+ goto out;
49833+ }
49834+
49835+ pax_open_kernel();
49836+ gr_status |= GR_READY;
49837+ pax_close_kernel();
49838+
49839+ out:
49840+ return error;
49841+}
49842+
49843+/* derived from glibc fnmatch() 0: match, 1: no match*/
49844+
49845+static int
49846+glob_match(const char *p, const char *n)
49847+{
49848+ char c;
49849+
49850+ while ((c = *p++) != '\0') {
49851+ switch (c) {
49852+ case '?':
49853+ if (*n == '\0')
49854+ return 1;
49855+ else if (*n == '/')
49856+ return 1;
49857+ break;
49858+ case '\\':
49859+ if (*n != c)
49860+ return 1;
49861+ break;
49862+ case '*':
49863+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49864+ if (*n == '/')
49865+ return 1;
49866+ else if (c == '?') {
49867+ if (*n == '\0')
49868+ return 1;
49869+ else
49870+ ++n;
49871+ }
49872+ }
49873+ if (c == '\0') {
49874+ return 0;
49875+ } else {
49876+ const char *endp;
49877+
49878+ if ((endp = strchr(n, '/')) == NULL)
49879+ endp = n + strlen(n);
49880+
49881+ if (c == '[') {
49882+ for (--p; n < endp; ++n)
49883+ if (!glob_match(p, n))
49884+ return 0;
49885+ } else if (c == '/') {
49886+ while (*n != '\0' && *n != '/')
49887+ ++n;
49888+ if (*n == '/' && !glob_match(p, n + 1))
49889+ return 0;
49890+ } else {
49891+ for (--p; n < endp; ++n)
49892+ if (*n == c && !glob_match(p, n))
49893+ return 0;
49894+ }
49895+
49896+ return 1;
49897+ }
49898+ case '[':
49899+ {
49900+ int not;
49901+ char cold;
49902+
49903+ if (*n == '\0' || *n == '/')
49904+ return 1;
49905+
49906+ not = (*p == '!' || *p == '^');
49907+ if (not)
49908+ ++p;
49909+
49910+ c = *p++;
49911+ for (;;) {
49912+ unsigned char fn = (unsigned char)*n;
49913+
49914+ if (c == '\0')
49915+ return 1;
49916+ else {
49917+ if (c == fn)
49918+ goto matched;
49919+ cold = c;
49920+ c = *p++;
49921+
49922+ if (c == '-' && *p != ']') {
49923+ unsigned char cend = *p++;
49924+
49925+ if (cend == '\0')
49926+ return 1;
49927+
49928+ if (cold <= fn && fn <= cend)
49929+ goto matched;
49930+
49931+ c = *p++;
49932+ }
49933+ }
49934+
49935+ if (c == ']')
49936+ break;
49937+ }
49938+ if (!not)
49939+ return 1;
49940+ break;
49941+ matched:
49942+ while (c != ']') {
49943+ if (c == '\0')
49944+ return 1;
49945+
49946+ c = *p++;
49947+ }
49948+ if (not)
49949+ return 1;
49950+ }
49951+ break;
49952+ default:
49953+ if (c != *n)
49954+ return 1;
49955+ }
49956+
49957+ ++n;
49958+ }
49959+
49960+ if (*n == '\0')
49961+ return 0;
49962+
49963+ if (*n == '/')
49964+ return 0;
49965+
49966+ return 1;
49967+}
49968+
49969+static struct acl_object_label *
49970+chk_glob_label(struct acl_object_label *globbed,
49971+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49972+{
49973+ struct acl_object_label *tmp;
49974+
49975+ if (*path == NULL)
49976+ *path = gr_to_filename_nolock(dentry, mnt);
49977+
49978+ tmp = globbed;
49979+
49980+ while (tmp) {
49981+ if (!glob_match(tmp->filename, *path))
49982+ return tmp;
49983+ tmp = tmp->next;
49984+ }
49985+
49986+ return NULL;
49987+}
49988+
49989+static struct acl_object_label *
49990+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49991+ const ino_t curr_ino, const dev_t curr_dev,
49992+ const struct acl_subject_label *subj, char **path, const int checkglob)
49993+{
49994+ struct acl_subject_label *tmpsubj;
49995+ struct acl_object_label *retval;
49996+ struct acl_object_label *retval2;
49997+
49998+ tmpsubj = (struct acl_subject_label *) subj;
49999+ read_lock(&gr_inode_lock);
50000+ do {
50001+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50002+ if (retval) {
50003+ if (checkglob && retval->globbed) {
50004+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50005+ (struct vfsmount *)orig_mnt, path);
50006+ if (retval2)
50007+ retval = retval2;
50008+ }
50009+ break;
50010+ }
50011+ } while ((tmpsubj = tmpsubj->parent_subject));
50012+ read_unlock(&gr_inode_lock);
50013+
50014+ return retval;
50015+}
50016+
50017+static __inline__ struct acl_object_label *
50018+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50019+ struct dentry *curr_dentry,
50020+ const struct acl_subject_label *subj, char **path, const int checkglob)
50021+{
50022+ int newglob = checkglob;
50023+ ino_t inode;
50024+ dev_t device;
50025+
50026+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50027+ as we don't want a / * rule to match instead of the / object
50028+ don't do this for create lookups that call this function though, since they're looking up
50029+ on the parent and thus need globbing checks on all paths
50030+ */
50031+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50032+ newglob = GR_NO_GLOB;
50033+
50034+ spin_lock(&curr_dentry->d_lock);
50035+ inode = curr_dentry->d_inode->i_ino;
50036+ device = __get_dev(curr_dentry);
50037+ spin_unlock(&curr_dentry->d_lock);
50038+
50039+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50040+}
50041+
50042+static struct acl_object_label *
50043+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50044+ const struct acl_subject_label *subj, char *path, const int checkglob)
50045+{
50046+ struct dentry *dentry = (struct dentry *) l_dentry;
50047+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50048+ struct acl_object_label *retval;
50049+ struct dentry *parent;
50050+
50051+ write_seqlock(&rename_lock);
50052+ br_read_lock(vfsmount_lock);
50053+
50054+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50055+#ifdef CONFIG_NET
50056+ mnt == sock_mnt ||
50057+#endif
50058+#ifdef CONFIG_HUGETLBFS
50059+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50060+#endif
50061+ /* ignore Eric Biederman */
50062+ IS_PRIVATE(l_dentry->d_inode))) {
50063+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50064+ goto out;
50065+ }
50066+
50067+ for (;;) {
50068+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50069+ break;
50070+
50071+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50072+ if (mnt->mnt_parent == mnt)
50073+ break;
50074+
50075+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50076+ if (retval != NULL)
50077+ goto out;
50078+
50079+ dentry = mnt->mnt_mountpoint;
50080+ mnt = mnt->mnt_parent;
50081+ continue;
50082+ }
50083+
50084+ parent = dentry->d_parent;
50085+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50086+ if (retval != NULL)
50087+ goto out;
50088+
50089+ dentry = parent;
50090+ }
50091+
50092+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50093+
50094+ /* real_root is pinned so we don't have to hold a reference */
50095+ if (retval == NULL)
50096+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50097+out:
50098+ br_read_unlock(vfsmount_lock);
50099+ write_sequnlock(&rename_lock);
50100+
50101+ BUG_ON(retval == NULL);
50102+
50103+ return retval;
50104+}
50105+
50106+static __inline__ struct acl_object_label *
50107+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50108+ const struct acl_subject_label *subj)
50109+{
50110+ char *path = NULL;
50111+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50112+}
50113+
50114+static __inline__ struct acl_object_label *
50115+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50116+ const struct acl_subject_label *subj)
50117+{
50118+ char *path = NULL;
50119+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50120+}
50121+
50122+static __inline__ struct acl_object_label *
50123+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50124+ const struct acl_subject_label *subj, char *path)
50125+{
50126+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50127+}
50128+
50129+static struct acl_subject_label *
50130+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50131+ const struct acl_role_label *role)
50132+{
50133+ struct dentry *dentry = (struct dentry *) l_dentry;
50134+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50135+ struct acl_subject_label *retval;
50136+ struct dentry *parent;
50137+
50138+ write_seqlock(&rename_lock);
50139+ br_read_lock(vfsmount_lock);
50140+
50141+ for (;;) {
50142+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50143+ break;
50144+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50145+ if (mnt->mnt_parent == mnt)
50146+ break;
50147+
50148+ spin_lock(&dentry->d_lock);
50149+ read_lock(&gr_inode_lock);
50150+ retval =
50151+ lookup_acl_subj_label(dentry->d_inode->i_ino,
50152+ __get_dev(dentry), role);
50153+ read_unlock(&gr_inode_lock);
50154+ spin_unlock(&dentry->d_lock);
50155+ if (retval != NULL)
50156+ goto out;
50157+
50158+ dentry = mnt->mnt_mountpoint;
50159+ mnt = mnt->mnt_parent;
50160+ continue;
50161+ }
50162+
50163+ spin_lock(&dentry->d_lock);
50164+ read_lock(&gr_inode_lock);
50165+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50166+ __get_dev(dentry), role);
50167+ read_unlock(&gr_inode_lock);
50168+ parent = dentry->d_parent;
50169+ spin_unlock(&dentry->d_lock);
50170+
50171+ if (retval != NULL)
50172+ goto out;
50173+
50174+ dentry = parent;
50175+ }
50176+
50177+ spin_lock(&dentry->d_lock);
50178+ read_lock(&gr_inode_lock);
50179+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50180+ __get_dev(dentry), role);
50181+ read_unlock(&gr_inode_lock);
50182+ spin_unlock(&dentry->d_lock);
50183+
50184+ if (unlikely(retval == NULL)) {
50185+ /* real_root is pinned, we don't need to hold a reference */
50186+ read_lock(&gr_inode_lock);
50187+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50188+ __get_dev(real_root.dentry), role);
50189+ read_unlock(&gr_inode_lock);
50190+ }
50191+out:
50192+ br_read_unlock(vfsmount_lock);
50193+ write_sequnlock(&rename_lock);
50194+
50195+ BUG_ON(retval == NULL);
50196+
50197+ return retval;
50198+}
50199+
50200+static void
50201+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50202+{
50203+ struct task_struct *task = current;
50204+ const struct cred *cred = current_cred();
50205+
50206+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50207+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50208+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50209+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50210+
50211+ return;
50212+}
50213+
50214+static void
50215+gr_log_learn_sysctl(const char *path, const __u32 mode)
50216+{
50217+ struct task_struct *task = current;
50218+ const struct cred *cred = current_cred();
50219+
50220+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50221+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50222+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50223+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50224+
50225+ return;
50226+}
50227+
50228+static void
50229+gr_log_learn_id_change(const char type, const unsigned int real,
50230+ const unsigned int effective, const unsigned int fs)
50231+{
50232+ struct task_struct *task = current;
50233+ const struct cred *cred = current_cred();
50234+
50235+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50236+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50237+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50238+ type, real, effective, fs, &task->signal->saved_ip);
50239+
50240+ return;
50241+}
50242+
50243+__u32
50244+gr_search_file(const struct dentry * dentry, const __u32 mode,
50245+ const struct vfsmount * mnt)
50246+{
50247+ __u32 retval = mode;
50248+ struct acl_subject_label *curracl;
50249+ struct acl_object_label *currobj;
50250+
50251+ if (unlikely(!(gr_status & GR_READY)))
50252+ return (mode & ~GR_AUDITS);
50253+
50254+ curracl = current->acl;
50255+
50256+ currobj = chk_obj_label(dentry, mnt, curracl);
50257+ retval = currobj->mode & mode;
50258+
50259+ /* if we're opening a specified transfer file for writing
50260+ (e.g. /dev/initctl), then transfer our role to init
50261+ */
50262+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50263+ current->role->roletype & GR_ROLE_PERSIST)) {
50264+ struct task_struct *task = init_pid_ns.child_reaper;
50265+
50266+ if (task->role != current->role) {
50267+ task->acl_sp_role = 0;
50268+ task->acl_role_id = current->acl_role_id;
50269+ task->role = current->role;
50270+ rcu_read_lock();
50271+ read_lock(&grsec_exec_file_lock);
50272+ gr_apply_subject_to_task(task);
50273+ read_unlock(&grsec_exec_file_lock);
50274+ rcu_read_unlock();
50275+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50276+ }
50277+ }
50278+
50279+ if (unlikely
50280+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50281+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50282+ __u32 new_mode = mode;
50283+
50284+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50285+
50286+ retval = new_mode;
50287+
50288+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50289+ new_mode |= GR_INHERIT;
50290+
50291+ if (!(mode & GR_NOLEARN))
50292+ gr_log_learn(dentry, mnt, new_mode);
50293+ }
50294+
50295+ return retval;
50296+}
50297+
50298+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50299+ const struct dentry *parent,
50300+ const struct vfsmount *mnt)
50301+{
50302+ struct name_entry *match;
50303+ struct acl_object_label *matchpo;
50304+ struct acl_subject_label *curracl;
50305+ char *path;
50306+
50307+ if (unlikely(!(gr_status & GR_READY)))
50308+ return NULL;
50309+
50310+ preempt_disable();
50311+ path = gr_to_filename_rbac(new_dentry, mnt);
50312+ match = lookup_name_entry_create(path);
50313+
50314+ curracl = current->acl;
50315+
50316+ if (match) {
50317+ read_lock(&gr_inode_lock);
50318+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50319+ read_unlock(&gr_inode_lock);
50320+
50321+ if (matchpo) {
50322+ preempt_enable();
50323+ return matchpo;
50324+ }
50325+ }
50326+
50327+ // lookup parent
50328+
50329+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50330+
50331+ preempt_enable();
50332+ return matchpo;
50333+}
50334+
50335+__u32
50336+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50337+ const struct vfsmount * mnt, const __u32 mode)
50338+{
50339+ struct acl_object_label *matchpo;
50340+ __u32 retval;
50341+
50342+ if (unlikely(!(gr_status & GR_READY)))
50343+ return (mode & ~GR_AUDITS);
50344+
50345+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50346+
50347+ retval = matchpo->mode & mode;
50348+
50349+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50350+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50351+ __u32 new_mode = mode;
50352+
50353+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50354+
50355+ gr_log_learn(new_dentry, mnt, new_mode);
50356+ return new_mode;
50357+ }
50358+
50359+ return retval;
50360+}
50361+
50362+__u32
50363+gr_check_link(const struct dentry * new_dentry,
50364+ const struct dentry * parent_dentry,
50365+ const struct vfsmount * parent_mnt,
50366+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50367+{
50368+ struct acl_object_label *obj;
50369+ __u32 oldmode, newmode;
50370+ __u32 needmode;
50371+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50372+ GR_DELETE | GR_INHERIT;
50373+
50374+ if (unlikely(!(gr_status & GR_READY)))
50375+ return (GR_CREATE | GR_LINK);
50376+
50377+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50378+ oldmode = obj->mode;
50379+
50380+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50381+ newmode = obj->mode;
50382+
50383+ needmode = newmode & checkmodes;
50384+
50385+ // old name for hardlink must have at least the permissions of the new name
50386+ if ((oldmode & needmode) != needmode)
50387+ goto bad;
50388+
50389+ // if old name had restrictions/auditing, make sure the new name does as well
50390+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50391+
50392+ // don't allow hardlinking of suid/sgid files without permission
50393+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50394+ needmode |= GR_SETID;
50395+
50396+ if ((newmode & needmode) != needmode)
50397+ goto bad;
50398+
50399+ // enforce minimum permissions
50400+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50401+ return newmode;
50402+bad:
50403+ needmode = oldmode;
50404+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50405+ needmode |= GR_SETID;
50406+
50407+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50408+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50409+ return (GR_CREATE | GR_LINK);
50410+ } else if (newmode & GR_SUPPRESS)
50411+ return GR_SUPPRESS;
50412+ else
50413+ return 0;
50414+}
50415+
50416+int
50417+gr_check_hidden_task(const struct task_struct *task)
50418+{
50419+ if (unlikely(!(gr_status & GR_READY)))
50420+ return 0;
50421+
50422+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50423+ return 1;
50424+
50425+ return 0;
50426+}
50427+
50428+int
50429+gr_check_protected_task(const struct task_struct *task)
50430+{
50431+ if (unlikely(!(gr_status & GR_READY) || !task))
50432+ return 0;
50433+
50434+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50435+ task->acl != current->acl)
50436+ return 1;
50437+
50438+ return 0;
50439+}
50440+
50441+int
50442+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50443+{
50444+ struct task_struct *p;
50445+ int ret = 0;
50446+
50447+ if (unlikely(!(gr_status & GR_READY) || !pid))
50448+ return ret;
50449+
50450+ read_lock(&tasklist_lock);
50451+ do_each_pid_task(pid, type, p) {
50452+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50453+ p->acl != current->acl) {
50454+ ret = 1;
50455+ goto out;
50456+ }
50457+ } while_each_pid_task(pid, type, p);
50458+out:
50459+ read_unlock(&tasklist_lock);
50460+
50461+ return ret;
50462+}
50463+
50464+void
50465+gr_copy_label(struct task_struct *tsk)
50466+{
50467+ tsk->signal->used_accept = 0;
50468+ tsk->acl_sp_role = 0;
50469+ tsk->acl_role_id = current->acl_role_id;
50470+ tsk->acl = current->acl;
50471+ tsk->role = current->role;
50472+ tsk->signal->curr_ip = current->signal->curr_ip;
50473+ tsk->signal->saved_ip = current->signal->saved_ip;
50474+ if (current->exec_file)
50475+ get_file(current->exec_file);
50476+ tsk->exec_file = current->exec_file;
50477+ tsk->is_writable = current->is_writable;
50478+ if (unlikely(current->signal->used_accept)) {
50479+ current->signal->curr_ip = 0;
50480+ current->signal->saved_ip = 0;
50481+ }
50482+
50483+ return;
50484+}
50485+
50486+static void
50487+gr_set_proc_res(struct task_struct *task)
50488+{
50489+ struct acl_subject_label *proc;
50490+ unsigned short i;
50491+
50492+ proc = task->acl;
50493+
50494+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50495+ return;
50496+
50497+ for (i = 0; i < RLIM_NLIMITS; i++) {
50498+ if (!(proc->resmask & (1 << i)))
50499+ continue;
50500+
50501+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50502+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50503+ }
50504+
50505+ return;
50506+}
50507+
50508+extern int __gr_process_user_ban(struct user_struct *user);
50509+
50510+int
50511+gr_check_user_change(int real, int effective, int fs)
50512+{
50513+ unsigned int i;
50514+ __u16 num;
50515+ uid_t *uidlist;
50516+ int curuid;
50517+ int realok = 0;
50518+ int effectiveok = 0;
50519+ int fsok = 0;
50520+
50521+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50522+ struct user_struct *user;
50523+
50524+ if (real == -1)
50525+ goto skipit;
50526+
50527+ user = find_user(real);
50528+ if (user == NULL)
50529+ goto skipit;
50530+
50531+ if (__gr_process_user_ban(user)) {
50532+ /* for find_user */
50533+ free_uid(user);
50534+ return 1;
50535+ }
50536+
50537+ /* for find_user */
50538+ free_uid(user);
50539+
50540+skipit:
50541+#endif
50542+
50543+ if (unlikely(!(gr_status & GR_READY)))
50544+ return 0;
50545+
50546+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50547+ gr_log_learn_id_change('u', real, effective, fs);
50548+
50549+ num = current->acl->user_trans_num;
50550+ uidlist = current->acl->user_transitions;
50551+
50552+ if (uidlist == NULL)
50553+ return 0;
50554+
50555+ if (real == -1)
50556+ realok = 1;
50557+ if (effective == -1)
50558+ effectiveok = 1;
50559+ if (fs == -1)
50560+ fsok = 1;
50561+
50562+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50563+ for (i = 0; i < num; i++) {
50564+ curuid = (int)uidlist[i];
50565+ if (real == curuid)
50566+ realok = 1;
50567+ if (effective == curuid)
50568+ effectiveok = 1;
50569+ if (fs == curuid)
50570+ fsok = 1;
50571+ }
50572+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50573+ for (i = 0; i < num; i++) {
50574+ curuid = (int)uidlist[i];
50575+ if (real == curuid)
50576+ break;
50577+ if (effective == curuid)
50578+ break;
50579+ if (fs == curuid)
50580+ break;
50581+ }
50582+ /* not in deny list */
50583+ if (i == num) {
50584+ realok = 1;
50585+ effectiveok = 1;
50586+ fsok = 1;
50587+ }
50588+ }
50589+
50590+ if (realok && effectiveok && fsok)
50591+ return 0;
50592+ else {
50593+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50594+ return 1;
50595+ }
50596+}
50597+
50598+int
50599+gr_check_group_change(int real, int effective, int fs)
50600+{
50601+ unsigned int i;
50602+ __u16 num;
50603+ gid_t *gidlist;
50604+ int curgid;
50605+ int realok = 0;
50606+ int effectiveok = 0;
50607+ int fsok = 0;
50608+
50609+ if (unlikely(!(gr_status & GR_READY)))
50610+ return 0;
50611+
50612+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50613+ gr_log_learn_id_change('g', real, effective, fs);
50614+
50615+ num = current->acl->group_trans_num;
50616+ gidlist = current->acl->group_transitions;
50617+
50618+ if (gidlist == NULL)
50619+ return 0;
50620+
50621+ if (real == -1)
50622+ realok = 1;
50623+ if (effective == -1)
50624+ effectiveok = 1;
50625+ if (fs == -1)
50626+ fsok = 1;
50627+
50628+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50629+ for (i = 0; i < num; i++) {
50630+ curgid = (int)gidlist[i];
50631+ if (real == curgid)
50632+ realok = 1;
50633+ if (effective == curgid)
50634+ effectiveok = 1;
50635+ if (fs == curgid)
50636+ fsok = 1;
50637+ }
50638+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50639+ for (i = 0; i < num; i++) {
50640+ curgid = (int)gidlist[i];
50641+ if (real == curgid)
50642+ break;
50643+ if (effective == curgid)
50644+ break;
50645+ if (fs == curgid)
50646+ break;
50647+ }
50648+ /* not in deny list */
50649+ if (i == num) {
50650+ realok = 1;
50651+ effectiveok = 1;
50652+ fsok = 1;
50653+ }
50654+ }
50655+
50656+ if (realok && effectiveok && fsok)
50657+ return 0;
50658+ else {
50659+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50660+ return 1;
50661+ }
50662+}
50663+
50664+extern int gr_acl_is_capable(const int cap);
50665+
50666+void
50667+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50668+{
50669+ struct acl_role_label *role = task->role;
50670+ struct acl_subject_label *subj = NULL;
50671+ struct acl_object_label *obj;
50672+ struct file *filp;
50673+
50674+ if (unlikely(!(gr_status & GR_READY)))
50675+ return;
50676+
50677+ filp = task->exec_file;
50678+
50679+ /* kernel process, we'll give them the kernel role */
50680+ if (unlikely(!filp)) {
50681+ task->role = kernel_role;
50682+ task->acl = kernel_role->root_label;
50683+ return;
50684+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50685+ role = lookup_acl_role_label(task, uid, gid);
50686+
50687+ /* don't change the role if we're not a privileged process */
50688+ if (role && task->role != role &&
50689+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
50690+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
50691+ return;
50692+
50693+ /* perform subject lookup in possibly new role
50694+ we can use this result below in the case where role == task->role
50695+ */
50696+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50697+
50698+ /* if we changed uid/gid, but result in the same role
50699+ and are using inheritance, don't lose the inherited subject
50700+ if current subject is other than what normal lookup
50701+ would result in, we arrived via inheritance, don't
50702+ lose subject
50703+ */
50704+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50705+ (subj == task->acl)))
50706+ task->acl = subj;
50707+
50708+ task->role = role;
50709+
50710+ task->is_writable = 0;
50711+
50712+ /* ignore additional mmap checks for processes that are writable
50713+ by the default ACL */
50714+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50715+ if (unlikely(obj->mode & GR_WRITE))
50716+ task->is_writable = 1;
50717+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50718+ if (unlikely(obj->mode & GR_WRITE))
50719+ task->is_writable = 1;
50720+
50721+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50722+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50723+#endif
50724+
50725+ gr_set_proc_res(task);
50726+
50727+ return;
50728+}
50729+
50730+int
50731+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50732+ const int unsafe_flags)
50733+{
50734+ struct task_struct *task = current;
50735+ struct acl_subject_label *newacl;
50736+ struct acl_object_label *obj;
50737+ __u32 retmode;
50738+
50739+ if (unlikely(!(gr_status & GR_READY)))
50740+ return 0;
50741+
50742+ newacl = chk_subj_label(dentry, mnt, task->role);
50743+
50744+ task_lock(task);
50745+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50746+ !(task->role->roletype & GR_ROLE_GOD) &&
50747+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50748+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50749+ task_unlock(task);
50750+ if (unsafe_flags & LSM_UNSAFE_SHARE)
50751+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50752+ else
50753+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50754+ return -EACCES;
50755+ }
50756+ task_unlock(task);
50757+
50758+ obj = chk_obj_label(dentry, mnt, task->acl);
50759+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50760+
50761+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50762+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50763+ if (obj->nested)
50764+ task->acl = obj->nested;
50765+ else
50766+ task->acl = newacl;
50767+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50768+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50769+
50770+ task->is_writable = 0;
50771+
50772+ /* ignore additional mmap checks for processes that are writable
50773+ by the default ACL */
50774+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50775+ if (unlikely(obj->mode & GR_WRITE))
50776+ task->is_writable = 1;
50777+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50778+ if (unlikely(obj->mode & GR_WRITE))
50779+ task->is_writable = 1;
50780+
50781+ gr_set_proc_res(task);
50782+
50783+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50784+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50785+#endif
50786+ return 0;
50787+}
50788+
50789+/* always called with valid inodev ptr */
50790+static void
50791+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50792+{
50793+ struct acl_object_label *matchpo;
50794+ struct acl_subject_label *matchps;
50795+ struct acl_subject_label *subj;
50796+ struct acl_role_label *role;
50797+ unsigned int x;
50798+
50799+ FOR_EACH_ROLE_START(role)
50800+ FOR_EACH_SUBJECT_START(role, subj, x)
50801+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50802+ matchpo->mode |= GR_DELETED;
50803+ FOR_EACH_SUBJECT_END(subj,x)
50804+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50805+ if (subj->inode == ino && subj->device == dev)
50806+ subj->mode |= GR_DELETED;
50807+ FOR_EACH_NESTED_SUBJECT_END(subj)
50808+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50809+ matchps->mode |= GR_DELETED;
50810+ FOR_EACH_ROLE_END(role)
50811+
50812+ inodev->nentry->deleted = 1;
50813+
50814+ return;
50815+}
50816+
50817+void
50818+gr_handle_delete(const ino_t ino, const dev_t dev)
50819+{
50820+ struct inodev_entry *inodev;
50821+
50822+ if (unlikely(!(gr_status & GR_READY)))
50823+ return;
50824+
50825+ write_lock(&gr_inode_lock);
50826+ inodev = lookup_inodev_entry(ino, dev);
50827+ if (inodev != NULL)
50828+ do_handle_delete(inodev, ino, dev);
50829+ write_unlock(&gr_inode_lock);
50830+
50831+ return;
50832+}
50833+
50834+static void
50835+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50836+ const ino_t newinode, const dev_t newdevice,
50837+ struct acl_subject_label *subj)
50838+{
50839+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50840+ struct acl_object_label *match;
50841+
50842+ match = subj->obj_hash[index];
50843+
50844+ while (match && (match->inode != oldinode ||
50845+ match->device != olddevice ||
50846+ !(match->mode & GR_DELETED)))
50847+ match = match->next;
50848+
50849+ if (match && (match->inode == oldinode)
50850+ && (match->device == olddevice)
50851+ && (match->mode & GR_DELETED)) {
50852+ if (match->prev == NULL) {
50853+ subj->obj_hash[index] = match->next;
50854+ if (match->next != NULL)
50855+ match->next->prev = NULL;
50856+ } else {
50857+ match->prev->next = match->next;
50858+ if (match->next != NULL)
50859+ match->next->prev = match->prev;
50860+ }
50861+ match->prev = NULL;
50862+ match->next = NULL;
50863+ match->inode = newinode;
50864+ match->device = newdevice;
50865+ match->mode &= ~GR_DELETED;
50866+
50867+ insert_acl_obj_label(match, subj);
50868+ }
50869+
50870+ return;
50871+}
50872+
50873+static void
50874+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50875+ const ino_t newinode, const dev_t newdevice,
50876+ struct acl_role_label *role)
50877+{
50878+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50879+ struct acl_subject_label *match;
50880+
50881+ match = role->subj_hash[index];
50882+
50883+ while (match && (match->inode != oldinode ||
50884+ match->device != olddevice ||
50885+ !(match->mode & GR_DELETED)))
50886+ match = match->next;
50887+
50888+ if (match && (match->inode == oldinode)
50889+ && (match->device == olddevice)
50890+ && (match->mode & GR_DELETED)) {
50891+ if (match->prev == NULL) {
50892+ role->subj_hash[index] = match->next;
50893+ if (match->next != NULL)
50894+ match->next->prev = NULL;
50895+ } else {
50896+ match->prev->next = match->next;
50897+ if (match->next != NULL)
50898+ match->next->prev = match->prev;
50899+ }
50900+ match->prev = NULL;
50901+ match->next = NULL;
50902+ match->inode = newinode;
50903+ match->device = newdevice;
50904+ match->mode &= ~GR_DELETED;
50905+
50906+ insert_acl_subj_label(match, role);
50907+ }
50908+
50909+ return;
50910+}
50911+
50912+static void
50913+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50914+ const ino_t newinode, const dev_t newdevice)
50915+{
50916+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50917+ struct inodev_entry *match;
50918+
50919+ match = inodev_set.i_hash[index];
50920+
50921+ while (match && (match->nentry->inode != oldinode ||
50922+ match->nentry->device != olddevice || !match->nentry->deleted))
50923+ match = match->next;
50924+
50925+ if (match && (match->nentry->inode == oldinode)
50926+ && (match->nentry->device == olddevice) &&
50927+ match->nentry->deleted) {
50928+ if (match->prev == NULL) {
50929+ inodev_set.i_hash[index] = match->next;
50930+ if (match->next != NULL)
50931+ match->next->prev = NULL;
50932+ } else {
50933+ match->prev->next = match->next;
50934+ if (match->next != NULL)
50935+ match->next->prev = match->prev;
50936+ }
50937+ match->prev = NULL;
50938+ match->next = NULL;
50939+ match->nentry->inode = newinode;
50940+ match->nentry->device = newdevice;
50941+ match->nentry->deleted = 0;
50942+
50943+ insert_inodev_entry(match);
50944+ }
50945+
50946+ return;
50947+}
50948+
50949+static void
50950+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50951+{
50952+ struct acl_subject_label *subj;
50953+ struct acl_role_label *role;
50954+ unsigned int x;
50955+
50956+ FOR_EACH_ROLE_START(role)
50957+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50958+
50959+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50960+ if ((subj->inode == ino) && (subj->device == dev)) {
50961+ subj->inode = ino;
50962+ subj->device = dev;
50963+ }
50964+ FOR_EACH_NESTED_SUBJECT_END(subj)
50965+ FOR_EACH_SUBJECT_START(role, subj, x)
50966+ update_acl_obj_label(matchn->inode, matchn->device,
50967+ ino, dev, subj);
50968+ FOR_EACH_SUBJECT_END(subj,x)
50969+ FOR_EACH_ROLE_END(role)
50970+
50971+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50972+
50973+ return;
50974+}
50975+
50976+static void
50977+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50978+ const struct vfsmount *mnt)
50979+{
50980+ ino_t ino = dentry->d_inode->i_ino;
50981+ dev_t dev = __get_dev(dentry);
50982+
50983+ __do_handle_create(matchn, ino, dev);
50984+
50985+ return;
50986+}
50987+
50988+void
50989+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50990+{
50991+ struct name_entry *matchn;
50992+
50993+ if (unlikely(!(gr_status & GR_READY)))
50994+ return;
50995+
50996+ preempt_disable();
50997+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50998+
50999+ if (unlikely((unsigned long)matchn)) {
51000+ write_lock(&gr_inode_lock);
51001+ do_handle_create(matchn, dentry, mnt);
51002+ write_unlock(&gr_inode_lock);
51003+ }
51004+ preempt_enable();
51005+
51006+ return;
51007+}
51008+
51009+void
51010+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51011+{
51012+ struct name_entry *matchn;
51013+
51014+ if (unlikely(!(gr_status & GR_READY)))
51015+ return;
51016+
51017+ preempt_disable();
51018+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51019+
51020+ if (unlikely((unsigned long)matchn)) {
51021+ write_lock(&gr_inode_lock);
51022+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51023+ write_unlock(&gr_inode_lock);
51024+ }
51025+ preempt_enable();
51026+
51027+ return;
51028+}
51029+
51030+void
51031+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51032+ struct dentry *old_dentry,
51033+ struct dentry *new_dentry,
51034+ struct vfsmount *mnt, const __u8 replace)
51035+{
51036+ struct name_entry *matchn;
51037+ struct inodev_entry *inodev;
51038+ struct inode *inode = new_dentry->d_inode;
51039+ ino_t old_ino = old_dentry->d_inode->i_ino;
51040+ dev_t old_dev = __get_dev(old_dentry);
51041+
51042+ /* vfs_rename swaps the name and parent link for old_dentry and
51043+ new_dentry
51044+ at this point, old_dentry has the new name, parent link, and inode
51045+ for the renamed file
51046+ if a file is being replaced by a rename, new_dentry has the inode
51047+ and name for the replaced file
51048+ */
51049+
51050+ if (unlikely(!(gr_status & GR_READY)))
51051+ return;
51052+
51053+ preempt_disable();
51054+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51055+
51056+ /* we wouldn't have to check d_inode if it weren't for
51057+ NFS silly-renaming
51058+ */
51059+
51060+ write_lock(&gr_inode_lock);
51061+ if (unlikely(replace && inode)) {
51062+ ino_t new_ino = inode->i_ino;
51063+ dev_t new_dev = __get_dev(new_dentry);
51064+
51065+ inodev = lookup_inodev_entry(new_ino, new_dev);
51066+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51067+ do_handle_delete(inodev, new_ino, new_dev);
51068+ }
51069+
51070+ inodev = lookup_inodev_entry(old_ino, old_dev);
51071+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51072+ do_handle_delete(inodev, old_ino, old_dev);
51073+
51074+ if (unlikely((unsigned long)matchn))
51075+ do_handle_create(matchn, old_dentry, mnt);
51076+
51077+ write_unlock(&gr_inode_lock);
51078+ preempt_enable();
51079+
51080+ return;
51081+}
51082+
51083+static int
51084+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51085+ unsigned char **sum)
51086+{
51087+ struct acl_role_label *r;
51088+ struct role_allowed_ip *ipp;
51089+ struct role_transition *trans;
51090+ unsigned int i;
51091+ int found = 0;
51092+ u32 curr_ip = current->signal->curr_ip;
51093+
51094+ current->signal->saved_ip = curr_ip;
51095+
51096+ /* check transition table */
51097+
51098+ for (trans = current->role->transitions; trans; trans = trans->next) {
51099+ if (!strcmp(rolename, trans->rolename)) {
51100+ found = 1;
51101+ break;
51102+ }
51103+ }
51104+
51105+ if (!found)
51106+ return 0;
51107+
51108+ /* handle special roles that do not require authentication
51109+ and check ip */
51110+
51111+ FOR_EACH_ROLE_START(r)
51112+ if (!strcmp(rolename, r->rolename) &&
51113+ (r->roletype & GR_ROLE_SPECIAL)) {
51114+ found = 0;
51115+ if (r->allowed_ips != NULL) {
51116+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51117+ if ((ntohl(curr_ip) & ipp->netmask) ==
51118+ (ntohl(ipp->addr) & ipp->netmask))
51119+ found = 1;
51120+ }
51121+ } else
51122+ found = 2;
51123+ if (!found)
51124+ return 0;
51125+
51126+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51127+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51128+ *salt = NULL;
51129+ *sum = NULL;
51130+ return 1;
51131+ }
51132+ }
51133+ FOR_EACH_ROLE_END(r)
51134+
51135+ for (i = 0; i < num_sprole_pws; i++) {
51136+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51137+ *salt = acl_special_roles[i]->salt;
51138+ *sum = acl_special_roles[i]->sum;
51139+ return 1;
51140+ }
51141+ }
51142+
51143+ return 0;
51144+}
51145+
51146+static void
51147+assign_special_role(char *rolename)
51148+{
51149+ struct acl_object_label *obj;
51150+ struct acl_role_label *r;
51151+ struct acl_role_label *assigned = NULL;
51152+ struct task_struct *tsk;
51153+ struct file *filp;
51154+
51155+ FOR_EACH_ROLE_START(r)
51156+ if (!strcmp(rolename, r->rolename) &&
51157+ (r->roletype & GR_ROLE_SPECIAL)) {
51158+ assigned = r;
51159+ break;
51160+ }
51161+ FOR_EACH_ROLE_END(r)
51162+
51163+ if (!assigned)
51164+ return;
51165+
51166+ read_lock(&tasklist_lock);
51167+ read_lock(&grsec_exec_file_lock);
51168+
51169+ tsk = current->real_parent;
51170+ if (tsk == NULL)
51171+ goto out_unlock;
51172+
51173+ filp = tsk->exec_file;
51174+ if (filp == NULL)
51175+ goto out_unlock;
51176+
51177+ tsk->is_writable = 0;
51178+
51179+ tsk->acl_sp_role = 1;
51180+ tsk->acl_role_id = ++acl_sp_role_value;
51181+ tsk->role = assigned;
51182+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51183+
51184+ /* ignore additional mmap checks for processes that are writable
51185+ by the default ACL */
51186+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51187+ if (unlikely(obj->mode & GR_WRITE))
51188+ tsk->is_writable = 1;
51189+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51190+ if (unlikely(obj->mode & GR_WRITE))
51191+ tsk->is_writable = 1;
51192+
51193+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51194+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51195+#endif
51196+
51197+out_unlock:
51198+ read_unlock(&grsec_exec_file_lock);
51199+ read_unlock(&tasklist_lock);
51200+ return;
51201+}
51202+
51203+int gr_check_secure_terminal(struct task_struct *task)
51204+{
51205+ struct task_struct *p, *p2, *p3;
51206+ struct files_struct *files;
51207+ struct fdtable *fdt;
51208+ struct file *our_file = NULL, *file;
51209+ int i;
51210+
51211+ if (task->signal->tty == NULL)
51212+ return 1;
51213+
51214+ files = get_files_struct(task);
51215+ if (files != NULL) {
51216+ rcu_read_lock();
51217+ fdt = files_fdtable(files);
51218+ for (i=0; i < fdt->max_fds; i++) {
51219+ file = fcheck_files(files, i);
51220+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51221+ get_file(file);
51222+ our_file = file;
51223+ }
51224+ }
51225+ rcu_read_unlock();
51226+ put_files_struct(files);
51227+ }
51228+
51229+ if (our_file == NULL)
51230+ return 1;
51231+
51232+ read_lock(&tasklist_lock);
51233+ do_each_thread(p2, p) {
51234+ files = get_files_struct(p);
51235+ if (files == NULL ||
51236+ (p->signal && p->signal->tty == task->signal->tty)) {
51237+ if (files != NULL)
51238+ put_files_struct(files);
51239+ continue;
51240+ }
51241+ rcu_read_lock();
51242+ fdt = files_fdtable(files);
51243+ for (i=0; i < fdt->max_fds; i++) {
51244+ file = fcheck_files(files, i);
51245+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51246+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51247+ p3 = task;
51248+ while (p3->pid > 0) {
51249+ if (p3 == p)
51250+ break;
51251+ p3 = p3->real_parent;
51252+ }
51253+ if (p3 == p)
51254+ break;
51255+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51256+ gr_handle_alertkill(p);
51257+ rcu_read_unlock();
51258+ put_files_struct(files);
51259+ read_unlock(&tasklist_lock);
51260+ fput(our_file);
51261+ return 0;
51262+ }
51263+ }
51264+ rcu_read_unlock();
51265+ put_files_struct(files);
51266+ } while_each_thread(p2, p);
51267+ read_unlock(&tasklist_lock);
51268+
51269+ fput(our_file);
51270+ return 1;
51271+}
51272+
51273+ssize_t
51274+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51275+{
51276+ struct gr_arg_wrapper uwrap;
51277+ unsigned char *sprole_salt = NULL;
51278+ unsigned char *sprole_sum = NULL;
51279+ int error = sizeof (struct gr_arg_wrapper);
51280+ int error2 = 0;
51281+
51282+ mutex_lock(&gr_dev_mutex);
51283+
51284+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51285+ error = -EPERM;
51286+ goto out;
51287+ }
51288+
51289+ if (count != sizeof (struct gr_arg_wrapper)) {
51290+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51291+ error = -EINVAL;
51292+ goto out;
51293+ }
51294+
51295+
51296+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51297+ gr_auth_expires = 0;
51298+ gr_auth_attempts = 0;
51299+ }
51300+
51301+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51302+ error = -EFAULT;
51303+ goto out;
51304+ }
51305+
51306+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51307+ error = -EINVAL;
51308+ goto out;
51309+ }
51310+
51311+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51312+ error = -EFAULT;
51313+ goto out;
51314+ }
51315+
51316+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51317+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51318+ time_after(gr_auth_expires, get_seconds())) {
51319+ error = -EBUSY;
51320+ goto out;
51321+ }
51322+
51323+ /* if non-root trying to do anything other than use a special role,
51324+ do not attempt authentication, do not count towards authentication
51325+ locking
51326+ */
51327+
51328+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51329+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51330+ current_uid()) {
51331+ error = -EPERM;
51332+ goto out;
51333+ }
51334+
51335+ /* ensure pw and special role name are null terminated */
51336+
51337+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51338+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51339+
51340+ /* Okay.
51341+ * We have our enough of the argument structure..(we have yet
51342+ * to copy_from_user the tables themselves) . Copy the tables
51343+ * only if we need them, i.e. for loading operations. */
51344+
51345+ switch (gr_usermode->mode) {
51346+ case GR_STATUS:
51347+ if (gr_status & GR_READY) {
51348+ error = 1;
51349+ if (!gr_check_secure_terminal(current))
51350+ error = 3;
51351+ } else
51352+ error = 2;
51353+ goto out;
51354+ case GR_SHUTDOWN:
51355+ if ((gr_status & GR_READY)
51356+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51357+ pax_open_kernel();
51358+ gr_status &= ~GR_READY;
51359+ pax_close_kernel();
51360+
51361+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51362+ free_variables();
51363+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51364+ memset(gr_system_salt, 0, GR_SALT_LEN);
51365+ memset(gr_system_sum, 0, GR_SHA_LEN);
51366+ } else if (gr_status & GR_READY) {
51367+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51368+ error = -EPERM;
51369+ } else {
51370+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51371+ error = -EAGAIN;
51372+ }
51373+ break;
51374+ case GR_ENABLE:
51375+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51376+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51377+ else {
51378+ if (gr_status & GR_READY)
51379+ error = -EAGAIN;
51380+ else
51381+ error = error2;
51382+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51383+ }
51384+ break;
51385+ case GR_RELOAD:
51386+ if (!(gr_status & GR_READY)) {
51387+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51388+ error = -EAGAIN;
51389+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51390+ preempt_disable();
51391+
51392+ pax_open_kernel();
51393+ gr_status &= ~GR_READY;
51394+ pax_close_kernel();
51395+
51396+ free_variables();
51397+ if (!(error2 = gracl_init(gr_usermode))) {
51398+ preempt_enable();
51399+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51400+ } else {
51401+ preempt_enable();
51402+ error = error2;
51403+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51404+ }
51405+ } else {
51406+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51407+ error = -EPERM;
51408+ }
51409+ break;
51410+ case GR_SEGVMOD:
51411+ if (unlikely(!(gr_status & GR_READY))) {
51412+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51413+ error = -EAGAIN;
51414+ break;
51415+ }
51416+
51417+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51418+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51419+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51420+ struct acl_subject_label *segvacl;
51421+ segvacl =
51422+ lookup_acl_subj_label(gr_usermode->segv_inode,
51423+ gr_usermode->segv_device,
51424+ current->role);
51425+ if (segvacl) {
51426+ segvacl->crashes = 0;
51427+ segvacl->expires = 0;
51428+ }
51429+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51430+ gr_remove_uid(gr_usermode->segv_uid);
51431+ }
51432+ } else {
51433+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51434+ error = -EPERM;
51435+ }
51436+ break;
51437+ case GR_SPROLE:
51438+ case GR_SPROLEPAM:
51439+ if (unlikely(!(gr_status & GR_READY))) {
51440+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51441+ error = -EAGAIN;
51442+ break;
51443+ }
51444+
51445+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51446+ current->role->expires = 0;
51447+ current->role->auth_attempts = 0;
51448+ }
51449+
51450+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51451+ time_after(current->role->expires, get_seconds())) {
51452+ error = -EBUSY;
51453+ goto out;
51454+ }
51455+
51456+ if (lookup_special_role_auth
51457+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51458+ && ((!sprole_salt && !sprole_sum)
51459+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51460+ char *p = "";
51461+ assign_special_role(gr_usermode->sp_role);
51462+ read_lock(&tasklist_lock);
51463+ if (current->real_parent)
51464+ p = current->real_parent->role->rolename;
51465+ read_unlock(&tasklist_lock);
51466+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51467+ p, acl_sp_role_value);
51468+ } else {
51469+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51470+ error = -EPERM;
51471+ if(!(current->role->auth_attempts++))
51472+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51473+
51474+ goto out;
51475+ }
51476+ break;
51477+ case GR_UNSPROLE:
51478+ if (unlikely(!(gr_status & GR_READY))) {
51479+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51480+ error = -EAGAIN;
51481+ break;
51482+ }
51483+
51484+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51485+ char *p = "";
51486+ int i = 0;
51487+
51488+ read_lock(&tasklist_lock);
51489+ if (current->real_parent) {
51490+ p = current->real_parent->role->rolename;
51491+ i = current->real_parent->acl_role_id;
51492+ }
51493+ read_unlock(&tasklist_lock);
51494+
51495+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51496+ gr_set_acls(1);
51497+ } else {
51498+ error = -EPERM;
51499+ goto out;
51500+ }
51501+ break;
51502+ default:
51503+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51504+ error = -EINVAL;
51505+ break;
51506+ }
51507+
51508+ if (error != -EPERM)
51509+ goto out;
51510+
51511+ if(!(gr_auth_attempts++))
51512+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51513+
51514+ out:
51515+ mutex_unlock(&gr_dev_mutex);
51516+ return error;
51517+}
51518+
51519+/* must be called with
51520+ rcu_read_lock();
51521+ read_lock(&tasklist_lock);
51522+ read_lock(&grsec_exec_file_lock);
51523+*/
51524+int gr_apply_subject_to_task(struct task_struct *task)
51525+{
51526+ struct acl_object_label *obj;
51527+ char *tmpname;
51528+ struct acl_subject_label *tmpsubj;
51529+ struct file *filp;
51530+ struct name_entry *nmatch;
51531+
51532+ filp = task->exec_file;
51533+ if (filp == NULL)
51534+ return 0;
51535+
51536+ /* the following is to apply the correct subject
51537+ on binaries running when the RBAC system
51538+ is enabled, when the binaries have been
51539+ replaced or deleted since their execution
51540+ -----
51541+ when the RBAC system starts, the inode/dev
51542+ from exec_file will be one the RBAC system
51543+ is unaware of. It only knows the inode/dev
51544+ of the present file on disk, or the absence
51545+ of it.
51546+ */
51547+ preempt_disable();
51548+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51549+
51550+ nmatch = lookup_name_entry(tmpname);
51551+ preempt_enable();
51552+ tmpsubj = NULL;
51553+ if (nmatch) {
51554+ if (nmatch->deleted)
51555+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51556+ else
51557+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51558+ if (tmpsubj != NULL)
51559+ task->acl = tmpsubj;
51560+ }
51561+ if (tmpsubj == NULL)
51562+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51563+ task->role);
51564+ if (task->acl) {
51565+ task->is_writable = 0;
51566+ /* ignore additional mmap checks for processes that are writable
51567+ by the default ACL */
51568+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51569+ if (unlikely(obj->mode & GR_WRITE))
51570+ task->is_writable = 1;
51571+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51572+ if (unlikely(obj->mode & GR_WRITE))
51573+ task->is_writable = 1;
51574+
51575+ gr_set_proc_res(task);
51576+
51577+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51578+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51579+#endif
51580+ } else {
51581+ return 1;
51582+ }
51583+
51584+ return 0;
51585+}
51586+
51587+int
51588+gr_set_acls(const int type)
51589+{
51590+ struct task_struct *task, *task2;
51591+ struct acl_role_label *role = current->role;
51592+ __u16 acl_role_id = current->acl_role_id;
51593+ const struct cred *cred;
51594+ int ret;
51595+
51596+ rcu_read_lock();
51597+ read_lock(&tasklist_lock);
51598+ read_lock(&grsec_exec_file_lock);
51599+ do_each_thread(task2, task) {
51600+ /* check to see if we're called from the exit handler,
51601+ if so, only replace ACLs that have inherited the admin
51602+ ACL */
51603+
51604+ if (type && (task->role != role ||
51605+ task->acl_role_id != acl_role_id))
51606+ continue;
51607+
51608+ task->acl_role_id = 0;
51609+ task->acl_sp_role = 0;
51610+
51611+ if (task->exec_file) {
51612+ cred = __task_cred(task);
51613+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51614+ ret = gr_apply_subject_to_task(task);
51615+ if (ret) {
51616+ read_unlock(&grsec_exec_file_lock);
51617+ read_unlock(&tasklist_lock);
51618+ rcu_read_unlock();
51619+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51620+ return ret;
51621+ }
51622+ } else {
51623+ // it's a kernel process
51624+ task->role = kernel_role;
51625+ task->acl = kernel_role->root_label;
51626+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51627+ task->acl->mode &= ~GR_PROCFIND;
51628+#endif
51629+ }
51630+ } while_each_thread(task2, task);
51631+ read_unlock(&grsec_exec_file_lock);
51632+ read_unlock(&tasklist_lock);
51633+ rcu_read_unlock();
51634+
51635+ return 0;
51636+}
51637+
51638+void
51639+gr_learn_resource(const struct task_struct *task,
51640+ const int res, const unsigned long wanted, const int gt)
51641+{
51642+ struct acl_subject_label *acl;
51643+ const struct cred *cred;
51644+
51645+ if (unlikely((gr_status & GR_READY) &&
51646+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51647+ goto skip_reslog;
51648+
51649+#ifdef CONFIG_GRKERNSEC_RESLOG
51650+ gr_log_resource(task, res, wanted, gt);
51651+#endif
51652+ skip_reslog:
51653+
51654+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51655+ return;
51656+
51657+ acl = task->acl;
51658+
51659+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51660+ !(acl->resmask & (1 << (unsigned short) res))))
51661+ return;
51662+
51663+ if (wanted >= acl->res[res].rlim_cur) {
51664+ unsigned long res_add;
51665+
51666+ res_add = wanted;
51667+ switch (res) {
51668+ case RLIMIT_CPU:
51669+ res_add += GR_RLIM_CPU_BUMP;
51670+ break;
51671+ case RLIMIT_FSIZE:
51672+ res_add += GR_RLIM_FSIZE_BUMP;
51673+ break;
51674+ case RLIMIT_DATA:
51675+ res_add += GR_RLIM_DATA_BUMP;
51676+ break;
51677+ case RLIMIT_STACK:
51678+ res_add += GR_RLIM_STACK_BUMP;
51679+ break;
51680+ case RLIMIT_CORE:
51681+ res_add += GR_RLIM_CORE_BUMP;
51682+ break;
51683+ case RLIMIT_RSS:
51684+ res_add += GR_RLIM_RSS_BUMP;
51685+ break;
51686+ case RLIMIT_NPROC:
51687+ res_add += GR_RLIM_NPROC_BUMP;
51688+ break;
51689+ case RLIMIT_NOFILE:
51690+ res_add += GR_RLIM_NOFILE_BUMP;
51691+ break;
51692+ case RLIMIT_MEMLOCK:
51693+ res_add += GR_RLIM_MEMLOCK_BUMP;
51694+ break;
51695+ case RLIMIT_AS:
51696+ res_add += GR_RLIM_AS_BUMP;
51697+ break;
51698+ case RLIMIT_LOCKS:
51699+ res_add += GR_RLIM_LOCKS_BUMP;
51700+ break;
51701+ case RLIMIT_SIGPENDING:
51702+ res_add += GR_RLIM_SIGPENDING_BUMP;
51703+ break;
51704+ case RLIMIT_MSGQUEUE:
51705+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51706+ break;
51707+ case RLIMIT_NICE:
51708+ res_add += GR_RLIM_NICE_BUMP;
51709+ break;
51710+ case RLIMIT_RTPRIO:
51711+ res_add += GR_RLIM_RTPRIO_BUMP;
51712+ break;
51713+ case RLIMIT_RTTIME:
51714+ res_add += GR_RLIM_RTTIME_BUMP;
51715+ break;
51716+ }
51717+
51718+ acl->res[res].rlim_cur = res_add;
51719+
51720+ if (wanted > acl->res[res].rlim_max)
51721+ acl->res[res].rlim_max = res_add;
51722+
51723+ /* only log the subject filename, since resource logging is supported for
51724+ single-subject learning only */
51725+ rcu_read_lock();
51726+ cred = __task_cred(task);
51727+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51728+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51729+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51730+ "", (unsigned long) res, &task->signal->saved_ip);
51731+ rcu_read_unlock();
51732+ }
51733+
51734+ return;
51735+}
51736+
51737+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51738+void
51739+pax_set_initial_flags(struct linux_binprm *bprm)
51740+{
51741+ struct task_struct *task = current;
51742+ struct acl_subject_label *proc;
51743+ unsigned long flags;
51744+
51745+ if (unlikely(!(gr_status & GR_READY)))
51746+ return;
51747+
51748+ flags = pax_get_flags(task);
51749+
51750+ proc = task->acl;
51751+
51752+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51753+ flags &= ~MF_PAX_PAGEEXEC;
51754+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51755+ flags &= ~MF_PAX_SEGMEXEC;
51756+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51757+ flags &= ~MF_PAX_RANDMMAP;
51758+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51759+ flags &= ~MF_PAX_EMUTRAMP;
51760+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51761+ flags &= ~MF_PAX_MPROTECT;
51762+
51763+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51764+ flags |= MF_PAX_PAGEEXEC;
51765+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51766+ flags |= MF_PAX_SEGMEXEC;
51767+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51768+ flags |= MF_PAX_RANDMMAP;
51769+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51770+ flags |= MF_PAX_EMUTRAMP;
51771+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51772+ flags |= MF_PAX_MPROTECT;
51773+
51774+ pax_set_flags(task, flags);
51775+
51776+ return;
51777+}
51778+#endif
51779+
51780+#ifdef CONFIG_SYSCTL
51781+/* Eric Biederman likes breaking userland ABI and every inode-based security
51782+ system to save 35kb of memory */
51783+
51784+/* we modify the passed in filename, but adjust it back before returning */
51785+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51786+{
51787+ struct name_entry *nmatch;
51788+ char *p, *lastp = NULL;
51789+ struct acl_object_label *obj = NULL, *tmp;
51790+ struct acl_subject_label *tmpsubj;
51791+ char c = '\0';
51792+
51793+ read_lock(&gr_inode_lock);
51794+
51795+ p = name + len - 1;
51796+ do {
51797+ nmatch = lookup_name_entry(name);
51798+ if (lastp != NULL)
51799+ *lastp = c;
51800+
51801+ if (nmatch == NULL)
51802+ goto next_component;
51803+ tmpsubj = current->acl;
51804+ do {
51805+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51806+ if (obj != NULL) {
51807+ tmp = obj->globbed;
51808+ while (tmp) {
51809+ if (!glob_match(tmp->filename, name)) {
51810+ obj = tmp;
51811+ goto found_obj;
51812+ }
51813+ tmp = tmp->next;
51814+ }
51815+ goto found_obj;
51816+ }
51817+ } while ((tmpsubj = tmpsubj->parent_subject));
51818+next_component:
51819+ /* end case */
51820+ if (p == name)
51821+ break;
51822+
51823+ while (*p != '/')
51824+ p--;
51825+ if (p == name)
51826+ lastp = p + 1;
51827+ else {
51828+ lastp = p;
51829+ p--;
51830+ }
51831+ c = *lastp;
51832+ *lastp = '\0';
51833+ } while (1);
51834+found_obj:
51835+ read_unlock(&gr_inode_lock);
51836+ /* obj returned will always be non-null */
51837+ return obj;
51838+}
51839+
51840+/* returns 0 when allowing, non-zero on error
51841+ op of 0 is used for readdir, so we don't log the names of hidden files
51842+*/
51843+__u32
51844+gr_handle_sysctl(const struct ctl_table *table, const int op)
51845+{
51846+ struct ctl_table *tmp;
51847+ const char *proc_sys = "/proc/sys";
51848+ char *path;
51849+ struct acl_object_label *obj;
51850+ unsigned short len = 0, pos = 0, depth = 0, i;
51851+ __u32 err = 0;
51852+ __u32 mode = 0;
51853+
51854+ if (unlikely(!(gr_status & GR_READY)))
51855+ return 0;
51856+
51857+ /* for now, ignore operations on non-sysctl entries if it's not a
51858+ readdir*/
51859+ if (table->child != NULL && op != 0)
51860+ return 0;
51861+
51862+ mode |= GR_FIND;
51863+ /* it's only a read if it's an entry, read on dirs is for readdir */
51864+ if (op & MAY_READ)
51865+ mode |= GR_READ;
51866+ if (op & MAY_WRITE)
51867+ mode |= GR_WRITE;
51868+
51869+ preempt_disable();
51870+
51871+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51872+
51873+ /* it's only a read/write if it's an actual entry, not a dir
51874+ (which are opened for readdir)
51875+ */
51876+
51877+ /* convert the requested sysctl entry into a pathname */
51878+
51879+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51880+ len += strlen(tmp->procname);
51881+ len++;
51882+ depth++;
51883+ }
51884+
51885+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51886+ /* deny */
51887+ goto out;
51888+ }
51889+
51890+ memset(path, 0, PAGE_SIZE);
51891+
51892+ memcpy(path, proc_sys, strlen(proc_sys));
51893+
51894+ pos += strlen(proc_sys);
51895+
51896+ for (; depth > 0; depth--) {
51897+ path[pos] = '/';
51898+ pos++;
51899+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51900+ if (depth == i) {
51901+ memcpy(path + pos, tmp->procname,
51902+ strlen(tmp->procname));
51903+ pos += strlen(tmp->procname);
51904+ }
51905+ i++;
51906+ }
51907+ }
51908+
51909+ obj = gr_lookup_by_name(path, pos);
51910+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51911+
51912+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51913+ ((err & mode) != mode))) {
51914+ __u32 new_mode = mode;
51915+
51916+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51917+
51918+ err = 0;
51919+ gr_log_learn_sysctl(path, new_mode);
51920+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51921+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51922+ err = -ENOENT;
51923+ } else if (!(err & GR_FIND)) {
51924+ err = -ENOENT;
51925+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51926+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51927+ path, (mode & GR_READ) ? " reading" : "",
51928+ (mode & GR_WRITE) ? " writing" : "");
51929+ err = -EACCES;
51930+ } else if ((err & mode) != mode) {
51931+ err = -EACCES;
51932+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51933+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51934+ path, (mode & GR_READ) ? " reading" : "",
51935+ (mode & GR_WRITE) ? " writing" : "");
51936+ err = 0;
51937+ } else
51938+ err = 0;
51939+
51940+ out:
51941+ preempt_enable();
51942+
51943+ return err;
51944+}
51945+#endif
51946+
51947+int
51948+gr_handle_proc_ptrace(struct task_struct *task)
51949+{
51950+ struct file *filp;
51951+ struct task_struct *tmp = task;
51952+ struct task_struct *curtemp = current;
51953+ __u32 retmode;
51954+
51955+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51956+ if (unlikely(!(gr_status & GR_READY)))
51957+ return 0;
51958+#endif
51959+
51960+ read_lock(&tasklist_lock);
51961+ read_lock(&grsec_exec_file_lock);
51962+ filp = task->exec_file;
51963+
51964+ while (tmp->pid > 0) {
51965+ if (tmp == curtemp)
51966+ break;
51967+ tmp = tmp->real_parent;
51968+ }
51969+
51970+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51971+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51972+ read_unlock(&grsec_exec_file_lock);
51973+ read_unlock(&tasklist_lock);
51974+ return 1;
51975+ }
51976+
51977+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51978+ if (!(gr_status & GR_READY)) {
51979+ read_unlock(&grsec_exec_file_lock);
51980+ read_unlock(&tasklist_lock);
51981+ return 0;
51982+ }
51983+#endif
51984+
51985+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51986+ read_unlock(&grsec_exec_file_lock);
51987+ read_unlock(&tasklist_lock);
51988+
51989+ if (retmode & GR_NOPTRACE)
51990+ return 1;
51991+
51992+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51993+ && (current->acl != task->acl || (current->acl != current->role->root_label
51994+ && current->pid != task->pid)))
51995+ return 1;
51996+
51997+ return 0;
51998+}
51999+
52000+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52001+{
52002+ if (unlikely(!(gr_status & GR_READY)))
52003+ return;
52004+
52005+ if (!(current->role->roletype & GR_ROLE_GOD))
52006+ return;
52007+
52008+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52009+ p->role->rolename, gr_task_roletype_to_char(p),
52010+ p->acl->filename);
52011+}
52012+
52013+int
52014+gr_handle_ptrace(struct task_struct *task, const long request)
52015+{
52016+ struct task_struct *tmp = task;
52017+ struct task_struct *curtemp = current;
52018+ __u32 retmode;
52019+
52020+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52021+ if (unlikely(!(gr_status & GR_READY)))
52022+ return 0;
52023+#endif
52024+
52025+ read_lock(&tasklist_lock);
52026+ while (tmp->pid > 0) {
52027+ if (tmp == curtemp)
52028+ break;
52029+ tmp = tmp->real_parent;
52030+ }
52031+
52032+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52033+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52034+ read_unlock(&tasklist_lock);
52035+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52036+ return 1;
52037+ }
52038+ read_unlock(&tasklist_lock);
52039+
52040+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52041+ if (!(gr_status & GR_READY))
52042+ return 0;
52043+#endif
52044+
52045+ read_lock(&grsec_exec_file_lock);
52046+ if (unlikely(!task->exec_file)) {
52047+ read_unlock(&grsec_exec_file_lock);
52048+ return 0;
52049+ }
52050+
52051+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52052+ read_unlock(&grsec_exec_file_lock);
52053+
52054+ if (retmode & GR_NOPTRACE) {
52055+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52056+ return 1;
52057+ }
52058+
52059+ if (retmode & GR_PTRACERD) {
52060+ switch (request) {
52061+ case PTRACE_SEIZE:
52062+ case PTRACE_POKETEXT:
52063+ case PTRACE_POKEDATA:
52064+ case PTRACE_POKEUSR:
52065+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52066+ case PTRACE_SETREGS:
52067+ case PTRACE_SETFPREGS:
52068+#endif
52069+#ifdef CONFIG_X86
52070+ case PTRACE_SETFPXREGS:
52071+#endif
52072+#ifdef CONFIG_ALTIVEC
52073+ case PTRACE_SETVRREGS:
52074+#endif
52075+ return 1;
52076+ default:
52077+ return 0;
52078+ }
52079+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
52080+ !(current->role->roletype & GR_ROLE_GOD) &&
52081+ (current->acl != task->acl)) {
52082+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52083+ return 1;
52084+ }
52085+
52086+ return 0;
52087+}
52088+
52089+static int is_writable_mmap(const struct file *filp)
52090+{
52091+ struct task_struct *task = current;
52092+ struct acl_object_label *obj, *obj2;
52093+
52094+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52095+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52096+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52097+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52098+ task->role->root_label);
52099+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52100+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52101+ return 1;
52102+ }
52103+ }
52104+ return 0;
52105+}
52106+
52107+int
52108+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52109+{
52110+ __u32 mode;
52111+
52112+ if (unlikely(!file || !(prot & PROT_EXEC)))
52113+ return 1;
52114+
52115+ if (is_writable_mmap(file))
52116+ return 0;
52117+
52118+ mode =
52119+ gr_search_file(file->f_path.dentry,
52120+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52121+ file->f_path.mnt);
52122+
52123+ if (!gr_tpe_allow(file))
52124+ return 0;
52125+
52126+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52127+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52128+ return 0;
52129+ } else if (unlikely(!(mode & GR_EXEC))) {
52130+ return 0;
52131+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52132+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52133+ return 1;
52134+ }
52135+
52136+ return 1;
52137+}
52138+
52139+int
52140+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52141+{
52142+ __u32 mode;
52143+
52144+ if (unlikely(!file || !(prot & PROT_EXEC)))
52145+ return 1;
52146+
52147+ if (is_writable_mmap(file))
52148+ return 0;
52149+
52150+ mode =
52151+ gr_search_file(file->f_path.dentry,
52152+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52153+ file->f_path.mnt);
52154+
52155+ if (!gr_tpe_allow(file))
52156+ return 0;
52157+
52158+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52159+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52160+ return 0;
52161+ } else if (unlikely(!(mode & GR_EXEC))) {
52162+ return 0;
52163+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52164+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52165+ return 1;
52166+ }
52167+
52168+ return 1;
52169+}
52170+
52171+void
52172+gr_acl_handle_psacct(struct task_struct *task, const long code)
52173+{
52174+ unsigned long runtime;
52175+ unsigned long cputime;
52176+ unsigned int wday, cday;
52177+ __u8 whr, chr;
52178+ __u8 wmin, cmin;
52179+ __u8 wsec, csec;
52180+ struct timespec timeval;
52181+
52182+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52183+ !(task->acl->mode & GR_PROCACCT)))
52184+ return;
52185+
52186+ do_posix_clock_monotonic_gettime(&timeval);
52187+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52188+ wday = runtime / (3600 * 24);
52189+ runtime -= wday * (3600 * 24);
52190+ whr = runtime / 3600;
52191+ runtime -= whr * 3600;
52192+ wmin = runtime / 60;
52193+ runtime -= wmin * 60;
52194+ wsec = runtime;
52195+
52196+ cputime = (task->utime + task->stime) / HZ;
52197+ cday = cputime / (3600 * 24);
52198+ cputime -= cday * (3600 * 24);
52199+ chr = cputime / 3600;
52200+ cputime -= chr * 3600;
52201+ cmin = cputime / 60;
52202+ cputime -= cmin * 60;
52203+ csec = cputime;
52204+
52205+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52206+
52207+ return;
52208+}
52209+
52210+void gr_set_kernel_label(struct task_struct *task)
52211+{
52212+ if (gr_status & GR_READY) {
52213+ task->role = kernel_role;
52214+ task->acl = kernel_role->root_label;
52215+ }
52216+ return;
52217+}
52218+
52219+#ifdef CONFIG_TASKSTATS
52220+int gr_is_taskstats_denied(int pid)
52221+{
52222+ struct task_struct *task;
52223+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52224+ const struct cred *cred;
52225+#endif
52226+ int ret = 0;
52227+
52228+ /* restrict taskstats viewing to un-chrooted root users
52229+ who have the 'view' subject flag if the RBAC system is enabled
52230+ */
52231+
52232+ rcu_read_lock();
52233+ read_lock(&tasklist_lock);
52234+ task = find_task_by_vpid(pid);
52235+ if (task) {
52236+#ifdef CONFIG_GRKERNSEC_CHROOT
52237+ if (proc_is_chrooted(task))
52238+ ret = -EACCES;
52239+#endif
52240+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52241+ cred = __task_cred(task);
52242+#ifdef CONFIG_GRKERNSEC_PROC_USER
52243+ if (cred->uid != 0)
52244+ ret = -EACCES;
52245+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52246+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52247+ ret = -EACCES;
52248+#endif
52249+#endif
52250+ if (gr_status & GR_READY) {
52251+ if (!(task->acl->mode & GR_VIEW))
52252+ ret = -EACCES;
52253+ }
52254+ } else
52255+ ret = -ENOENT;
52256+
52257+ read_unlock(&tasklist_lock);
52258+ rcu_read_unlock();
52259+
52260+ return ret;
52261+}
52262+#endif
52263+
52264+/* AUXV entries are filled via a descendant of search_binary_handler
52265+ after we've already applied the subject for the target
52266+*/
52267+int gr_acl_enable_at_secure(void)
52268+{
52269+ if (unlikely(!(gr_status & GR_READY)))
52270+ return 0;
52271+
52272+ if (current->acl->mode & GR_ATSECURE)
52273+ return 1;
52274+
52275+ return 0;
52276+}
52277+
52278+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52279+{
52280+ struct task_struct *task = current;
52281+ struct dentry *dentry = file->f_path.dentry;
52282+ struct vfsmount *mnt = file->f_path.mnt;
52283+ struct acl_object_label *obj, *tmp;
52284+ struct acl_subject_label *subj;
52285+ unsigned int bufsize;
52286+ int is_not_root;
52287+ char *path;
52288+ dev_t dev = __get_dev(dentry);
52289+
52290+ if (unlikely(!(gr_status & GR_READY)))
52291+ return 1;
52292+
52293+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52294+ return 1;
52295+
52296+ /* ignore Eric Biederman */
52297+ if (IS_PRIVATE(dentry->d_inode))
52298+ return 1;
52299+
52300+ subj = task->acl;
52301+ do {
52302+ obj = lookup_acl_obj_label(ino, dev, subj);
52303+ if (obj != NULL)
52304+ return (obj->mode & GR_FIND) ? 1 : 0;
52305+ } while ((subj = subj->parent_subject));
52306+
52307+ /* this is purely an optimization since we're looking for an object
52308+ for the directory we're doing a readdir on
52309+ if it's possible for any globbed object to match the entry we're
52310+ filling into the directory, then the object we find here will be
52311+ an anchor point with attached globbed objects
52312+ */
52313+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52314+ if (obj->globbed == NULL)
52315+ return (obj->mode & GR_FIND) ? 1 : 0;
52316+
52317+ is_not_root = ((obj->filename[0] == '/') &&
52318+ (obj->filename[1] == '\0')) ? 0 : 1;
52319+ bufsize = PAGE_SIZE - namelen - is_not_root;
52320+
52321+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52322+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52323+ return 1;
52324+
52325+ preempt_disable();
52326+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52327+ bufsize);
52328+
52329+ bufsize = strlen(path);
52330+
52331+ /* if base is "/", don't append an additional slash */
52332+ if (is_not_root)
52333+ *(path + bufsize) = '/';
52334+ memcpy(path + bufsize + is_not_root, name, namelen);
52335+ *(path + bufsize + namelen + is_not_root) = '\0';
52336+
52337+ tmp = obj->globbed;
52338+ while (tmp) {
52339+ if (!glob_match(tmp->filename, path)) {
52340+ preempt_enable();
52341+ return (tmp->mode & GR_FIND) ? 1 : 0;
52342+ }
52343+ tmp = tmp->next;
52344+ }
52345+ preempt_enable();
52346+ return (obj->mode & GR_FIND) ? 1 : 0;
52347+}
52348+
52349+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52350+EXPORT_SYMBOL(gr_acl_is_enabled);
52351+#endif
52352+EXPORT_SYMBOL(gr_learn_resource);
52353+EXPORT_SYMBOL(gr_set_kernel_label);
52354+#ifdef CONFIG_SECURITY
52355+EXPORT_SYMBOL(gr_check_user_change);
52356+EXPORT_SYMBOL(gr_check_group_change);
52357+#endif
52358+
52359diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52360new file mode 100644
52361index 0000000..34fefda
52362--- /dev/null
52363+++ b/grsecurity/gracl_alloc.c
52364@@ -0,0 +1,105 @@
52365+#include <linux/kernel.h>
52366+#include <linux/mm.h>
52367+#include <linux/slab.h>
52368+#include <linux/vmalloc.h>
52369+#include <linux/gracl.h>
52370+#include <linux/grsecurity.h>
52371+
52372+static unsigned long alloc_stack_next = 1;
52373+static unsigned long alloc_stack_size = 1;
52374+static void **alloc_stack;
52375+
52376+static __inline__ int
52377+alloc_pop(void)
52378+{
52379+ if (alloc_stack_next == 1)
52380+ return 0;
52381+
52382+ kfree(alloc_stack[alloc_stack_next - 2]);
52383+
52384+ alloc_stack_next--;
52385+
52386+ return 1;
52387+}
52388+
52389+static __inline__ int
52390+alloc_push(void *buf)
52391+{
52392+ if (alloc_stack_next >= alloc_stack_size)
52393+ return 1;
52394+
52395+ alloc_stack[alloc_stack_next - 1] = buf;
52396+
52397+ alloc_stack_next++;
52398+
52399+ return 0;
52400+}
52401+
52402+void *
52403+acl_alloc(unsigned long len)
52404+{
52405+ void *ret = NULL;
52406+
52407+ if (!len || len > PAGE_SIZE)
52408+ goto out;
52409+
52410+ ret = kmalloc(len, GFP_KERNEL);
52411+
52412+ if (ret) {
52413+ if (alloc_push(ret)) {
52414+ kfree(ret);
52415+ ret = NULL;
52416+ }
52417+ }
52418+
52419+out:
52420+ return ret;
52421+}
52422+
52423+void *
52424+acl_alloc_num(unsigned long num, unsigned long len)
52425+{
52426+ if (!len || (num > (PAGE_SIZE / len)))
52427+ return NULL;
52428+
52429+ return acl_alloc(num * len);
52430+}
52431+
52432+void
52433+acl_free_all(void)
52434+{
52435+ if (gr_acl_is_enabled() || !alloc_stack)
52436+ return;
52437+
52438+ while (alloc_pop()) ;
52439+
52440+ if (alloc_stack) {
52441+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52442+ kfree(alloc_stack);
52443+ else
52444+ vfree(alloc_stack);
52445+ }
52446+
52447+ alloc_stack = NULL;
52448+ alloc_stack_size = 1;
52449+ alloc_stack_next = 1;
52450+
52451+ return;
52452+}
52453+
52454+int
52455+acl_alloc_stack_init(unsigned long size)
52456+{
52457+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52458+ alloc_stack =
52459+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52460+ else
52461+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52462+
52463+ alloc_stack_size = size;
52464+
52465+ if (!alloc_stack)
52466+ return 0;
52467+ else
52468+ return 1;
52469+}
52470diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52471new file mode 100644
52472index 0000000..955ddfb
52473--- /dev/null
52474+++ b/grsecurity/gracl_cap.c
52475@@ -0,0 +1,101 @@
52476+#include <linux/kernel.h>
52477+#include <linux/module.h>
52478+#include <linux/sched.h>
52479+#include <linux/gracl.h>
52480+#include <linux/grsecurity.h>
52481+#include <linux/grinternal.h>
52482+
52483+extern const char *captab_log[];
52484+extern int captab_log_entries;
52485+
52486+int
52487+gr_acl_is_capable(const int cap)
52488+{
52489+ struct task_struct *task = current;
52490+ const struct cred *cred = current_cred();
52491+ struct acl_subject_label *curracl;
52492+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52493+ kernel_cap_t cap_audit = __cap_empty_set;
52494+
52495+ if (!gr_acl_is_enabled())
52496+ return 1;
52497+
52498+ curracl = task->acl;
52499+
52500+ cap_drop = curracl->cap_lower;
52501+ cap_mask = curracl->cap_mask;
52502+ cap_audit = curracl->cap_invert_audit;
52503+
52504+ while ((curracl = curracl->parent_subject)) {
52505+ /* if the cap isn't specified in the current computed mask but is specified in the
52506+ current level subject, and is lowered in the current level subject, then add
52507+ it to the set of dropped capabilities
52508+ otherwise, add the current level subject's mask to the current computed mask
52509+ */
52510+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52511+ cap_raise(cap_mask, cap);
52512+ if (cap_raised(curracl->cap_lower, cap))
52513+ cap_raise(cap_drop, cap);
52514+ if (cap_raised(curracl->cap_invert_audit, cap))
52515+ cap_raise(cap_audit, cap);
52516+ }
52517+ }
52518+
52519+ if (!cap_raised(cap_drop, cap)) {
52520+ if (cap_raised(cap_audit, cap))
52521+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52522+ return 1;
52523+ }
52524+
52525+ curracl = task->acl;
52526+
52527+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52528+ && cap_raised(cred->cap_effective, cap)) {
52529+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52530+ task->role->roletype, cred->uid,
52531+ cred->gid, task->exec_file ?
52532+ gr_to_filename(task->exec_file->f_path.dentry,
52533+ task->exec_file->f_path.mnt) : curracl->filename,
52534+ curracl->filename, 0UL,
52535+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52536+ return 1;
52537+ }
52538+
52539+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52540+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52541+ return 0;
52542+}
52543+
52544+int
52545+gr_acl_is_capable_nolog(const int cap)
52546+{
52547+ struct acl_subject_label *curracl;
52548+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52549+
52550+ if (!gr_acl_is_enabled())
52551+ return 1;
52552+
52553+ curracl = current->acl;
52554+
52555+ cap_drop = curracl->cap_lower;
52556+ cap_mask = curracl->cap_mask;
52557+
52558+ while ((curracl = curracl->parent_subject)) {
52559+ /* if the cap isn't specified in the current computed mask but is specified in the
52560+ current level subject, and is lowered in the current level subject, then add
52561+ it to the set of dropped capabilities
52562+ otherwise, add the current level subject's mask to the current computed mask
52563+ */
52564+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52565+ cap_raise(cap_mask, cap);
52566+ if (cap_raised(curracl->cap_lower, cap))
52567+ cap_raise(cap_drop, cap);
52568+ }
52569+ }
52570+
52571+ if (!cap_raised(cap_drop, cap))
52572+ return 1;
52573+
52574+ return 0;
52575+}
52576+
52577diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52578new file mode 100644
52579index 0000000..4eda5c3
52580--- /dev/null
52581+++ b/grsecurity/gracl_fs.c
52582@@ -0,0 +1,433 @@
52583+#include <linux/kernel.h>
52584+#include <linux/sched.h>
52585+#include <linux/types.h>
52586+#include <linux/fs.h>
52587+#include <linux/file.h>
52588+#include <linux/stat.h>
52589+#include <linux/grsecurity.h>
52590+#include <linux/grinternal.h>
52591+#include <linux/gracl.h>
52592+
52593+__u32
52594+gr_acl_handle_hidden_file(const struct dentry * dentry,
52595+ const struct vfsmount * mnt)
52596+{
52597+ __u32 mode;
52598+
52599+ if (unlikely(!dentry->d_inode))
52600+ return GR_FIND;
52601+
52602+ mode =
52603+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52604+
52605+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52606+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52607+ return mode;
52608+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52609+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52610+ return 0;
52611+ } else if (unlikely(!(mode & GR_FIND)))
52612+ return 0;
52613+
52614+ return GR_FIND;
52615+}
52616+
52617+__u32
52618+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52619+ int acc_mode)
52620+{
52621+ __u32 reqmode = GR_FIND;
52622+ __u32 mode;
52623+
52624+ if (unlikely(!dentry->d_inode))
52625+ return reqmode;
52626+
52627+ if (acc_mode & MAY_APPEND)
52628+ reqmode |= GR_APPEND;
52629+ else if (acc_mode & MAY_WRITE)
52630+ reqmode |= GR_WRITE;
52631+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52632+ reqmode |= GR_READ;
52633+
52634+ mode =
52635+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52636+ mnt);
52637+
52638+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52639+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52640+ reqmode & GR_READ ? " reading" : "",
52641+ reqmode & GR_WRITE ? " writing" : reqmode &
52642+ GR_APPEND ? " appending" : "");
52643+ return reqmode;
52644+ } else
52645+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52646+ {
52647+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52648+ reqmode & GR_READ ? " reading" : "",
52649+ reqmode & GR_WRITE ? " writing" : reqmode &
52650+ GR_APPEND ? " appending" : "");
52651+ return 0;
52652+ } else if (unlikely((mode & reqmode) != reqmode))
52653+ return 0;
52654+
52655+ return reqmode;
52656+}
52657+
52658+__u32
52659+gr_acl_handle_creat(const struct dentry * dentry,
52660+ const struct dentry * p_dentry,
52661+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52662+ const int imode)
52663+{
52664+ __u32 reqmode = GR_WRITE | GR_CREATE;
52665+ __u32 mode;
52666+
52667+ if (acc_mode & MAY_APPEND)
52668+ reqmode |= GR_APPEND;
52669+ // if a directory was required or the directory already exists, then
52670+ // don't count this open as a read
52671+ if ((acc_mode & MAY_READ) &&
52672+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52673+ reqmode |= GR_READ;
52674+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52675+ reqmode |= GR_SETID;
52676+
52677+ mode =
52678+ gr_check_create(dentry, p_dentry, p_mnt,
52679+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52680+
52681+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52682+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52683+ reqmode & GR_READ ? " reading" : "",
52684+ reqmode & GR_WRITE ? " writing" : reqmode &
52685+ GR_APPEND ? " appending" : "");
52686+ return reqmode;
52687+ } else
52688+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52689+ {
52690+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52691+ reqmode & GR_READ ? " reading" : "",
52692+ reqmode & GR_WRITE ? " writing" : reqmode &
52693+ GR_APPEND ? " appending" : "");
52694+ return 0;
52695+ } else if (unlikely((mode & reqmode) != reqmode))
52696+ return 0;
52697+
52698+ return reqmode;
52699+}
52700+
52701+__u32
52702+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52703+ const int fmode)
52704+{
52705+ __u32 mode, reqmode = GR_FIND;
52706+
52707+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52708+ reqmode |= GR_EXEC;
52709+ if (fmode & S_IWOTH)
52710+ reqmode |= GR_WRITE;
52711+ if (fmode & S_IROTH)
52712+ reqmode |= GR_READ;
52713+
52714+ mode =
52715+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52716+ mnt);
52717+
52718+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52719+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52720+ reqmode & GR_READ ? " reading" : "",
52721+ reqmode & GR_WRITE ? " writing" : "",
52722+ reqmode & GR_EXEC ? " executing" : "");
52723+ return reqmode;
52724+ } else
52725+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52726+ {
52727+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52728+ reqmode & GR_READ ? " reading" : "",
52729+ reqmode & GR_WRITE ? " writing" : "",
52730+ reqmode & GR_EXEC ? " executing" : "");
52731+ return 0;
52732+ } else if (unlikely((mode & reqmode) != reqmode))
52733+ return 0;
52734+
52735+ return reqmode;
52736+}
52737+
52738+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52739+{
52740+ __u32 mode;
52741+
52742+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52743+
52744+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52745+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52746+ return mode;
52747+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52748+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52749+ return 0;
52750+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52751+ return 0;
52752+
52753+ return (reqmode);
52754+}
52755+
52756+__u32
52757+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52758+{
52759+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52760+}
52761+
52762+__u32
52763+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52764+{
52765+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52766+}
52767+
52768+__u32
52769+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52770+{
52771+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52772+}
52773+
52774+__u32
52775+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52776+{
52777+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52778+}
52779+
52780+__u32
52781+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52782+ mode_t mode)
52783+{
52784+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52785+ return 1;
52786+
52787+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52788+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52789+ GR_FCHMOD_ACL_MSG);
52790+ } else {
52791+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52792+ }
52793+}
52794+
52795+__u32
52796+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52797+ mode_t mode)
52798+{
52799+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52800+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52801+ GR_CHMOD_ACL_MSG);
52802+ } else {
52803+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52804+ }
52805+}
52806+
52807+__u32
52808+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52809+{
52810+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52811+}
52812+
52813+__u32
52814+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52815+{
52816+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52817+}
52818+
52819+__u32
52820+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52821+{
52822+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52823+}
52824+
52825+__u32
52826+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52827+{
52828+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52829+ GR_UNIXCONNECT_ACL_MSG);
52830+}
52831+
52832+/* hardlinks require at minimum create and link permission,
52833+ any additional privilege required is based on the
52834+ privilege of the file being linked to
52835+*/
52836+__u32
52837+gr_acl_handle_link(const struct dentry * new_dentry,
52838+ const struct dentry * parent_dentry,
52839+ const struct vfsmount * parent_mnt,
52840+ const struct dentry * old_dentry,
52841+ const struct vfsmount * old_mnt, const char *to)
52842+{
52843+ __u32 mode;
52844+ __u32 needmode = GR_CREATE | GR_LINK;
52845+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52846+
52847+ mode =
52848+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52849+ old_mnt);
52850+
52851+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52852+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52853+ return mode;
52854+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52855+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52856+ return 0;
52857+ } else if (unlikely((mode & needmode) != needmode))
52858+ return 0;
52859+
52860+ return 1;
52861+}
52862+
52863+__u32
52864+gr_acl_handle_symlink(const struct dentry * new_dentry,
52865+ const struct dentry * parent_dentry,
52866+ const struct vfsmount * parent_mnt, const char *from)
52867+{
52868+ __u32 needmode = GR_WRITE | GR_CREATE;
52869+ __u32 mode;
52870+
52871+ mode =
52872+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52873+ GR_CREATE | GR_AUDIT_CREATE |
52874+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52875+
52876+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52877+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52878+ return mode;
52879+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52880+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52881+ return 0;
52882+ } else if (unlikely((mode & needmode) != needmode))
52883+ return 0;
52884+
52885+ return (GR_WRITE | GR_CREATE);
52886+}
52887+
52888+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52889+{
52890+ __u32 mode;
52891+
52892+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52893+
52894+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52895+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52896+ return mode;
52897+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52898+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52899+ return 0;
52900+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52901+ return 0;
52902+
52903+ return (reqmode);
52904+}
52905+
52906+__u32
52907+gr_acl_handle_mknod(const struct dentry * new_dentry,
52908+ const struct dentry * parent_dentry,
52909+ const struct vfsmount * parent_mnt,
52910+ const int mode)
52911+{
52912+ __u32 reqmode = GR_WRITE | GR_CREATE;
52913+ if (unlikely(mode & (S_ISUID | S_ISGID)))
52914+ reqmode |= GR_SETID;
52915+
52916+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52917+ reqmode, GR_MKNOD_ACL_MSG);
52918+}
52919+
52920+__u32
52921+gr_acl_handle_mkdir(const struct dentry *new_dentry,
52922+ const struct dentry *parent_dentry,
52923+ const struct vfsmount *parent_mnt)
52924+{
52925+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52926+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52927+}
52928+
52929+#define RENAME_CHECK_SUCCESS(old, new) \
52930+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52931+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52932+
52933+int
52934+gr_acl_handle_rename(struct dentry *new_dentry,
52935+ struct dentry *parent_dentry,
52936+ const struct vfsmount *parent_mnt,
52937+ struct dentry *old_dentry,
52938+ struct inode *old_parent_inode,
52939+ struct vfsmount *old_mnt, const char *newname)
52940+{
52941+ __u32 comp1, comp2;
52942+ int error = 0;
52943+
52944+ if (unlikely(!gr_acl_is_enabled()))
52945+ return 0;
52946+
52947+ if (!new_dentry->d_inode) {
52948+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52949+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52950+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52951+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52952+ GR_DELETE | GR_AUDIT_DELETE |
52953+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52954+ GR_SUPPRESS, old_mnt);
52955+ } else {
52956+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52957+ GR_CREATE | GR_DELETE |
52958+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52959+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52960+ GR_SUPPRESS, parent_mnt);
52961+ comp2 =
52962+ gr_search_file(old_dentry,
52963+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52964+ GR_DELETE | GR_AUDIT_DELETE |
52965+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52966+ }
52967+
52968+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52969+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52970+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52971+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52972+ && !(comp2 & GR_SUPPRESS)) {
52973+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52974+ error = -EACCES;
52975+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52976+ error = -EACCES;
52977+
52978+ return error;
52979+}
52980+
52981+void
52982+gr_acl_handle_exit(void)
52983+{
52984+ u16 id;
52985+ char *rolename;
52986+ struct file *exec_file;
52987+
52988+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52989+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52990+ id = current->acl_role_id;
52991+ rolename = current->role->rolename;
52992+ gr_set_acls(1);
52993+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52994+ }
52995+
52996+ write_lock(&grsec_exec_file_lock);
52997+ exec_file = current->exec_file;
52998+ current->exec_file = NULL;
52999+ write_unlock(&grsec_exec_file_lock);
53000+
53001+ if (exec_file)
53002+ fput(exec_file);
53003+}
53004+
53005+int
53006+gr_acl_handle_procpidmem(const struct task_struct *task)
53007+{
53008+ if (unlikely(!gr_acl_is_enabled()))
53009+ return 0;
53010+
53011+ if (task != current && task->acl->mode & GR_PROTPROCFD)
53012+ return -EACCES;
53013+
53014+ return 0;
53015+}
53016diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53017new file mode 100644
53018index 0000000..17050ca
53019--- /dev/null
53020+++ b/grsecurity/gracl_ip.c
53021@@ -0,0 +1,381 @@
53022+#include <linux/kernel.h>
53023+#include <asm/uaccess.h>
53024+#include <asm/errno.h>
53025+#include <net/sock.h>
53026+#include <linux/file.h>
53027+#include <linux/fs.h>
53028+#include <linux/net.h>
53029+#include <linux/in.h>
53030+#include <linux/skbuff.h>
53031+#include <linux/ip.h>
53032+#include <linux/udp.h>
53033+#include <linux/types.h>
53034+#include <linux/sched.h>
53035+#include <linux/netdevice.h>
53036+#include <linux/inetdevice.h>
53037+#include <linux/gracl.h>
53038+#include <linux/grsecurity.h>
53039+#include <linux/grinternal.h>
53040+
53041+#define GR_BIND 0x01
53042+#define GR_CONNECT 0x02
53043+#define GR_INVERT 0x04
53044+#define GR_BINDOVERRIDE 0x08
53045+#define GR_CONNECTOVERRIDE 0x10
53046+#define GR_SOCK_FAMILY 0x20
53047+
53048+static const char * gr_protocols[IPPROTO_MAX] = {
53049+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53050+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53051+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53052+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53053+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53054+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53055+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53056+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53057+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53058+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53059+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53060+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53061+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53062+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53063+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53064+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53065+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53066+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53067+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53068+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53069+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53070+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53071+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53072+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53073+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53074+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53075+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53076+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53077+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53078+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53079+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53080+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53081+ };
53082+
53083+static const char * gr_socktypes[SOCK_MAX] = {
53084+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53085+ "unknown:7", "unknown:8", "unknown:9", "packet"
53086+ };
53087+
53088+static const char * gr_sockfamilies[AF_MAX+1] = {
53089+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53090+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53091+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53092+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53093+ };
53094+
53095+const char *
53096+gr_proto_to_name(unsigned char proto)
53097+{
53098+ return gr_protocols[proto];
53099+}
53100+
53101+const char *
53102+gr_socktype_to_name(unsigned char type)
53103+{
53104+ return gr_socktypes[type];
53105+}
53106+
53107+const char *
53108+gr_sockfamily_to_name(unsigned char family)
53109+{
53110+ return gr_sockfamilies[family];
53111+}
53112+
53113+int
53114+gr_search_socket(const int domain, const int type, const int protocol)
53115+{
53116+ struct acl_subject_label *curr;
53117+ const struct cred *cred = current_cred();
53118+
53119+ if (unlikely(!gr_acl_is_enabled()))
53120+ goto exit;
53121+
53122+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
53123+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53124+ goto exit; // let the kernel handle it
53125+
53126+ curr = current->acl;
53127+
53128+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53129+ /* the family is allowed, if this is PF_INET allow it only if
53130+ the extra sock type/protocol checks pass */
53131+ if (domain == PF_INET)
53132+ goto inet_check;
53133+ goto exit;
53134+ } else {
53135+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53136+ __u32 fakeip = 0;
53137+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53138+ current->role->roletype, cred->uid,
53139+ cred->gid, current->exec_file ?
53140+ gr_to_filename(current->exec_file->f_path.dentry,
53141+ current->exec_file->f_path.mnt) :
53142+ curr->filename, curr->filename,
53143+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53144+ &current->signal->saved_ip);
53145+ goto exit;
53146+ }
53147+ goto exit_fail;
53148+ }
53149+
53150+inet_check:
53151+ /* the rest of this checking is for IPv4 only */
53152+ if (!curr->ips)
53153+ goto exit;
53154+
53155+ if ((curr->ip_type & (1 << type)) &&
53156+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53157+ goto exit;
53158+
53159+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53160+ /* we don't place acls on raw sockets , and sometimes
53161+ dgram/ip sockets are opened for ioctl and not
53162+ bind/connect, so we'll fake a bind learn log */
53163+ if (type == SOCK_RAW || type == SOCK_PACKET) {
53164+ __u32 fakeip = 0;
53165+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53166+ current->role->roletype, cred->uid,
53167+ cred->gid, current->exec_file ?
53168+ gr_to_filename(current->exec_file->f_path.dentry,
53169+ current->exec_file->f_path.mnt) :
53170+ curr->filename, curr->filename,
53171+ &fakeip, 0, type,
53172+ protocol, GR_CONNECT, &current->signal->saved_ip);
53173+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53174+ __u32 fakeip = 0;
53175+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53176+ current->role->roletype, cred->uid,
53177+ cred->gid, current->exec_file ?
53178+ gr_to_filename(current->exec_file->f_path.dentry,
53179+ current->exec_file->f_path.mnt) :
53180+ curr->filename, curr->filename,
53181+ &fakeip, 0, type,
53182+ protocol, GR_BIND, &current->signal->saved_ip);
53183+ }
53184+ /* we'll log when they use connect or bind */
53185+ goto exit;
53186+ }
53187+
53188+exit_fail:
53189+ if (domain == PF_INET)
53190+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53191+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
53192+ else
53193+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53194+ gr_socktype_to_name(type), protocol);
53195+
53196+ return 0;
53197+exit:
53198+ return 1;
53199+}
53200+
53201+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53202+{
53203+ if ((ip->mode & mode) &&
53204+ (ip_port >= ip->low) &&
53205+ (ip_port <= ip->high) &&
53206+ ((ntohl(ip_addr) & our_netmask) ==
53207+ (ntohl(our_addr) & our_netmask))
53208+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53209+ && (ip->type & (1 << type))) {
53210+ if (ip->mode & GR_INVERT)
53211+ return 2; // specifically denied
53212+ else
53213+ return 1; // allowed
53214+ }
53215+
53216+ return 0; // not specifically allowed, may continue parsing
53217+}
53218+
53219+static int
53220+gr_search_connectbind(const int full_mode, struct sock *sk,
53221+ struct sockaddr_in *addr, const int type)
53222+{
53223+ char iface[IFNAMSIZ] = {0};
53224+ struct acl_subject_label *curr;
53225+ struct acl_ip_label *ip;
53226+ struct inet_sock *isk;
53227+ struct net_device *dev;
53228+ struct in_device *idev;
53229+ unsigned long i;
53230+ int ret;
53231+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53232+ __u32 ip_addr = 0;
53233+ __u32 our_addr;
53234+ __u32 our_netmask;
53235+ char *p;
53236+ __u16 ip_port = 0;
53237+ const struct cred *cred = current_cred();
53238+
53239+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53240+ return 0;
53241+
53242+ curr = current->acl;
53243+ isk = inet_sk(sk);
53244+
53245+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53246+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53247+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53248+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53249+ struct sockaddr_in saddr;
53250+ int err;
53251+
53252+ saddr.sin_family = AF_INET;
53253+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53254+ saddr.sin_port = isk->inet_sport;
53255+
53256+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53257+ if (err)
53258+ return err;
53259+
53260+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53261+ if (err)
53262+ return err;
53263+ }
53264+
53265+ if (!curr->ips)
53266+ return 0;
53267+
53268+ ip_addr = addr->sin_addr.s_addr;
53269+ ip_port = ntohs(addr->sin_port);
53270+
53271+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53272+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53273+ current->role->roletype, cred->uid,
53274+ cred->gid, current->exec_file ?
53275+ gr_to_filename(current->exec_file->f_path.dentry,
53276+ current->exec_file->f_path.mnt) :
53277+ curr->filename, curr->filename,
53278+ &ip_addr, ip_port, type,
53279+ sk->sk_protocol, mode, &current->signal->saved_ip);
53280+ return 0;
53281+ }
53282+
53283+ for (i = 0; i < curr->ip_num; i++) {
53284+ ip = *(curr->ips + i);
53285+ if (ip->iface != NULL) {
53286+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53287+ p = strchr(iface, ':');
53288+ if (p != NULL)
53289+ *p = '\0';
53290+ dev = dev_get_by_name(sock_net(sk), iface);
53291+ if (dev == NULL)
53292+ continue;
53293+ idev = in_dev_get(dev);
53294+ if (idev == NULL) {
53295+ dev_put(dev);
53296+ continue;
53297+ }
53298+ rcu_read_lock();
53299+ for_ifa(idev) {
53300+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53301+ our_addr = ifa->ifa_address;
53302+ our_netmask = 0xffffffff;
53303+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53304+ if (ret == 1) {
53305+ rcu_read_unlock();
53306+ in_dev_put(idev);
53307+ dev_put(dev);
53308+ return 0;
53309+ } else if (ret == 2) {
53310+ rcu_read_unlock();
53311+ in_dev_put(idev);
53312+ dev_put(dev);
53313+ goto denied;
53314+ }
53315+ }
53316+ } endfor_ifa(idev);
53317+ rcu_read_unlock();
53318+ in_dev_put(idev);
53319+ dev_put(dev);
53320+ } else {
53321+ our_addr = ip->addr;
53322+ our_netmask = ip->netmask;
53323+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53324+ if (ret == 1)
53325+ return 0;
53326+ else if (ret == 2)
53327+ goto denied;
53328+ }
53329+ }
53330+
53331+denied:
53332+ if (mode == GR_BIND)
53333+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53334+ else if (mode == GR_CONNECT)
53335+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53336+
53337+ return -EACCES;
53338+}
53339+
53340+int
53341+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53342+{
53343+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53344+}
53345+
53346+int
53347+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53348+{
53349+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53350+}
53351+
53352+int gr_search_listen(struct socket *sock)
53353+{
53354+ struct sock *sk = sock->sk;
53355+ struct sockaddr_in addr;
53356+
53357+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53358+ addr.sin_port = inet_sk(sk)->inet_sport;
53359+
53360+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53361+}
53362+
53363+int gr_search_accept(struct socket *sock)
53364+{
53365+ struct sock *sk = sock->sk;
53366+ struct sockaddr_in addr;
53367+
53368+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53369+ addr.sin_port = inet_sk(sk)->inet_sport;
53370+
53371+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53372+}
53373+
53374+int
53375+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53376+{
53377+ if (addr)
53378+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53379+ else {
53380+ struct sockaddr_in sin;
53381+ const struct inet_sock *inet = inet_sk(sk);
53382+
53383+ sin.sin_addr.s_addr = inet->inet_daddr;
53384+ sin.sin_port = inet->inet_dport;
53385+
53386+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53387+ }
53388+}
53389+
53390+int
53391+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53392+{
53393+ struct sockaddr_in sin;
53394+
53395+ if (unlikely(skb->len < sizeof (struct udphdr)))
53396+ return 0; // skip this packet
53397+
53398+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53399+ sin.sin_port = udp_hdr(skb)->source;
53400+
53401+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53402+}
53403diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53404new file mode 100644
53405index 0000000..25f54ef
53406--- /dev/null
53407+++ b/grsecurity/gracl_learn.c
53408@@ -0,0 +1,207 @@
53409+#include <linux/kernel.h>
53410+#include <linux/mm.h>
53411+#include <linux/sched.h>
53412+#include <linux/poll.h>
53413+#include <linux/string.h>
53414+#include <linux/file.h>
53415+#include <linux/types.h>
53416+#include <linux/vmalloc.h>
53417+#include <linux/grinternal.h>
53418+
53419+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53420+ size_t count, loff_t *ppos);
53421+extern int gr_acl_is_enabled(void);
53422+
53423+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53424+static int gr_learn_attached;
53425+
53426+/* use a 512k buffer */
53427+#define LEARN_BUFFER_SIZE (512 * 1024)
53428+
53429+static DEFINE_SPINLOCK(gr_learn_lock);
53430+static DEFINE_MUTEX(gr_learn_user_mutex);
53431+
53432+/* we need to maintain two buffers, so that the kernel context of grlearn
53433+ uses a semaphore around the userspace copying, and the other kernel contexts
53434+ use a spinlock when copying into the buffer, since they cannot sleep
53435+*/
53436+static char *learn_buffer;
53437+static char *learn_buffer_user;
53438+static int learn_buffer_len;
53439+static int learn_buffer_user_len;
53440+
53441+static ssize_t
53442+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53443+{
53444+ DECLARE_WAITQUEUE(wait, current);
53445+ ssize_t retval = 0;
53446+
53447+ add_wait_queue(&learn_wait, &wait);
53448+ set_current_state(TASK_INTERRUPTIBLE);
53449+ do {
53450+ mutex_lock(&gr_learn_user_mutex);
53451+ spin_lock(&gr_learn_lock);
53452+ if (learn_buffer_len)
53453+ break;
53454+ spin_unlock(&gr_learn_lock);
53455+ mutex_unlock(&gr_learn_user_mutex);
53456+ if (file->f_flags & O_NONBLOCK) {
53457+ retval = -EAGAIN;
53458+ goto out;
53459+ }
53460+ if (signal_pending(current)) {
53461+ retval = -ERESTARTSYS;
53462+ goto out;
53463+ }
53464+
53465+ schedule();
53466+ } while (1);
53467+
53468+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53469+ learn_buffer_user_len = learn_buffer_len;
53470+ retval = learn_buffer_len;
53471+ learn_buffer_len = 0;
53472+
53473+ spin_unlock(&gr_learn_lock);
53474+
53475+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53476+ retval = -EFAULT;
53477+
53478+ mutex_unlock(&gr_learn_user_mutex);
53479+out:
53480+ set_current_state(TASK_RUNNING);
53481+ remove_wait_queue(&learn_wait, &wait);
53482+ return retval;
53483+}
53484+
53485+static unsigned int
53486+poll_learn(struct file * file, poll_table * wait)
53487+{
53488+ poll_wait(file, &learn_wait, wait);
53489+
53490+ if (learn_buffer_len)
53491+ return (POLLIN | POLLRDNORM);
53492+
53493+ return 0;
53494+}
53495+
53496+void
53497+gr_clear_learn_entries(void)
53498+{
53499+ char *tmp;
53500+
53501+ mutex_lock(&gr_learn_user_mutex);
53502+ spin_lock(&gr_learn_lock);
53503+ tmp = learn_buffer;
53504+ learn_buffer = NULL;
53505+ spin_unlock(&gr_learn_lock);
53506+ if (tmp)
53507+ vfree(tmp);
53508+ if (learn_buffer_user != NULL) {
53509+ vfree(learn_buffer_user);
53510+ learn_buffer_user = NULL;
53511+ }
53512+ learn_buffer_len = 0;
53513+ mutex_unlock(&gr_learn_user_mutex);
53514+
53515+ return;
53516+}
53517+
53518+void
53519+gr_add_learn_entry(const char *fmt, ...)
53520+{
53521+ va_list args;
53522+ unsigned int len;
53523+
53524+ if (!gr_learn_attached)
53525+ return;
53526+
53527+ spin_lock(&gr_learn_lock);
53528+
53529+ /* leave a gap at the end so we know when it's "full" but don't have to
53530+ compute the exact length of the string we're trying to append
53531+ */
53532+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53533+ spin_unlock(&gr_learn_lock);
53534+ wake_up_interruptible(&learn_wait);
53535+ return;
53536+ }
53537+ if (learn_buffer == NULL) {
53538+ spin_unlock(&gr_learn_lock);
53539+ return;
53540+ }
53541+
53542+ va_start(args, fmt);
53543+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53544+ va_end(args);
53545+
53546+ learn_buffer_len += len + 1;
53547+
53548+ spin_unlock(&gr_learn_lock);
53549+ wake_up_interruptible(&learn_wait);
53550+
53551+ return;
53552+}
53553+
53554+static int
53555+open_learn(struct inode *inode, struct file *file)
53556+{
53557+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53558+ return -EBUSY;
53559+ if (file->f_mode & FMODE_READ) {
53560+ int retval = 0;
53561+ mutex_lock(&gr_learn_user_mutex);
53562+ if (learn_buffer == NULL)
53563+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53564+ if (learn_buffer_user == NULL)
53565+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53566+ if (learn_buffer == NULL) {
53567+ retval = -ENOMEM;
53568+ goto out_error;
53569+ }
53570+ if (learn_buffer_user == NULL) {
53571+ retval = -ENOMEM;
53572+ goto out_error;
53573+ }
53574+ learn_buffer_len = 0;
53575+ learn_buffer_user_len = 0;
53576+ gr_learn_attached = 1;
53577+out_error:
53578+ mutex_unlock(&gr_learn_user_mutex);
53579+ return retval;
53580+ }
53581+ return 0;
53582+}
53583+
53584+static int
53585+close_learn(struct inode *inode, struct file *file)
53586+{
53587+ if (file->f_mode & FMODE_READ) {
53588+ char *tmp = NULL;
53589+ mutex_lock(&gr_learn_user_mutex);
53590+ spin_lock(&gr_learn_lock);
53591+ tmp = learn_buffer;
53592+ learn_buffer = NULL;
53593+ spin_unlock(&gr_learn_lock);
53594+ if (tmp)
53595+ vfree(tmp);
53596+ if (learn_buffer_user != NULL) {
53597+ vfree(learn_buffer_user);
53598+ learn_buffer_user = NULL;
53599+ }
53600+ learn_buffer_len = 0;
53601+ learn_buffer_user_len = 0;
53602+ gr_learn_attached = 0;
53603+ mutex_unlock(&gr_learn_user_mutex);
53604+ }
53605+
53606+ return 0;
53607+}
53608+
53609+const struct file_operations grsec_fops = {
53610+ .read = read_learn,
53611+ .write = write_grsec_handler,
53612+ .open = open_learn,
53613+ .release = close_learn,
53614+ .poll = poll_learn,
53615+};
53616diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53617new file mode 100644
53618index 0000000..39645c9
53619--- /dev/null
53620+++ b/grsecurity/gracl_res.c
53621@@ -0,0 +1,68 @@
53622+#include <linux/kernel.h>
53623+#include <linux/sched.h>
53624+#include <linux/gracl.h>
53625+#include <linux/grinternal.h>
53626+
53627+static const char *restab_log[] = {
53628+ [RLIMIT_CPU] = "RLIMIT_CPU",
53629+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53630+ [RLIMIT_DATA] = "RLIMIT_DATA",
53631+ [RLIMIT_STACK] = "RLIMIT_STACK",
53632+ [RLIMIT_CORE] = "RLIMIT_CORE",
53633+ [RLIMIT_RSS] = "RLIMIT_RSS",
53634+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53635+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53636+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53637+ [RLIMIT_AS] = "RLIMIT_AS",
53638+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53639+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53640+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53641+ [RLIMIT_NICE] = "RLIMIT_NICE",
53642+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53643+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53644+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53645+};
53646+
53647+void
53648+gr_log_resource(const struct task_struct *task,
53649+ const int res, const unsigned long wanted, const int gt)
53650+{
53651+ const struct cred *cred;
53652+ unsigned long rlim;
53653+
53654+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53655+ return;
53656+
53657+ // not yet supported resource
53658+ if (unlikely(!restab_log[res]))
53659+ return;
53660+
53661+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53662+ rlim = task_rlimit_max(task, res);
53663+ else
53664+ rlim = task_rlimit(task, res);
53665+
53666+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53667+ return;
53668+
53669+ rcu_read_lock();
53670+ cred = __task_cred(task);
53671+
53672+ if (res == RLIMIT_NPROC &&
53673+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53674+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53675+ goto out_rcu_unlock;
53676+ else if (res == RLIMIT_MEMLOCK &&
53677+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53678+ goto out_rcu_unlock;
53679+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53680+ goto out_rcu_unlock;
53681+ rcu_read_unlock();
53682+
53683+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53684+
53685+ return;
53686+out_rcu_unlock:
53687+ rcu_read_unlock();
53688+ return;
53689+}
53690diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53691new file mode 100644
53692index 0000000..5556be3
53693--- /dev/null
53694+++ b/grsecurity/gracl_segv.c
53695@@ -0,0 +1,299 @@
53696+#include <linux/kernel.h>
53697+#include <linux/mm.h>
53698+#include <asm/uaccess.h>
53699+#include <asm/errno.h>
53700+#include <asm/mman.h>
53701+#include <net/sock.h>
53702+#include <linux/file.h>
53703+#include <linux/fs.h>
53704+#include <linux/net.h>
53705+#include <linux/in.h>
53706+#include <linux/slab.h>
53707+#include <linux/types.h>
53708+#include <linux/sched.h>
53709+#include <linux/timer.h>
53710+#include <linux/gracl.h>
53711+#include <linux/grsecurity.h>
53712+#include <linux/grinternal.h>
53713+
53714+static struct crash_uid *uid_set;
53715+static unsigned short uid_used;
53716+static DEFINE_SPINLOCK(gr_uid_lock);
53717+extern rwlock_t gr_inode_lock;
53718+extern struct acl_subject_label *
53719+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53720+ struct acl_role_label *role);
53721+
53722+#ifdef CONFIG_BTRFS_FS
53723+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53724+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53725+#endif
53726+
53727+static inline dev_t __get_dev(const struct dentry *dentry)
53728+{
53729+#ifdef CONFIG_BTRFS_FS
53730+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53731+ return get_btrfs_dev_from_inode(dentry->d_inode);
53732+ else
53733+#endif
53734+ return dentry->d_inode->i_sb->s_dev;
53735+}
53736+
53737+int
53738+gr_init_uidset(void)
53739+{
53740+ uid_set =
53741+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53742+ uid_used = 0;
53743+
53744+ return uid_set ? 1 : 0;
53745+}
53746+
53747+void
53748+gr_free_uidset(void)
53749+{
53750+ if (uid_set)
53751+ kfree(uid_set);
53752+
53753+ return;
53754+}
53755+
53756+int
53757+gr_find_uid(const uid_t uid)
53758+{
53759+ struct crash_uid *tmp = uid_set;
53760+ uid_t buid;
53761+ int low = 0, high = uid_used - 1, mid;
53762+
53763+ while (high >= low) {
53764+ mid = (low + high) >> 1;
53765+ buid = tmp[mid].uid;
53766+ if (buid == uid)
53767+ return mid;
53768+ if (buid > uid)
53769+ high = mid - 1;
53770+ if (buid < uid)
53771+ low = mid + 1;
53772+ }
53773+
53774+ return -1;
53775+}
53776+
53777+static __inline__ void
53778+gr_insertsort(void)
53779+{
53780+ unsigned short i, j;
53781+ struct crash_uid index;
53782+
53783+ for (i = 1; i < uid_used; i++) {
53784+ index = uid_set[i];
53785+ j = i;
53786+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53787+ uid_set[j] = uid_set[j - 1];
53788+ j--;
53789+ }
53790+ uid_set[j] = index;
53791+ }
53792+
53793+ return;
53794+}
53795+
53796+static __inline__ void
53797+gr_insert_uid(const uid_t uid, const unsigned long expires)
53798+{
53799+ int loc;
53800+
53801+ if (uid_used == GR_UIDTABLE_MAX)
53802+ return;
53803+
53804+ loc = gr_find_uid(uid);
53805+
53806+ if (loc >= 0) {
53807+ uid_set[loc].expires = expires;
53808+ return;
53809+ }
53810+
53811+ uid_set[uid_used].uid = uid;
53812+ uid_set[uid_used].expires = expires;
53813+ uid_used++;
53814+
53815+ gr_insertsort();
53816+
53817+ return;
53818+}
53819+
53820+void
53821+gr_remove_uid(const unsigned short loc)
53822+{
53823+ unsigned short i;
53824+
53825+ for (i = loc + 1; i < uid_used; i++)
53826+ uid_set[i - 1] = uid_set[i];
53827+
53828+ uid_used--;
53829+
53830+ return;
53831+}
53832+
53833+int
53834+gr_check_crash_uid(const uid_t uid)
53835+{
53836+ int loc;
53837+ int ret = 0;
53838+
53839+ if (unlikely(!gr_acl_is_enabled()))
53840+ return 0;
53841+
53842+ spin_lock(&gr_uid_lock);
53843+ loc = gr_find_uid(uid);
53844+
53845+ if (loc < 0)
53846+ goto out_unlock;
53847+
53848+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53849+ gr_remove_uid(loc);
53850+ else
53851+ ret = 1;
53852+
53853+out_unlock:
53854+ spin_unlock(&gr_uid_lock);
53855+ return ret;
53856+}
53857+
53858+static __inline__ int
53859+proc_is_setxid(const struct cred *cred)
53860+{
53861+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53862+ cred->uid != cred->fsuid)
53863+ return 1;
53864+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53865+ cred->gid != cred->fsgid)
53866+ return 1;
53867+
53868+ return 0;
53869+}
53870+
53871+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53872+
53873+void
53874+gr_handle_crash(struct task_struct *task, const int sig)
53875+{
53876+ struct acl_subject_label *curr;
53877+ struct task_struct *tsk, *tsk2;
53878+ const struct cred *cred;
53879+ const struct cred *cred2;
53880+
53881+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53882+ return;
53883+
53884+ if (unlikely(!gr_acl_is_enabled()))
53885+ return;
53886+
53887+ curr = task->acl;
53888+
53889+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
53890+ return;
53891+
53892+ if (time_before_eq(curr->expires, get_seconds())) {
53893+ curr->expires = 0;
53894+ curr->crashes = 0;
53895+ }
53896+
53897+ curr->crashes++;
53898+
53899+ if (!curr->expires)
53900+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53901+
53902+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53903+ time_after(curr->expires, get_seconds())) {
53904+ rcu_read_lock();
53905+ cred = __task_cred(task);
53906+ if (cred->uid && proc_is_setxid(cred)) {
53907+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53908+ spin_lock(&gr_uid_lock);
53909+ gr_insert_uid(cred->uid, curr->expires);
53910+ spin_unlock(&gr_uid_lock);
53911+ curr->expires = 0;
53912+ curr->crashes = 0;
53913+ read_lock(&tasklist_lock);
53914+ do_each_thread(tsk2, tsk) {
53915+ cred2 = __task_cred(tsk);
53916+ if (tsk != task && cred2->uid == cred->uid)
53917+ gr_fake_force_sig(SIGKILL, tsk);
53918+ } while_each_thread(tsk2, tsk);
53919+ read_unlock(&tasklist_lock);
53920+ } else {
53921+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53922+ read_lock(&tasklist_lock);
53923+ read_lock(&grsec_exec_file_lock);
53924+ do_each_thread(tsk2, tsk) {
53925+ if (likely(tsk != task)) {
53926+ // if this thread has the same subject as the one that triggered
53927+ // RES_CRASH and it's the same binary, kill it
53928+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
53929+ gr_fake_force_sig(SIGKILL, tsk);
53930+ }
53931+ } while_each_thread(tsk2, tsk);
53932+ read_unlock(&grsec_exec_file_lock);
53933+ read_unlock(&tasklist_lock);
53934+ }
53935+ rcu_read_unlock();
53936+ }
53937+
53938+ return;
53939+}
53940+
53941+int
53942+gr_check_crash_exec(const struct file *filp)
53943+{
53944+ struct acl_subject_label *curr;
53945+
53946+ if (unlikely(!gr_acl_is_enabled()))
53947+ return 0;
53948+
53949+ read_lock(&gr_inode_lock);
53950+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53951+ __get_dev(filp->f_path.dentry),
53952+ current->role);
53953+ read_unlock(&gr_inode_lock);
53954+
53955+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53956+ (!curr->crashes && !curr->expires))
53957+ return 0;
53958+
53959+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53960+ time_after(curr->expires, get_seconds()))
53961+ return 1;
53962+ else if (time_before_eq(curr->expires, get_seconds())) {
53963+ curr->crashes = 0;
53964+ curr->expires = 0;
53965+ }
53966+
53967+ return 0;
53968+}
53969+
53970+void
53971+gr_handle_alertkill(struct task_struct *task)
53972+{
53973+ struct acl_subject_label *curracl;
53974+ __u32 curr_ip;
53975+ struct task_struct *p, *p2;
53976+
53977+ if (unlikely(!gr_acl_is_enabled()))
53978+ return;
53979+
53980+ curracl = task->acl;
53981+ curr_ip = task->signal->curr_ip;
53982+
53983+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53984+ read_lock(&tasklist_lock);
53985+ do_each_thread(p2, p) {
53986+ if (p->signal->curr_ip == curr_ip)
53987+ gr_fake_force_sig(SIGKILL, p);
53988+ } while_each_thread(p2, p);
53989+ read_unlock(&tasklist_lock);
53990+ } else if (curracl->mode & GR_KILLPROC)
53991+ gr_fake_force_sig(SIGKILL, task);
53992+
53993+ return;
53994+}
53995diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
53996new file mode 100644
53997index 0000000..9d83a69
53998--- /dev/null
53999+++ b/grsecurity/gracl_shm.c
54000@@ -0,0 +1,40 @@
54001+#include <linux/kernel.h>
54002+#include <linux/mm.h>
54003+#include <linux/sched.h>
54004+#include <linux/file.h>
54005+#include <linux/ipc.h>
54006+#include <linux/gracl.h>
54007+#include <linux/grsecurity.h>
54008+#include <linux/grinternal.h>
54009+
54010+int
54011+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54012+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54013+{
54014+ struct task_struct *task;
54015+
54016+ if (!gr_acl_is_enabled())
54017+ return 1;
54018+
54019+ rcu_read_lock();
54020+ read_lock(&tasklist_lock);
54021+
54022+ task = find_task_by_vpid(shm_cprid);
54023+
54024+ if (unlikely(!task))
54025+ task = find_task_by_vpid(shm_lapid);
54026+
54027+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54028+ (task->pid == shm_lapid)) &&
54029+ (task->acl->mode & GR_PROTSHM) &&
54030+ (task->acl != current->acl))) {
54031+ read_unlock(&tasklist_lock);
54032+ rcu_read_unlock();
54033+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54034+ return 0;
54035+ }
54036+ read_unlock(&tasklist_lock);
54037+ rcu_read_unlock();
54038+
54039+ return 1;
54040+}
54041diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54042new file mode 100644
54043index 0000000..bc0be01
54044--- /dev/null
54045+++ b/grsecurity/grsec_chdir.c
54046@@ -0,0 +1,19 @@
54047+#include <linux/kernel.h>
54048+#include <linux/sched.h>
54049+#include <linux/fs.h>
54050+#include <linux/file.h>
54051+#include <linux/grsecurity.h>
54052+#include <linux/grinternal.h>
54053+
54054+void
54055+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54056+{
54057+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54058+ if ((grsec_enable_chdir && grsec_enable_group &&
54059+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54060+ !grsec_enable_group)) {
54061+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54062+ }
54063+#endif
54064+ return;
54065+}
54066diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54067new file mode 100644
54068index 0000000..a2dc675
54069--- /dev/null
54070+++ b/grsecurity/grsec_chroot.c
54071@@ -0,0 +1,351 @@
54072+#include <linux/kernel.h>
54073+#include <linux/module.h>
54074+#include <linux/sched.h>
54075+#include <linux/file.h>
54076+#include <linux/fs.h>
54077+#include <linux/mount.h>
54078+#include <linux/types.h>
54079+#include <linux/pid_namespace.h>
54080+#include <linux/grsecurity.h>
54081+#include <linux/grinternal.h>
54082+
54083+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54084+{
54085+#ifdef CONFIG_GRKERNSEC
54086+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54087+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54088+ task->gr_is_chrooted = 1;
54089+ else
54090+ task->gr_is_chrooted = 0;
54091+
54092+ task->gr_chroot_dentry = path->dentry;
54093+#endif
54094+ return;
54095+}
54096+
54097+void gr_clear_chroot_entries(struct task_struct *task)
54098+{
54099+#ifdef CONFIG_GRKERNSEC
54100+ task->gr_is_chrooted = 0;
54101+ task->gr_chroot_dentry = NULL;
54102+#endif
54103+ return;
54104+}
54105+
54106+int
54107+gr_handle_chroot_unix(const pid_t pid)
54108+{
54109+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54110+ struct task_struct *p;
54111+
54112+ if (unlikely(!grsec_enable_chroot_unix))
54113+ return 1;
54114+
54115+ if (likely(!proc_is_chrooted(current)))
54116+ return 1;
54117+
54118+ rcu_read_lock();
54119+ read_lock(&tasklist_lock);
54120+ p = find_task_by_vpid_unrestricted(pid);
54121+ if (unlikely(p && !have_same_root(current, p))) {
54122+ read_unlock(&tasklist_lock);
54123+ rcu_read_unlock();
54124+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54125+ return 0;
54126+ }
54127+ read_unlock(&tasklist_lock);
54128+ rcu_read_unlock();
54129+#endif
54130+ return 1;
54131+}
54132+
54133+int
54134+gr_handle_chroot_nice(void)
54135+{
54136+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54137+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54138+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54139+ return -EPERM;
54140+ }
54141+#endif
54142+ return 0;
54143+}
54144+
54145+int
54146+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54147+{
54148+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54149+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54150+ && proc_is_chrooted(current)) {
54151+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54152+ return -EACCES;
54153+ }
54154+#endif
54155+ return 0;
54156+}
54157+
54158+int
54159+gr_handle_chroot_rawio(const struct inode *inode)
54160+{
54161+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54162+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54163+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54164+ return 1;
54165+#endif
54166+ return 0;
54167+}
54168+
54169+int
54170+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54171+{
54172+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54173+ struct task_struct *p;
54174+ int ret = 0;
54175+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54176+ return ret;
54177+
54178+ read_lock(&tasklist_lock);
54179+ do_each_pid_task(pid, type, p) {
54180+ if (!have_same_root(current, p)) {
54181+ ret = 1;
54182+ goto out;
54183+ }
54184+ } while_each_pid_task(pid, type, p);
54185+out:
54186+ read_unlock(&tasklist_lock);
54187+ return ret;
54188+#endif
54189+ return 0;
54190+}
54191+
54192+int
54193+gr_pid_is_chrooted(struct task_struct *p)
54194+{
54195+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54196+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54197+ return 0;
54198+
54199+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54200+ !have_same_root(current, p)) {
54201+ return 1;
54202+ }
54203+#endif
54204+ return 0;
54205+}
54206+
54207+EXPORT_SYMBOL(gr_pid_is_chrooted);
54208+
54209+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54210+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54211+{
54212+ struct path path, currentroot;
54213+ int ret = 0;
54214+
54215+ path.dentry = (struct dentry *)u_dentry;
54216+ path.mnt = (struct vfsmount *)u_mnt;
54217+ get_fs_root(current->fs, &currentroot);
54218+ if (path_is_under(&path, &currentroot))
54219+ ret = 1;
54220+ path_put(&currentroot);
54221+
54222+ return ret;
54223+}
54224+#endif
54225+
54226+int
54227+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54228+{
54229+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54230+ if (!grsec_enable_chroot_fchdir)
54231+ return 1;
54232+
54233+ if (!proc_is_chrooted(current))
54234+ return 1;
54235+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54236+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54237+ return 0;
54238+ }
54239+#endif
54240+ return 1;
54241+}
54242+
54243+int
54244+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54245+ const time_t shm_createtime)
54246+{
54247+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54248+ struct task_struct *p;
54249+ time_t starttime;
54250+
54251+ if (unlikely(!grsec_enable_chroot_shmat))
54252+ return 1;
54253+
54254+ if (likely(!proc_is_chrooted(current)))
54255+ return 1;
54256+
54257+ rcu_read_lock();
54258+ read_lock(&tasklist_lock);
54259+
54260+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54261+ starttime = p->start_time.tv_sec;
54262+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54263+ if (have_same_root(current, p)) {
54264+ goto allow;
54265+ } else {
54266+ read_unlock(&tasklist_lock);
54267+ rcu_read_unlock();
54268+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54269+ return 0;
54270+ }
54271+ }
54272+ /* creator exited, pid reuse, fall through to next check */
54273+ }
54274+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54275+ if (unlikely(!have_same_root(current, p))) {
54276+ read_unlock(&tasklist_lock);
54277+ rcu_read_unlock();
54278+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54279+ return 0;
54280+ }
54281+ }
54282+
54283+allow:
54284+ read_unlock(&tasklist_lock);
54285+ rcu_read_unlock();
54286+#endif
54287+ return 1;
54288+}
54289+
54290+void
54291+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54292+{
54293+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54294+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54295+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54296+#endif
54297+ return;
54298+}
54299+
54300+int
54301+gr_handle_chroot_mknod(const struct dentry *dentry,
54302+ const struct vfsmount *mnt, const int mode)
54303+{
54304+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54305+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54306+ proc_is_chrooted(current)) {
54307+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54308+ return -EPERM;
54309+ }
54310+#endif
54311+ return 0;
54312+}
54313+
54314+int
54315+gr_handle_chroot_mount(const struct dentry *dentry,
54316+ const struct vfsmount *mnt, const char *dev_name)
54317+{
54318+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54319+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54320+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54321+ return -EPERM;
54322+ }
54323+#endif
54324+ return 0;
54325+}
54326+
54327+int
54328+gr_handle_chroot_pivot(void)
54329+{
54330+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54331+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54332+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54333+ return -EPERM;
54334+ }
54335+#endif
54336+ return 0;
54337+}
54338+
54339+int
54340+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54341+{
54342+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54343+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54344+ !gr_is_outside_chroot(dentry, mnt)) {
54345+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54346+ return -EPERM;
54347+ }
54348+#endif
54349+ return 0;
54350+}
54351+
54352+extern const char *captab_log[];
54353+extern int captab_log_entries;
54354+
54355+int
54356+gr_chroot_is_capable(const int cap)
54357+{
54358+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54359+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54360+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54361+ if (cap_raised(chroot_caps, cap)) {
54362+ const struct cred *creds = current_cred();
54363+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54364+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54365+ }
54366+ return 0;
54367+ }
54368+ }
54369+#endif
54370+ return 1;
54371+}
54372+
54373+int
54374+gr_chroot_is_capable_nolog(const int cap)
54375+{
54376+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54377+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54378+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54379+ if (cap_raised(chroot_caps, cap)) {
54380+ return 0;
54381+ }
54382+ }
54383+#endif
54384+ return 1;
54385+}
54386+
54387+int
54388+gr_handle_chroot_sysctl(const int op)
54389+{
54390+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54391+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54392+ proc_is_chrooted(current))
54393+ return -EACCES;
54394+#endif
54395+ return 0;
54396+}
54397+
54398+void
54399+gr_handle_chroot_chdir(struct path *path)
54400+{
54401+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54402+ if (grsec_enable_chroot_chdir)
54403+ set_fs_pwd(current->fs, path);
54404+#endif
54405+ return;
54406+}
54407+
54408+int
54409+gr_handle_chroot_chmod(const struct dentry *dentry,
54410+ const struct vfsmount *mnt, const int mode)
54411+{
54412+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54413+ /* allow chmod +s on directories, but not files */
54414+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54415+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54416+ proc_is_chrooted(current)) {
54417+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54418+ return -EPERM;
54419+ }
54420+#endif
54421+ return 0;
54422+}
54423diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54424new file mode 100644
54425index 0000000..d81a586
54426--- /dev/null
54427+++ b/grsecurity/grsec_disabled.c
54428@@ -0,0 +1,439 @@
54429+#include <linux/kernel.h>
54430+#include <linux/module.h>
54431+#include <linux/sched.h>
54432+#include <linux/file.h>
54433+#include <linux/fs.h>
54434+#include <linux/kdev_t.h>
54435+#include <linux/net.h>
54436+#include <linux/in.h>
54437+#include <linux/ip.h>
54438+#include <linux/skbuff.h>
54439+#include <linux/sysctl.h>
54440+
54441+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54442+void
54443+pax_set_initial_flags(struct linux_binprm *bprm)
54444+{
54445+ return;
54446+}
54447+#endif
54448+
54449+#ifdef CONFIG_SYSCTL
54450+__u32
54451+gr_handle_sysctl(const struct ctl_table * table, const int op)
54452+{
54453+ return 0;
54454+}
54455+#endif
54456+
54457+#ifdef CONFIG_TASKSTATS
54458+int gr_is_taskstats_denied(int pid)
54459+{
54460+ return 0;
54461+}
54462+#endif
54463+
54464+int
54465+gr_acl_is_enabled(void)
54466+{
54467+ return 0;
54468+}
54469+
54470+void
54471+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54472+{
54473+ return;
54474+}
54475+
54476+int
54477+gr_handle_rawio(const struct inode *inode)
54478+{
54479+ return 0;
54480+}
54481+
54482+void
54483+gr_acl_handle_psacct(struct task_struct *task, const long code)
54484+{
54485+ return;
54486+}
54487+
54488+int
54489+gr_handle_ptrace(struct task_struct *task, const long request)
54490+{
54491+ return 0;
54492+}
54493+
54494+int
54495+gr_handle_proc_ptrace(struct task_struct *task)
54496+{
54497+ return 0;
54498+}
54499+
54500+void
54501+gr_learn_resource(const struct task_struct *task,
54502+ const int res, const unsigned long wanted, const int gt)
54503+{
54504+ return;
54505+}
54506+
54507+int
54508+gr_set_acls(const int type)
54509+{
54510+ return 0;
54511+}
54512+
54513+int
54514+gr_check_hidden_task(const struct task_struct *tsk)
54515+{
54516+ return 0;
54517+}
54518+
54519+int
54520+gr_check_protected_task(const struct task_struct *task)
54521+{
54522+ return 0;
54523+}
54524+
54525+int
54526+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54527+{
54528+ return 0;
54529+}
54530+
54531+void
54532+gr_copy_label(struct task_struct *tsk)
54533+{
54534+ return;
54535+}
54536+
54537+void
54538+gr_set_pax_flags(struct task_struct *task)
54539+{
54540+ return;
54541+}
54542+
54543+int
54544+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54545+ const int unsafe_share)
54546+{
54547+ return 0;
54548+}
54549+
54550+void
54551+gr_handle_delete(const ino_t ino, const dev_t dev)
54552+{
54553+ return;
54554+}
54555+
54556+void
54557+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54558+{
54559+ return;
54560+}
54561+
54562+void
54563+gr_handle_crash(struct task_struct *task, const int sig)
54564+{
54565+ return;
54566+}
54567+
54568+int
54569+gr_check_crash_exec(const struct file *filp)
54570+{
54571+ return 0;
54572+}
54573+
54574+int
54575+gr_check_crash_uid(const uid_t uid)
54576+{
54577+ return 0;
54578+}
54579+
54580+void
54581+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54582+ struct dentry *old_dentry,
54583+ struct dentry *new_dentry,
54584+ struct vfsmount *mnt, const __u8 replace)
54585+{
54586+ return;
54587+}
54588+
54589+int
54590+gr_search_socket(const int family, const int type, const int protocol)
54591+{
54592+ return 1;
54593+}
54594+
54595+int
54596+gr_search_connectbind(const int mode, const struct socket *sock,
54597+ const struct sockaddr_in *addr)
54598+{
54599+ return 0;
54600+}
54601+
54602+void
54603+gr_handle_alertkill(struct task_struct *task)
54604+{
54605+ return;
54606+}
54607+
54608+__u32
54609+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54610+{
54611+ return 1;
54612+}
54613+
54614+__u32
54615+gr_acl_handle_hidden_file(const struct dentry * dentry,
54616+ const struct vfsmount * mnt)
54617+{
54618+ return 1;
54619+}
54620+
54621+__u32
54622+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54623+ int acc_mode)
54624+{
54625+ return 1;
54626+}
54627+
54628+__u32
54629+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54630+{
54631+ return 1;
54632+}
54633+
54634+__u32
54635+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54636+{
54637+ return 1;
54638+}
54639+
54640+int
54641+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54642+ unsigned int *vm_flags)
54643+{
54644+ return 1;
54645+}
54646+
54647+__u32
54648+gr_acl_handle_truncate(const struct dentry * dentry,
54649+ const struct vfsmount * mnt)
54650+{
54651+ return 1;
54652+}
54653+
54654+__u32
54655+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54656+{
54657+ return 1;
54658+}
54659+
54660+__u32
54661+gr_acl_handle_access(const struct dentry * dentry,
54662+ const struct vfsmount * mnt, const int fmode)
54663+{
54664+ return 1;
54665+}
54666+
54667+__u32
54668+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54669+ mode_t mode)
54670+{
54671+ return 1;
54672+}
54673+
54674+__u32
54675+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54676+ mode_t mode)
54677+{
54678+ return 1;
54679+}
54680+
54681+__u32
54682+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54683+{
54684+ return 1;
54685+}
54686+
54687+__u32
54688+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54689+{
54690+ return 1;
54691+}
54692+
54693+void
54694+grsecurity_init(void)
54695+{
54696+ return;
54697+}
54698+
54699+__u32
54700+gr_acl_handle_mknod(const struct dentry * new_dentry,
54701+ const struct dentry * parent_dentry,
54702+ const struct vfsmount * parent_mnt,
54703+ const int mode)
54704+{
54705+ return 1;
54706+}
54707+
54708+__u32
54709+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54710+ const struct dentry * parent_dentry,
54711+ const struct vfsmount * parent_mnt)
54712+{
54713+ return 1;
54714+}
54715+
54716+__u32
54717+gr_acl_handle_symlink(const struct dentry * new_dentry,
54718+ const struct dentry * parent_dentry,
54719+ const struct vfsmount * parent_mnt, const char *from)
54720+{
54721+ return 1;
54722+}
54723+
54724+__u32
54725+gr_acl_handle_link(const struct dentry * new_dentry,
54726+ const struct dentry * parent_dentry,
54727+ const struct vfsmount * parent_mnt,
54728+ const struct dentry * old_dentry,
54729+ const struct vfsmount * old_mnt, const char *to)
54730+{
54731+ return 1;
54732+}
54733+
54734+int
54735+gr_acl_handle_rename(const struct dentry *new_dentry,
54736+ const struct dentry *parent_dentry,
54737+ const struct vfsmount *parent_mnt,
54738+ const struct dentry *old_dentry,
54739+ const struct inode *old_parent_inode,
54740+ const struct vfsmount *old_mnt, const char *newname)
54741+{
54742+ return 0;
54743+}
54744+
54745+int
54746+gr_acl_handle_filldir(const struct file *file, const char *name,
54747+ const int namelen, const ino_t ino)
54748+{
54749+ return 1;
54750+}
54751+
54752+int
54753+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54754+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54755+{
54756+ return 1;
54757+}
54758+
54759+int
54760+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54761+{
54762+ return 0;
54763+}
54764+
54765+int
54766+gr_search_accept(const struct socket *sock)
54767+{
54768+ return 0;
54769+}
54770+
54771+int
54772+gr_search_listen(const struct socket *sock)
54773+{
54774+ return 0;
54775+}
54776+
54777+int
54778+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54779+{
54780+ return 0;
54781+}
54782+
54783+__u32
54784+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54785+{
54786+ return 1;
54787+}
54788+
54789+__u32
54790+gr_acl_handle_creat(const struct dentry * dentry,
54791+ const struct dentry * p_dentry,
54792+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54793+ const int imode)
54794+{
54795+ return 1;
54796+}
54797+
54798+void
54799+gr_acl_handle_exit(void)
54800+{
54801+ return;
54802+}
54803+
54804+int
54805+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54806+{
54807+ return 1;
54808+}
54809+
54810+void
54811+gr_set_role_label(const uid_t uid, const gid_t gid)
54812+{
54813+ return;
54814+}
54815+
54816+int
54817+gr_acl_handle_procpidmem(const struct task_struct *task)
54818+{
54819+ return 0;
54820+}
54821+
54822+int
54823+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54824+{
54825+ return 0;
54826+}
54827+
54828+int
54829+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54830+{
54831+ return 0;
54832+}
54833+
54834+void
54835+gr_set_kernel_label(struct task_struct *task)
54836+{
54837+ return;
54838+}
54839+
54840+int
54841+gr_check_user_change(int real, int effective, int fs)
54842+{
54843+ return 0;
54844+}
54845+
54846+int
54847+gr_check_group_change(int real, int effective, int fs)
54848+{
54849+ return 0;
54850+}
54851+
54852+int gr_acl_enable_at_secure(void)
54853+{
54854+ return 0;
54855+}
54856+
54857+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54858+{
54859+ return dentry->d_inode->i_sb->s_dev;
54860+}
54861+
54862+EXPORT_SYMBOL(gr_learn_resource);
54863+EXPORT_SYMBOL(gr_set_kernel_label);
54864+#ifdef CONFIG_SECURITY
54865+EXPORT_SYMBOL(gr_check_user_change);
54866+EXPORT_SYMBOL(gr_check_group_change);
54867+#endif
54868diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54869new file mode 100644
54870index 0000000..2b05ada
54871--- /dev/null
54872+++ b/grsecurity/grsec_exec.c
54873@@ -0,0 +1,146 @@
54874+#include <linux/kernel.h>
54875+#include <linux/sched.h>
54876+#include <linux/file.h>
54877+#include <linux/binfmts.h>
54878+#include <linux/fs.h>
54879+#include <linux/types.h>
54880+#include <linux/grdefs.h>
54881+#include <linux/grsecurity.h>
54882+#include <linux/grinternal.h>
54883+#include <linux/capability.h>
54884+#include <linux/module.h>
54885+
54886+#include <asm/uaccess.h>
54887+
54888+#ifdef CONFIG_GRKERNSEC_EXECLOG
54889+static char gr_exec_arg_buf[132];
54890+static DEFINE_MUTEX(gr_exec_arg_mutex);
54891+#endif
54892+
54893+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54894+
54895+void
54896+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54897+{
54898+#ifdef CONFIG_GRKERNSEC_EXECLOG
54899+ char *grarg = gr_exec_arg_buf;
54900+ unsigned int i, x, execlen = 0;
54901+ char c;
54902+
54903+ if (!((grsec_enable_execlog && grsec_enable_group &&
54904+ in_group_p(grsec_audit_gid))
54905+ || (grsec_enable_execlog && !grsec_enable_group)))
54906+ return;
54907+
54908+ mutex_lock(&gr_exec_arg_mutex);
54909+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54910+
54911+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54912+ const char __user *p;
54913+ unsigned int len;
54914+
54915+ p = get_user_arg_ptr(argv, i);
54916+ if (IS_ERR(p))
54917+ goto log;
54918+
54919+ len = strnlen_user(p, 128 - execlen);
54920+ if (len > 128 - execlen)
54921+ len = 128 - execlen;
54922+ else if (len > 0)
54923+ len--;
54924+ if (copy_from_user(grarg + execlen, p, len))
54925+ goto log;
54926+
54927+ /* rewrite unprintable characters */
54928+ for (x = 0; x < len; x++) {
54929+ c = *(grarg + execlen + x);
54930+ if (c < 32 || c > 126)
54931+ *(grarg + execlen + x) = ' ';
54932+ }
54933+
54934+ execlen += len;
54935+ *(grarg + execlen) = ' ';
54936+ *(grarg + execlen + 1) = '\0';
54937+ execlen++;
54938+ }
54939+
54940+ log:
54941+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54942+ bprm->file->f_path.mnt, grarg);
54943+ mutex_unlock(&gr_exec_arg_mutex);
54944+#endif
54945+ return;
54946+}
54947+
54948+#ifdef CONFIG_GRKERNSEC
54949+extern int gr_acl_is_capable(const int cap);
54950+extern int gr_acl_is_capable_nolog(const int cap);
54951+extern int gr_chroot_is_capable(const int cap);
54952+extern int gr_chroot_is_capable_nolog(const int cap);
54953+#endif
54954+
54955+const char *captab_log[] = {
54956+ "CAP_CHOWN",
54957+ "CAP_DAC_OVERRIDE",
54958+ "CAP_DAC_READ_SEARCH",
54959+ "CAP_FOWNER",
54960+ "CAP_FSETID",
54961+ "CAP_KILL",
54962+ "CAP_SETGID",
54963+ "CAP_SETUID",
54964+ "CAP_SETPCAP",
54965+ "CAP_LINUX_IMMUTABLE",
54966+ "CAP_NET_BIND_SERVICE",
54967+ "CAP_NET_BROADCAST",
54968+ "CAP_NET_ADMIN",
54969+ "CAP_NET_RAW",
54970+ "CAP_IPC_LOCK",
54971+ "CAP_IPC_OWNER",
54972+ "CAP_SYS_MODULE",
54973+ "CAP_SYS_RAWIO",
54974+ "CAP_SYS_CHROOT",
54975+ "CAP_SYS_PTRACE",
54976+ "CAP_SYS_PACCT",
54977+ "CAP_SYS_ADMIN",
54978+ "CAP_SYS_BOOT",
54979+ "CAP_SYS_NICE",
54980+ "CAP_SYS_RESOURCE",
54981+ "CAP_SYS_TIME",
54982+ "CAP_SYS_TTY_CONFIG",
54983+ "CAP_MKNOD",
54984+ "CAP_LEASE",
54985+ "CAP_AUDIT_WRITE",
54986+ "CAP_AUDIT_CONTROL",
54987+ "CAP_SETFCAP",
54988+ "CAP_MAC_OVERRIDE",
54989+ "CAP_MAC_ADMIN",
54990+ "CAP_SYSLOG",
54991+ "CAP_WAKE_ALARM"
54992+};
54993+
54994+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54995+
54996+int gr_is_capable(const int cap)
54997+{
54998+#ifdef CONFIG_GRKERNSEC
54999+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55000+ return 1;
55001+ return 0;
55002+#else
55003+ return 1;
55004+#endif
55005+}
55006+
55007+int gr_is_capable_nolog(const int cap)
55008+{
55009+#ifdef CONFIG_GRKERNSEC
55010+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55011+ return 1;
55012+ return 0;
55013+#else
55014+ return 1;
55015+#endif
55016+}
55017+
55018+EXPORT_SYMBOL(gr_is_capable);
55019+EXPORT_SYMBOL(gr_is_capable_nolog);
55020diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55021new file mode 100644
55022index 0000000..d3ee748
55023--- /dev/null
55024+++ b/grsecurity/grsec_fifo.c
55025@@ -0,0 +1,24 @@
55026+#include <linux/kernel.h>
55027+#include <linux/sched.h>
55028+#include <linux/fs.h>
55029+#include <linux/file.h>
55030+#include <linux/grinternal.h>
55031+
55032+int
55033+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55034+ const struct dentry *dir, const int flag, const int acc_mode)
55035+{
55036+#ifdef CONFIG_GRKERNSEC_FIFO
55037+ const struct cred *cred = current_cred();
55038+
55039+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55040+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55041+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55042+ (cred->fsuid != dentry->d_inode->i_uid)) {
55043+ if (!inode_permission(dentry->d_inode, acc_mode))
55044+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55045+ return -EACCES;
55046+ }
55047+#endif
55048+ return 0;
55049+}
55050diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55051new file mode 100644
55052index 0000000..8ca18bf
55053--- /dev/null
55054+++ b/grsecurity/grsec_fork.c
55055@@ -0,0 +1,23 @@
55056+#include <linux/kernel.h>
55057+#include <linux/sched.h>
55058+#include <linux/grsecurity.h>
55059+#include <linux/grinternal.h>
55060+#include <linux/errno.h>
55061+
55062+void
55063+gr_log_forkfail(const int retval)
55064+{
55065+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55066+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55067+ switch (retval) {
55068+ case -EAGAIN:
55069+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55070+ break;
55071+ case -ENOMEM:
55072+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55073+ break;
55074+ }
55075+ }
55076+#endif
55077+ return;
55078+}
55079diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55080new file mode 100644
55081index 0000000..01ddde4
55082--- /dev/null
55083+++ b/grsecurity/grsec_init.c
55084@@ -0,0 +1,277 @@
55085+#include <linux/kernel.h>
55086+#include <linux/sched.h>
55087+#include <linux/mm.h>
55088+#include <linux/gracl.h>
55089+#include <linux/slab.h>
55090+#include <linux/vmalloc.h>
55091+#include <linux/percpu.h>
55092+#include <linux/module.h>
55093+
55094+int grsec_enable_ptrace_readexec;
55095+int grsec_enable_setxid;
55096+int grsec_enable_brute;
55097+int grsec_enable_link;
55098+int grsec_enable_dmesg;
55099+int grsec_enable_harden_ptrace;
55100+int grsec_enable_fifo;
55101+int grsec_enable_execlog;
55102+int grsec_enable_signal;
55103+int grsec_enable_forkfail;
55104+int grsec_enable_audit_ptrace;
55105+int grsec_enable_time;
55106+int grsec_enable_audit_textrel;
55107+int grsec_enable_group;
55108+int grsec_audit_gid;
55109+int grsec_enable_chdir;
55110+int grsec_enable_mount;
55111+int grsec_enable_rofs;
55112+int grsec_enable_chroot_findtask;
55113+int grsec_enable_chroot_mount;
55114+int grsec_enable_chroot_shmat;
55115+int grsec_enable_chroot_fchdir;
55116+int grsec_enable_chroot_double;
55117+int grsec_enable_chroot_pivot;
55118+int grsec_enable_chroot_chdir;
55119+int grsec_enable_chroot_chmod;
55120+int grsec_enable_chroot_mknod;
55121+int grsec_enable_chroot_nice;
55122+int grsec_enable_chroot_execlog;
55123+int grsec_enable_chroot_caps;
55124+int grsec_enable_chroot_sysctl;
55125+int grsec_enable_chroot_unix;
55126+int grsec_enable_tpe;
55127+int grsec_tpe_gid;
55128+int grsec_enable_blackhole;
55129+#ifdef CONFIG_IPV6_MODULE
55130+EXPORT_SYMBOL(grsec_enable_blackhole);
55131+#endif
55132+int grsec_lastack_retries;
55133+int grsec_enable_tpe_all;
55134+int grsec_enable_tpe_invert;
55135+int grsec_enable_socket_all;
55136+int grsec_socket_all_gid;
55137+int grsec_enable_socket_client;
55138+int grsec_socket_client_gid;
55139+int grsec_enable_socket_server;
55140+int grsec_socket_server_gid;
55141+int grsec_resource_logging;
55142+int grsec_disable_privio;
55143+int grsec_enable_log_rwxmaps;
55144+int grsec_lock;
55145+
55146+DEFINE_SPINLOCK(grsec_alert_lock);
55147+unsigned long grsec_alert_wtime = 0;
55148+unsigned long grsec_alert_fyet = 0;
55149+
55150+DEFINE_SPINLOCK(grsec_audit_lock);
55151+
55152+DEFINE_RWLOCK(grsec_exec_file_lock);
55153+
55154+char *gr_shared_page[4];
55155+
55156+char *gr_alert_log_fmt;
55157+char *gr_audit_log_fmt;
55158+char *gr_alert_log_buf;
55159+char *gr_audit_log_buf;
55160+
55161+extern struct gr_arg *gr_usermode;
55162+extern unsigned char *gr_system_salt;
55163+extern unsigned char *gr_system_sum;
55164+
55165+void __init
55166+grsecurity_init(void)
55167+{
55168+ int j;
55169+ /* create the per-cpu shared pages */
55170+
55171+#ifdef CONFIG_X86
55172+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55173+#endif
55174+
55175+ for (j = 0; j < 4; j++) {
55176+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55177+ if (gr_shared_page[j] == NULL) {
55178+ panic("Unable to allocate grsecurity shared page");
55179+ return;
55180+ }
55181+ }
55182+
55183+ /* allocate log buffers */
55184+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55185+ if (!gr_alert_log_fmt) {
55186+ panic("Unable to allocate grsecurity alert log format buffer");
55187+ return;
55188+ }
55189+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55190+ if (!gr_audit_log_fmt) {
55191+ panic("Unable to allocate grsecurity audit log format buffer");
55192+ return;
55193+ }
55194+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55195+ if (!gr_alert_log_buf) {
55196+ panic("Unable to allocate grsecurity alert log buffer");
55197+ return;
55198+ }
55199+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55200+ if (!gr_audit_log_buf) {
55201+ panic("Unable to allocate grsecurity audit log buffer");
55202+ return;
55203+ }
55204+
55205+ /* allocate memory for authentication structure */
55206+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55207+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55208+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55209+
55210+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55211+ panic("Unable to allocate grsecurity authentication structure");
55212+ return;
55213+ }
55214+
55215+
55216+#ifdef CONFIG_GRKERNSEC_IO
55217+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55218+ grsec_disable_privio = 1;
55219+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55220+ grsec_disable_privio = 1;
55221+#else
55222+ grsec_disable_privio = 0;
55223+#endif
55224+#endif
55225+
55226+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55227+ /* for backward compatibility, tpe_invert always defaults to on if
55228+ enabled in the kernel
55229+ */
55230+ grsec_enable_tpe_invert = 1;
55231+#endif
55232+
55233+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55234+#ifndef CONFIG_GRKERNSEC_SYSCTL
55235+ grsec_lock = 1;
55236+#endif
55237+
55238+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55239+ grsec_enable_audit_textrel = 1;
55240+#endif
55241+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55242+ grsec_enable_log_rwxmaps = 1;
55243+#endif
55244+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55245+ grsec_enable_group = 1;
55246+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55247+#endif
55248+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55249+ grsec_enable_ptrace_readexec = 1;
55250+#endif
55251+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55252+ grsec_enable_chdir = 1;
55253+#endif
55254+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55255+ grsec_enable_harden_ptrace = 1;
55256+#endif
55257+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55258+ grsec_enable_mount = 1;
55259+#endif
55260+#ifdef CONFIG_GRKERNSEC_LINK
55261+ grsec_enable_link = 1;
55262+#endif
55263+#ifdef CONFIG_GRKERNSEC_BRUTE
55264+ grsec_enable_brute = 1;
55265+#endif
55266+#ifdef CONFIG_GRKERNSEC_DMESG
55267+ grsec_enable_dmesg = 1;
55268+#endif
55269+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55270+ grsec_enable_blackhole = 1;
55271+ grsec_lastack_retries = 4;
55272+#endif
55273+#ifdef CONFIG_GRKERNSEC_FIFO
55274+ grsec_enable_fifo = 1;
55275+#endif
55276+#ifdef CONFIG_GRKERNSEC_EXECLOG
55277+ grsec_enable_execlog = 1;
55278+#endif
55279+#ifdef CONFIG_GRKERNSEC_SETXID
55280+ grsec_enable_setxid = 1;
55281+#endif
55282+#ifdef CONFIG_GRKERNSEC_SIGNAL
55283+ grsec_enable_signal = 1;
55284+#endif
55285+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55286+ grsec_enable_forkfail = 1;
55287+#endif
55288+#ifdef CONFIG_GRKERNSEC_TIME
55289+ grsec_enable_time = 1;
55290+#endif
55291+#ifdef CONFIG_GRKERNSEC_RESLOG
55292+ grsec_resource_logging = 1;
55293+#endif
55294+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55295+ grsec_enable_chroot_findtask = 1;
55296+#endif
55297+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55298+ grsec_enable_chroot_unix = 1;
55299+#endif
55300+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55301+ grsec_enable_chroot_mount = 1;
55302+#endif
55303+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55304+ grsec_enable_chroot_fchdir = 1;
55305+#endif
55306+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55307+ grsec_enable_chroot_shmat = 1;
55308+#endif
55309+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55310+ grsec_enable_audit_ptrace = 1;
55311+#endif
55312+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55313+ grsec_enable_chroot_double = 1;
55314+#endif
55315+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55316+ grsec_enable_chroot_pivot = 1;
55317+#endif
55318+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55319+ grsec_enable_chroot_chdir = 1;
55320+#endif
55321+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55322+ grsec_enable_chroot_chmod = 1;
55323+#endif
55324+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55325+ grsec_enable_chroot_mknod = 1;
55326+#endif
55327+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55328+ grsec_enable_chroot_nice = 1;
55329+#endif
55330+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55331+ grsec_enable_chroot_execlog = 1;
55332+#endif
55333+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55334+ grsec_enable_chroot_caps = 1;
55335+#endif
55336+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55337+ grsec_enable_chroot_sysctl = 1;
55338+#endif
55339+#ifdef CONFIG_GRKERNSEC_TPE
55340+ grsec_enable_tpe = 1;
55341+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55342+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55343+ grsec_enable_tpe_all = 1;
55344+#endif
55345+#endif
55346+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55347+ grsec_enable_socket_all = 1;
55348+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55349+#endif
55350+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55351+ grsec_enable_socket_client = 1;
55352+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55353+#endif
55354+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55355+ grsec_enable_socket_server = 1;
55356+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55357+#endif
55358+#endif
55359+
55360+ return;
55361+}
55362diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55363new file mode 100644
55364index 0000000..3efe141
55365--- /dev/null
55366+++ b/grsecurity/grsec_link.c
55367@@ -0,0 +1,43 @@
55368+#include <linux/kernel.h>
55369+#include <linux/sched.h>
55370+#include <linux/fs.h>
55371+#include <linux/file.h>
55372+#include <linux/grinternal.h>
55373+
55374+int
55375+gr_handle_follow_link(const struct inode *parent,
55376+ const struct inode *inode,
55377+ const struct dentry *dentry, const struct vfsmount *mnt)
55378+{
55379+#ifdef CONFIG_GRKERNSEC_LINK
55380+ const struct cred *cred = current_cred();
55381+
55382+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55383+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55384+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55385+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55386+ return -EACCES;
55387+ }
55388+#endif
55389+ return 0;
55390+}
55391+
55392+int
55393+gr_handle_hardlink(const struct dentry *dentry,
55394+ const struct vfsmount *mnt,
55395+ struct inode *inode, const int mode, const char *to)
55396+{
55397+#ifdef CONFIG_GRKERNSEC_LINK
55398+ const struct cred *cred = current_cred();
55399+
55400+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55401+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55402+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55403+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55404+ !capable(CAP_FOWNER) && cred->uid) {
55405+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55406+ return -EPERM;
55407+ }
55408+#endif
55409+ return 0;
55410+}
55411diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55412new file mode 100644
55413index 0000000..a45d2e9
55414--- /dev/null
55415+++ b/grsecurity/grsec_log.c
55416@@ -0,0 +1,322 @@
55417+#include <linux/kernel.h>
55418+#include <linux/sched.h>
55419+#include <linux/file.h>
55420+#include <linux/tty.h>
55421+#include <linux/fs.h>
55422+#include <linux/grinternal.h>
55423+
55424+#ifdef CONFIG_TREE_PREEMPT_RCU
55425+#define DISABLE_PREEMPT() preempt_disable()
55426+#define ENABLE_PREEMPT() preempt_enable()
55427+#else
55428+#define DISABLE_PREEMPT()
55429+#define ENABLE_PREEMPT()
55430+#endif
55431+
55432+#define BEGIN_LOCKS(x) \
55433+ DISABLE_PREEMPT(); \
55434+ rcu_read_lock(); \
55435+ read_lock(&tasklist_lock); \
55436+ read_lock(&grsec_exec_file_lock); \
55437+ if (x != GR_DO_AUDIT) \
55438+ spin_lock(&grsec_alert_lock); \
55439+ else \
55440+ spin_lock(&grsec_audit_lock)
55441+
55442+#define END_LOCKS(x) \
55443+ if (x != GR_DO_AUDIT) \
55444+ spin_unlock(&grsec_alert_lock); \
55445+ else \
55446+ spin_unlock(&grsec_audit_lock); \
55447+ read_unlock(&grsec_exec_file_lock); \
55448+ read_unlock(&tasklist_lock); \
55449+ rcu_read_unlock(); \
55450+ ENABLE_PREEMPT(); \
55451+ if (x == GR_DONT_AUDIT) \
55452+ gr_handle_alertkill(current)
55453+
55454+enum {
55455+ FLOODING,
55456+ NO_FLOODING
55457+};
55458+
55459+extern char *gr_alert_log_fmt;
55460+extern char *gr_audit_log_fmt;
55461+extern char *gr_alert_log_buf;
55462+extern char *gr_audit_log_buf;
55463+
55464+static int gr_log_start(int audit)
55465+{
55466+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55467+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55468+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55469+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55470+ unsigned long curr_secs = get_seconds();
55471+
55472+ if (audit == GR_DO_AUDIT)
55473+ goto set_fmt;
55474+
55475+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55476+ grsec_alert_wtime = curr_secs;
55477+ grsec_alert_fyet = 0;
55478+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55479+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55480+ grsec_alert_fyet++;
55481+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55482+ grsec_alert_wtime = curr_secs;
55483+ grsec_alert_fyet++;
55484+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55485+ return FLOODING;
55486+ }
55487+ else return FLOODING;
55488+
55489+set_fmt:
55490+#endif
55491+ memset(buf, 0, PAGE_SIZE);
55492+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55493+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55494+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55495+ } else if (current->signal->curr_ip) {
55496+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55497+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55498+ } else if (gr_acl_is_enabled()) {
55499+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55500+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55501+ } else {
55502+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55503+ strcpy(buf, fmt);
55504+ }
55505+
55506+ return NO_FLOODING;
55507+}
55508+
55509+static void gr_log_middle(int audit, const char *msg, va_list ap)
55510+ __attribute__ ((format (printf, 2, 0)));
55511+
55512+static void gr_log_middle(int audit, const char *msg, va_list ap)
55513+{
55514+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55515+ unsigned int len = strlen(buf);
55516+
55517+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55518+
55519+ return;
55520+}
55521+
55522+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55523+ __attribute__ ((format (printf, 2, 3)));
55524+
55525+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55526+{
55527+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55528+ unsigned int len = strlen(buf);
55529+ va_list ap;
55530+
55531+ va_start(ap, msg);
55532+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55533+ va_end(ap);
55534+
55535+ return;
55536+}
55537+
55538+static void gr_log_end(int audit, int append_default)
55539+{
55540+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55541+
55542+ if (append_default) {
55543+ unsigned int len = strlen(buf);
55544+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55545+ }
55546+
55547+ printk("%s\n", buf);
55548+
55549+ return;
55550+}
55551+
55552+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55553+{
55554+ int logtype;
55555+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55556+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55557+ void *voidptr = NULL;
55558+ int num1 = 0, num2 = 0;
55559+ unsigned long ulong1 = 0, ulong2 = 0;
55560+ struct dentry *dentry = NULL;
55561+ struct vfsmount *mnt = NULL;
55562+ struct file *file = NULL;
55563+ struct task_struct *task = NULL;
55564+ const struct cred *cred, *pcred;
55565+ va_list ap;
55566+
55567+ BEGIN_LOCKS(audit);
55568+ logtype = gr_log_start(audit);
55569+ if (logtype == FLOODING) {
55570+ END_LOCKS(audit);
55571+ return;
55572+ }
55573+ va_start(ap, argtypes);
55574+ switch (argtypes) {
55575+ case GR_TTYSNIFF:
55576+ task = va_arg(ap, struct task_struct *);
55577+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55578+ break;
55579+ case GR_SYSCTL_HIDDEN:
55580+ str1 = va_arg(ap, char *);
55581+ gr_log_middle_varargs(audit, msg, result, str1);
55582+ break;
55583+ case GR_RBAC:
55584+ dentry = va_arg(ap, struct dentry *);
55585+ mnt = va_arg(ap, struct vfsmount *);
55586+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55587+ break;
55588+ case GR_RBAC_STR:
55589+ dentry = va_arg(ap, struct dentry *);
55590+ mnt = va_arg(ap, struct vfsmount *);
55591+ str1 = va_arg(ap, char *);
55592+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55593+ break;
55594+ case GR_STR_RBAC:
55595+ str1 = va_arg(ap, char *);
55596+ dentry = va_arg(ap, struct dentry *);
55597+ mnt = va_arg(ap, struct vfsmount *);
55598+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55599+ break;
55600+ case GR_RBAC_MODE2:
55601+ dentry = va_arg(ap, struct dentry *);
55602+ mnt = va_arg(ap, struct vfsmount *);
55603+ str1 = va_arg(ap, char *);
55604+ str2 = va_arg(ap, char *);
55605+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55606+ break;
55607+ case GR_RBAC_MODE3:
55608+ dentry = va_arg(ap, struct dentry *);
55609+ mnt = va_arg(ap, struct vfsmount *);
55610+ str1 = va_arg(ap, char *);
55611+ str2 = va_arg(ap, char *);
55612+ str3 = va_arg(ap, char *);
55613+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55614+ break;
55615+ case GR_FILENAME:
55616+ dentry = va_arg(ap, struct dentry *);
55617+ mnt = va_arg(ap, struct vfsmount *);
55618+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55619+ break;
55620+ case GR_STR_FILENAME:
55621+ str1 = va_arg(ap, char *);
55622+ dentry = va_arg(ap, struct dentry *);
55623+ mnt = va_arg(ap, struct vfsmount *);
55624+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55625+ break;
55626+ case GR_FILENAME_STR:
55627+ dentry = va_arg(ap, struct dentry *);
55628+ mnt = va_arg(ap, struct vfsmount *);
55629+ str1 = va_arg(ap, char *);
55630+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55631+ break;
55632+ case GR_FILENAME_TWO_INT:
55633+ dentry = va_arg(ap, struct dentry *);
55634+ mnt = va_arg(ap, struct vfsmount *);
55635+ num1 = va_arg(ap, int);
55636+ num2 = va_arg(ap, int);
55637+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55638+ break;
55639+ case GR_FILENAME_TWO_INT_STR:
55640+ dentry = va_arg(ap, struct dentry *);
55641+ mnt = va_arg(ap, struct vfsmount *);
55642+ num1 = va_arg(ap, int);
55643+ num2 = va_arg(ap, int);
55644+ str1 = va_arg(ap, char *);
55645+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55646+ break;
55647+ case GR_TEXTREL:
55648+ file = va_arg(ap, struct file *);
55649+ ulong1 = va_arg(ap, unsigned long);
55650+ ulong2 = va_arg(ap, unsigned long);
55651+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55652+ break;
55653+ case GR_PTRACE:
55654+ task = va_arg(ap, struct task_struct *);
55655+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55656+ break;
55657+ case GR_RESOURCE:
55658+ task = va_arg(ap, struct task_struct *);
55659+ cred = __task_cred(task);
55660+ pcred = __task_cred(task->real_parent);
55661+ ulong1 = va_arg(ap, unsigned long);
55662+ str1 = va_arg(ap, char *);
55663+ ulong2 = va_arg(ap, unsigned long);
55664+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55665+ break;
55666+ case GR_CAP:
55667+ task = va_arg(ap, struct task_struct *);
55668+ cred = __task_cred(task);
55669+ pcred = __task_cred(task->real_parent);
55670+ str1 = va_arg(ap, char *);
55671+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55672+ break;
55673+ case GR_SIG:
55674+ str1 = va_arg(ap, char *);
55675+ voidptr = va_arg(ap, void *);
55676+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55677+ break;
55678+ case GR_SIG2:
55679+ task = va_arg(ap, struct task_struct *);
55680+ cred = __task_cred(task);
55681+ pcred = __task_cred(task->real_parent);
55682+ num1 = va_arg(ap, int);
55683+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55684+ break;
55685+ case GR_CRASH1:
55686+ task = va_arg(ap, struct task_struct *);
55687+ cred = __task_cred(task);
55688+ pcred = __task_cred(task->real_parent);
55689+ ulong1 = va_arg(ap, unsigned long);
55690+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55691+ break;
55692+ case GR_CRASH2:
55693+ task = va_arg(ap, struct task_struct *);
55694+ cred = __task_cred(task);
55695+ pcred = __task_cred(task->real_parent);
55696+ ulong1 = va_arg(ap, unsigned long);
55697+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55698+ break;
55699+ case GR_RWXMAP:
55700+ file = va_arg(ap, struct file *);
55701+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55702+ break;
55703+ case GR_PSACCT:
55704+ {
55705+ unsigned int wday, cday;
55706+ __u8 whr, chr;
55707+ __u8 wmin, cmin;
55708+ __u8 wsec, csec;
55709+ char cur_tty[64] = { 0 };
55710+ char parent_tty[64] = { 0 };
55711+
55712+ task = va_arg(ap, struct task_struct *);
55713+ wday = va_arg(ap, unsigned int);
55714+ cday = va_arg(ap, unsigned int);
55715+ whr = va_arg(ap, int);
55716+ chr = va_arg(ap, int);
55717+ wmin = va_arg(ap, int);
55718+ cmin = va_arg(ap, int);
55719+ wsec = va_arg(ap, int);
55720+ csec = va_arg(ap, int);
55721+ ulong1 = va_arg(ap, unsigned long);
55722+ cred = __task_cred(task);
55723+ pcred = __task_cred(task->real_parent);
55724+
55725+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55726+ }
55727+ break;
55728+ default:
55729+ gr_log_middle(audit, msg, ap);
55730+ }
55731+ va_end(ap);
55732+ // these don't need DEFAULTSECARGS printed on the end
55733+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55734+ gr_log_end(audit, 0);
55735+ else
55736+ gr_log_end(audit, 1);
55737+ END_LOCKS(audit);
55738+}
55739diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55740new file mode 100644
55741index 0000000..f536303
55742--- /dev/null
55743+++ b/grsecurity/grsec_mem.c
55744@@ -0,0 +1,40 @@
55745+#include <linux/kernel.h>
55746+#include <linux/sched.h>
55747+#include <linux/mm.h>
55748+#include <linux/mman.h>
55749+#include <linux/grinternal.h>
55750+
55751+void
55752+gr_handle_ioperm(void)
55753+{
55754+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55755+ return;
55756+}
55757+
55758+void
55759+gr_handle_iopl(void)
55760+{
55761+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55762+ return;
55763+}
55764+
55765+void
55766+gr_handle_mem_readwrite(u64 from, u64 to)
55767+{
55768+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55769+ return;
55770+}
55771+
55772+void
55773+gr_handle_vm86(void)
55774+{
55775+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55776+ return;
55777+}
55778+
55779+void
55780+gr_log_badprocpid(const char *entry)
55781+{
55782+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
55783+ return;
55784+}
55785diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55786new file mode 100644
55787index 0000000..2131422
55788--- /dev/null
55789+++ b/grsecurity/grsec_mount.c
55790@@ -0,0 +1,62 @@
55791+#include <linux/kernel.h>
55792+#include <linux/sched.h>
55793+#include <linux/mount.h>
55794+#include <linux/grsecurity.h>
55795+#include <linux/grinternal.h>
55796+
55797+void
55798+gr_log_remount(const char *devname, const int retval)
55799+{
55800+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55801+ if (grsec_enable_mount && (retval >= 0))
55802+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55803+#endif
55804+ return;
55805+}
55806+
55807+void
55808+gr_log_unmount(const char *devname, const int retval)
55809+{
55810+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55811+ if (grsec_enable_mount && (retval >= 0))
55812+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55813+#endif
55814+ return;
55815+}
55816+
55817+void
55818+gr_log_mount(const char *from, const char *to, const int retval)
55819+{
55820+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55821+ if (grsec_enable_mount && (retval >= 0))
55822+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55823+#endif
55824+ return;
55825+}
55826+
55827+int
55828+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55829+{
55830+#ifdef CONFIG_GRKERNSEC_ROFS
55831+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55832+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55833+ return -EPERM;
55834+ } else
55835+ return 0;
55836+#endif
55837+ return 0;
55838+}
55839+
55840+int
55841+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55842+{
55843+#ifdef CONFIG_GRKERNSEC_ROFS
55844+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55845+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55846+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55847+ return -EPERM;
55848+ } else
55849+ return 0;
55850+#endif
55851+ return 0;
55852+}
55853diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55854new file mode 100644
55855index 0000000..a3b12a0
55856--- /dev/null
55857+++ b/grsecurity/grsec_pax.c
55858@@ -0,0 +1,36 @@
55859+#include <linux/kernel.h>
55860+#include <linux/sched.h>
55861+#include <linux/mm.h>
55862+#include <linux/file.h>
55863+#include <linux/grinternal.h>
55864+#include <linux/grsecurity.h>
55865+
55866+void
55867+gr_log_textrel(struct vm_area_struct * vma)
55868+{
55869+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55870+ if (grsec_enable_audit_textrel)
55871+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55872+#endif
55873+ return;
55874+}
55875+
55876+void
55877+gr_log_rwxmmap(struct file *file)
55878+{
55879+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55880+ if (grsec_enable_log_rwxmaps)
55881+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55882+#endif
55883+ return;
55884+}
55885+
55886+void
55887+gr_log_rwxmprotect(struct file *file)
55888+{
55889+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55890+ if (grsec_enable_log_rwxmaps)
55891+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55892+#endif
55893+ return;
55894+}
55895diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55896new file mode 100644
55897index 0000000..f7f29aa
55898--- /dev/null
55899+++ b/grsecurity/grsec_ptrace.c
55900@@ -0,0 +1,30 @@
55901+#include <linux/kernel.h>
55902+#include <linux/sched.h>
55903+#include <linux/grinternal.h>
55904+#include <linux/security.h>
55905+
55906+void
55907+gr_audit_ptrace(struct task_struct *task)
55908+{
55909+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55910+ if (grsec_enable_audit_ptrace)
55911+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55912+#endif
55913+ return;
55914+}
55915+
55916+int
55917+gr_ptrace_readexec(struct file *file, int unsafe_flags)
55918+{
55919+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55920+ const struct dentry *dentry = file->f_path.dentry;
55921+ const struct vfsmount *mnt = file->f_path.mnt;
55922+
55923+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
55924+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
55925+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
55926+ return -EACCES;
55927+ }
55928+#endif
55929+ return 0;
55930+}
55931diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
55932new file mode 100644
55933index 0000000..7a5b2de
55934--- /dev/null
55935+++ b/grsecurity/grsec_sig.c
55936@@ -0,0 +1,207 @@
55937+#include <linux/kernel.h>
55938+#include <linux/sched.h>
55939+#include <linux/delay.h>
55940+#include <linux/grsecurity.h>
55941+#include <linux/grinternal.h>
55942+#include <linux/hardirq.h>
55943+
55944+char *signames[] = {
55945+ [SIGSEGV] = "Segmentation fault",
55946+ [SIGILL] = "Illegal instruction",
55947+ [SIGABRT] = "Abort",
55948+ [SIGBUS] = "Invalid alignment/Bus error"
55949+};
55950+
55951+void
55952+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55953+{
55954+#ifdef CONFIG_GRKERNSEC_SIGNAL
55955+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55956+ (sig == SIGABRT) || (sig == SIGBUS))) {
55957+ if (t->pid == current->pid) {
55958+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55959+ } else {
55960+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55961+ }
55962+ }
55963+#endif
55964+ return;
55965+}
55966+
55967+int
55968+gr_handle_signal(const struct task_struct *p, const int sig)
55969+{
55970+#ifdef CONFIG_GRKERNSEC
55971+ /* ignore the 0 signal for protected task checks */
55972+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
55973+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55974+ return -EPERM;
55975+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55976+ return -EPERM;
55977+ }
55978+#endif
55979+ return 0;
55980+}
55981+
55982+#ifdef CONFIG_GRKERNSEC
55983+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55984+
55985+int gr_fake_force_sig(int sig, struct task_struct *t)
55986+{
55987+ unsigned long int flags;
55988+ int ret, blocked, ignored;
55989+ struct k_sigaction *action;
55990+
55991+ spin_lock_irqsave(&t->sighand->siglock, flags);
55992+ action = &t->sighand->action[sig-1];
55993+ ignored = action->sa.sa_handler == SIG_IGN;
55994+ blocked = sigismember(&t->blocked, sig);
55995+ if (blocked || ignored) {
55996+ action->sa.sa_handler = SIG_DFL;
55997+ if (blocked) {
55998+ sigdelset(&t->blocked, sig);
55999+ recalc_sigpending_and_wake(t);
56000+ }
56001+ }
56002+ if (action->sa.sa_handler == SIG_DFL)
56003+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
56004+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56005+
56006+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
56007+
56008+ return ret;
56009+}
56010+#endif
56011+
56012+#ifdef CONFIG_GRKERNSEC_BRUTE
56013+#define GR_USER_BAN_TIME (15 * 60)
56014+
56015+static int __get_dumpable(unsigned long mm_flags)
56016+{
56017+ int ret;
56018+
56019+ ret = mm_flags & MMF_DUMPABLE_MASK;
56020+ return (ret >= 2) ? 2 : ret;
56021+}
56022+#endif
56023+
56024+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56025+{
56026+#ifdef CONFIG_GRKERNSEC_BRUTE
56027+ uid_t uid = 0;
56028+
56029+ if (!grsec_enable_brute)
56030+ return;
56031+
56032+ rcu_read_lock();
56033+ read_lock(&tasklist_lock);
56034+ read_lock(&grsec_exec_file_lock);
56035+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56036+ p->real_parent->brute = 1;
56037+ else {
56038+ const struct cred *cred = __task_cred(p), *cred2;
56039+ struct task_struct *tsk, *tsk2;
56040+
56041+ if (!__get_dumpable(mm_flags) && cred->uid) {
56042+ struct user_struct *user;
56043+
56044+ uid = cred->uid;
56045+
56046+ /* this is put upon execution past expiration */
56047+ user = find_user(uid);
56048+ if (user == NULL)
56049+ goto unlock;
56050+ user->banned = 1;
56051+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56052+ if (user->ban_expires == ~0UL)
56053+ user->ban_expires--;
56054+
56055+ do_each_thread(tsk2, tsk) {
56056+ cred2 = __task_cred(tsk);
56057+ if (tsk != p && cred2->uid == uid)
56058+ gr_fake_force_sig(SIGKILL, tsk);
56059+ } while_each_thread(tsk2, tsk);
56060+ }
56061+ }
56062+unlock:
56063+ read_unlock(&grsec_exec_file_lock);
56064+ read_unlock(&tasklist_lock);
56065+ rcu_read_unlock();
56066+
56067+ if (uid)
56068+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56069+
56070+#endif
56071+ return;
56072+}
56073+
56074+void gr_handle_brute_check(void)
56075+{
56076+#ifdef CONFIG_GRKERNSEC_BRUTE
56077+ if (current->brute)
56078+ msleep(30 * 1000);
56079+#endif
56080+ return;
56081+}
56082+
56083+void gr_handle_kernel_exploit(void)
56084+{
56085+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56086+ const struct cred *cred;
56087+ struct task_struct *tsk, *tsk2;
56088+ struct user_struct *user;
56089+ uid_t uid;
56090+
56091+ if (in_irq() || in_serving_softirq() || in_nmi())
56092+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56093+
56094+ uid = current_uid();
56095+
56096+ if (uid == 0)
56097+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
56098+ else {
56099+ /* kill all the processes of this user, hold a reference
56100+ to their creds struct, and prevent them from creating
56101+ another process until system reset
56102+ */
56103+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56104+ /* we intentionally leak this ref */
56105+ user = get_uid(current->cred->user);
56106+ if (user) {
56107+ user->banned = 1;
56108+ user->ban_expires = ~0UL;
56109+ }
56110+
56111+ read_lock(&tasklist_lock);
56112+ do_each_thread(tsk2, tsk) {
56113+ cred = __task_cred(tsk);
56114+ if (cred->uid == uid)
56115+ gr_fake_force_sig(SIGKILL, tsk);
56116+ } while_each_thread(tsk2, tsk);
56117+ read_unlock(&tasklist_lock);
56118+ }
56119+#endif
56120+}
56121+
56122+int __gr_process_user_ban(struct user_struct *user)
56123+{
56124+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56125+ if (unlikely(user->banned)) {
56126+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56127+ user->banned = 0;
56128+ user->ban_expires = 0;
56129+ free_uid(user);
56130+ } else
56131+ return -EPERM;
56132+ }
56133+#endif
56134+ return 0;
56135+}
56136+
56137+int gr_process_user_ban(void)
56138+{
56139+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56140+ return __gr_process_user_ban(current->cred->user);
56141+#endif
56142+ return 0;
56143+}
56144diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56145new file mode 100644
56146index 0000000..4030d57
56147--- /dev/null
56148+++ b/grsecurity/grsec_sock.c
56149@@ -0,0 +1,244 @@
56150+#include <linux/kernel.h>
56151+#include <linux/module.h>
56152+#include <linux/sched.h>
56153+#include <linux/file.h>
56154+#include <linux/net.h>
56155+#include <linux/in.h>
56156+#include <linux/ip.h>
56157+#include <net/sock.h>
56158+#include <net/inet_sock.h>
56159+#include <linux/grsecurity.h>
56160+#include <linux/grinternal.h>
56161+#include <linux/gracl.h>
56162+
56163+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56164+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56165+
56166+EXPORT_SYMBOL(gr_search_udp_recvmsg);
56167+EXPORT_SYMBOL(gr_search_udp_sendmsg);
56168+
56169+#ifdef CONFIG_UNIX_MODULE
56170+EXPORT_SYMBOL(gr_acl_handle_unix);
56171+EXPORT_SYMBOL(gr_acl_handle_mknod);
56172+EXPORT_SYMBOL(gr_handle_chroot_unix);
56173+EXPORT_SYMBOL(gr_handle_create);
56174+#endif
56175+
56176+#ifdef CONFIG_GRKERNSEC
56177+#define gr_conn_table_size 32749
56178+struct conn_table_entry {
56179+ struct conn_table_entry *next;
56180+ struct signal_struct *sig;
56181+};
56182+
56183+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56184+DEFINE_SPINLOCK(gr_conn_table_lock);
56185+
56186+extern const char * gr_socktype_to_name(unsigned char type);
56187+extern const char * gr_proto_to_name(unsigned char proto);
56188+extern const char * gr_sockfamily_to_name(unsigned char family);
56189+
56190+static __inline__ int
56191+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56192+{
56193+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56194+}
56195+
56196+static __inline__ int
56197+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56198+ __u16 sport, __u16 dport)
56199+{
56200+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56201+ sig->gr_sport == sport && sig->gr_dport == dport))
56202+ return 1;
56203+ else
56204+ return 0;
56205+}
56206+
56207+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56208+{
56209+ struct conn_table_entry **match;
56210+ unsigned int index;
56211+
56212+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56213+ sig->gr_sport, sig->gr_dport,
56214+ gr_conn_table_size);
56215+
56216+ newent->sig = sig;
56217+
56218+ match = &gr_conn_table[index];
56219+ newent->next = *match;
56220+ *match = newent;
56221+
56222+ return;
56223+}
56224+
56225+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56226+{
56227+ struct conn_table_entry *match, *last = NULL;
56228+ unsigned int index;
56229+
56230+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56231+ sig->gr_sport, sig->gr_dport,
56232+ gr_conn_table_size);
56233+
56234+ match = gr_conn_table[index];
56235+ while (match && !conn_match(match->sig,
56236+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56237+ sig->gr_dport)) {
56238+ last = match;
56239+ match = match->next;
56240+ }
56241+
56242+ if (match) {
56243+ if (last)
56244+ last->next = match->next;
56245+ else
56246+ gr_conn_table[index] = NULL;
56247+ kfree(match);
56248+ }
56249+
56250+ return;
56251+}
56252+
56253+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56254+ __u16 sport, __u16 dport)
56255+{
56256+ struct conn_table_entry *match;
56257+ unsigned int index;
56258+
56259+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56260+
56261+ match = gr_conn_table[index];
56262+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56263+ match = match->next;
56264+
56265+ if (match)
56266+ return match->sig;
56267+ else
56268+ return NULL;
56269+}
56270+
56271+#endif
56272+
56273+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56274+{
56275+#ifdef CONFIG_GRKERNSEC
56276+ struct signal_struct *sig = task->signal;
56277+ struct conn_table_entry *newent;
56278+
56279+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56280+ if (newent == NULL)
56281+ return;
56282+ /* no bh lock needed since we are called with bh disabled */
56283+ spin_lock(&gr_conn_table_lock);
56284+ gr_del_task_from_ip_table_nolock(sig);
56285+ sig->gr_saddr = inet->inet_rcv_saddr;
56286+ sig->gr_daddr = inet->inet_daddr;
56287+ sig->gr_sport = inet->inet_sport;
56288+ sig->gr_dport = inet->inet_dport;
56289+ gr_add_to_task_ip_table_nolock(sig, newent);
56290+ spin_unlock(&gr_conn_table_lock);
56291+#endif
56292+ return;
56293+}
56294+
56295+void gr_del_task_from_ip_table(struct task_struct *task)
56296+{
56297+#ifdef CONFIG_GRKERNSEC
56298+ spin_lock_bh(&gr_conn_table_lock);
56299+ gr_del_task_from_ip_table_nolock(task->signal);
56300+ spin_unlock_bh(&gr_conn_table_lock);
56301+#endif
56302+ return;
56303+}
56304+
56305+void
56306+gr_attach_curr_ip(const struct sock *sk)
56307+{
56308+#ifdef CONFIG_GRKERNSEC
56309+ struct signal_struct *p, *set;
56310+ const struct inet_sock *inet = inet_sk(sk);
56311+
56312+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56313+ return;
56314+
56315+ set = current->signal;
56316+
56317+ spin_lock_bh(&gr_conn_table_lock);
56318+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56319+ inet->inet_dport, inet->inet_sport);
56320+ if (unlikely(p != NULL)) {
56321+ set->curr_ip = p->curr_ip;
56322+ set->used_accept = 1;
56323+ gr_del_task_from_ip_table_nolock(p);
56324+ spin_unlock_bh(&gr_conn_table_lock);
56325+ return;
56326+ }
56327+ spin_unlock_bh(&gr_conn_table_lock);
56328+
56329+ set->curr_ip = inet->inet_daddr;
56330+ set->used_accept = 1;
56331+#endif
56332+ return;
56333+}
56334+
56335+int
56336+gr_handle_sock_all(const int family, const int type, const int protocol)
56337+{
56338+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56339+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56340+ (family != AF_UNIX)) {
56341+ if (family == AF_INET)
56342+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56343+ else
56344+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56345+ return -EACCES;
56346+ }
56347+#endif
56348+ return 0;
56349+}
56350+
56351+int
56352+gr_handle_sock_server(const struct sockaddr *sck)
56353+{
56354+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56355+ if (grsec_enable_socket_server &&
56356+ in_group_p(grsec_socket_server_gid) &&
56357+ sck && (sck->sa_family != AF_UNIX) &&
56358+ (sck->sa_family != AF_LOCAL)) {
56359+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56360+ return -EACCES;
56361+ }
56362+#endif
56363+ return 0;
56364+}
56365+
56366+int
56367+gr_handle_sock_server_other(const struct sock *sck)
56368+{
56369+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56370+ if (grsec_enable_socket_server &&
56371+ in_group_p(grsec_socket_server_gid) &&
56372+ sck && (sck->sk_family != AF_UNIX) &&
56373+ (sck->sk_family != AF_LOCAL)) {
56374+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56375+ return -EACCES;
56376+ }
56377+#endif
56378+ return 0;
56379+}
56380+
56381+int
56382+gr_handle_sock_client(const struct sockaddr *sck)
56383+{
56384+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56385+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56386+ sck && (sck->sa_family != AF_UNIX) &&
56387+ (sck->sa_family != AF_LOCAL)) {
56388+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56389+ return -EACCES;
56390+ }
56391+#endif
56392+ return 0;
56393+}
56394diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56395new file mode 100644
56396index 0000000..a1aedd7
56397--- /dev/null
56398+++ b/grsecurity/grsec_sysctl.c
56399@@ -0,0 +1,451 @@
56400+#include <linux/kernel.h>
56401+#include <linux/sched.h>
56402+#include <linux/sysctl.h>
56403+#include <linux/grsecurity.h>
56404+#include <linux/grinternal.h>
56405+
56406+int
56407+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56408+{
56409+#ifdef CONFIG_GRKERNSEC_SYSCTL
56410+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56411+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56412+ return -EACCES;
56413+ }
56414+#endif
56415+ return 0;
56416+}
56417+
56418+#ifdef CONFIG_GRKERNSEC_ROFS
56419+static int __maybe_unused one = 1;
56420+#endif
56421+
56422+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56423+struct ctl_table grsecurity_table[] = {
56424+#ifdef CONFIG_GRKERNSEC_SYSCTL
56425+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56426+#ifdef CONFIG_GRKERNSEC_IO
56427+ {
56428+ .procname = "disable_priv_io",
56429+ .data = &grsec_disable_privio,
56430+ .maxlen = sizeof(int),
56431+ .mode = 0600,
56432+ .proc_handler = &proc_dointvec,
56433+ },
56434+#endif
56435+#endif
56436+#ifdef CONFIG_GRKERNSEC_LINK
56437+ {
56438+ .procname = "linking_restrictions",
56439+ .data = &grsec_enable_link,
56440+ .maxlen = sizeof(int),
56441+ .mode = 0600,
56442+ .proc_handler = &proc_dointvec,
56443+ },
56444+#endif
56445+#ifdef CONFIG_GRKERNSEC_BRUTE
56446+ {
56447+ .procname = "deter_bruteforce",
56448+ .data = &grsec_enable_brute,
56449+ .maxlen = sizeof(int),
56450+ .mode = 0600,
56451+ .proc_handler = &proc_dointvec,
56452+ },
56453+#endif
56454+#ifdef CONFIG_GRKERNSEC_FIFO
56455+ {
56456+ .procname = "fifo_restrictions",
56457+ .data = &grsec_enable_fifo,
56458+ .maxlen = sizeof(int),
56459+ .mode = 0600,
56460+ .proc_handler = &proc_dointvec,
56461+ },
56462+#endif
56463+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56464+ {
56465+ .procname = "ptrace_readexec",
56466+ .data = &grsec_enable_ptrace_readexec,
56467+ .maxlen = sizeof(int),
56468+ .mode = 0600,
56469+ .proc_handler = &proc_dointvec,
56470+ },
56471+#endif
56472+#ifdef CONFIG_GRKERNSEC_SETXID
56473+ {
56474+ .procname = "consistent_setxid",
56475+ .data = &grsec_enable_setxid,
56476+ .maxlen = sizeof(int),
56477+ .mode = 0600,
56478+ .proc_handler = &proc_dointvec,
56479+ },
56480+#endif
56481+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56482+ {
56483+ .procname = "ip_blackhole",
56484+ .data = &grsec_enable_blackhole,
56485+ .maxlen = sizeof(int),
56486+ .mode = 0600,
56487+ .proc_handler = &proc_dointvec,
56488+ },
56489+ {
56490+ .procname = "lastack_retries",
56491+ .data = &grsec_lastack_retries,
56492+ .maxlen = sizeof(int),
56493+ .mode = 0600,
56494+ .proc_handler = &proc_dointvec,
56495+ },
56496+#endif
56497+#ifdef CONFIG_GRKERNSEC_EXECLOG
56498+ {
56499+ .procname = "exec_logging",
56500+ .data = &grsec_enable_execlog,
56501+ .maxlen = sizeof(int),
56502+ .mode = 0600,
56503+ .proc_handler = &proc_dointvec,
56504+ },
56505+#endif
56506+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56507+ {
56508+ .procname = "rwxmap_logging",
56509+ .data = &grsec_enable_log_rwxmaps,
56510+ .maxlen = sizeof(int),
56511+ .mode = 0600,
56512+ .proc_handler = &proc_dointvec,
56513+ },
56514+#endif
56515+#ifdef CONFIG_GRKERNSEC_SIGNAL
56516+ {
56517+ .procname = "signal_logging",
56518+ .data = &grsec_enable_signal,
56519+ .maxlen = sizeof(int),
56520+ .mode = 0600,
56521+ .proc_handler = &proc_dointvec,
56522+ },
56523+#endif
56524+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56525+ {
56526+ .procname = "forkfail_logging",
56527+ .data = &grsec_enable_forkfail,
56528+ .maxlen = sizeof(int),
56529+ .mode = 0600,
56530+ .proc_handler = &proc_dointvec,
56531+ },
56532+#endif
56533+#ifdef CONFIG_GRKERNSEC_TIME
56534+ {
56535+ .procname = "timechange_logging",
56536+ .data = &grsec_enable_time,
56537+ .maxlen = sizeof(int),
56538+ .mode = 0600,
56539+ .proc_handler = &proc_dointvec,
56540+ },
56541+#endif
56542+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56543+ {
56544+ .procname = "chroot_deny_shmat",
56545+ .data = &grsec_enable_chroot_shmat,
56546+ .maxlen = sizeof(int),
56547+ .mode = 0600,
56548+ .proc_handler = &proc_dointvec,
56549+ },
56550+#endif
56551+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56552+ {
56553+ .procname = "chroot_deny_unix",
56554+ .data = &grsec_enable_chroot_unix,
56555+ .maxlen = sizeof(int),
56556+ .mode = 0600,
56557+ .proc_handler = &proc_dointvec,
56558+ },
56559+#endif
56560+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56561+ {
56562+ .procname = "chroot_deny_mount",
56563+ .data = &grsec_enable_chroot_mount,
56564+ .maxlen = sizeof(int),
56565+ .mode = 0600,
56566+ .proc_handler = &proc_dointvec,
56567+ },
56568+#endif
56569+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56570+ {
56571+ .procname = "chroot_deny_fchdir",
56572+ .data = &grsec_enable_chroot_fchdir,
56573+ .maxlen = sizeof(int),
56574+ .mode = 0600,
56575+ .proc_handler = &proc_dointvec,
56576+ },
56577+#endif
56578+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56579+ {
56580+ .procname = "chroot_deny_chroot",
56581+ .data = &grsec_enable_chroot_double,
56582+ .maxlen = sizeof(int),
56583+ .mode = 0600,
56584+ .proc_handler = &proc_dointvec,
56585+ },
56586+#endif
56587+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56588+ {
56589+ .procname = "chroot_deny_pivot",
56590+ .data = &grsec_enable_chroot_pivot,
56591+ .maxlen = sizeof(int),
56592+ .mode = 0600,
56593+ .proc_handler = &proc_dointvec,
56594+ },
56595+#endif
56596+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56597+ {
56598+ .procname = "chroot_enforce_chdir",
56599+ .data = &grsec_enable_chroot_chdir,
56600+ .maxlen = sizeof(int),
56601+ .mode = 0600,
56602+ .proc_handler = &proc_dointvec,
56603+ },
56604+#endif
56605+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56606+ {
56607+ .procname = "chroot_deny_chmod",
56608+ .data = &grsec_enable_chroot_chmod,
56609+ .maxlen = sizeof(int),
56610+ .mode = 0600,
56611+ .proc_handler = &proc_dointvec,
56612+ },
56613+#endif
56614+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56615+ {
56616+ .procname = "chroot_deny_mknod",
56617+ .data = &grsec_enable_chroot_mknod,
56618+ .maxlen = sizeof(int),
56619+ .mode = 0600,
56620+ .proc_handler = &proc_dointvec,
56621+ },
56622+#endif
56623+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56624+ {
56625+ .procname = "chroot_restrict_nice",
56626+ .data = &grsec_enable_chroot_nice,
56627+ .maxlen = sizeof(int),
56628+ .mode = 0600,
56629+ .proc_handler = &proc_dointvec,
56630+ },
56631+#endif
56632+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56633+ {
56634+ .procname = "chroot_execlog",
56635+ .data = &grsec_enable_chroot_execlog,
56636+ .maxlen = sizeof(int),
56637+ .mode = 0600,
56638+ .proc_handler = &proc_dointvec,
56639+ },
56640+#endif
56641+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56642+ {
56643+ .procname = "chroot_caps",
56644+ .data = &grsec_enable_chroot_caps,
56645+ .maxlen = sizeof(int),
56646+ .mode = 0600,
56647+ .proc_handler = &proc_dointvec,
56648+ },
56649+#endif
56650+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56651+ {
56652+ .procname = "chroot_deny_sysctl",
56653+ .data = &grsec_enable_chroot_sysctl,
56654+ .maxlen = sizeof(int),
56655+ .mode = 0600,
56656+ .proc_handler = &proc_dointvec,
56657+ },
56658+#endif
56659+#ifdef CONFIG_GRKERNSEC_TPE
56660+ {
56661+ .procname = "tpe",
56662+ .data = &grsec_enable_tpe,
56663+ .maxlen = sizeof(int),
56664+ .mode = 0600,
56665+ .proc_handler = &proc_dointvec,
56666+ },
56667+ {
56668+ .procname = "tpe_gid",
56669+ .data = &grsec_tpe_gid,
56670+ .maxlen = sizeof(int),
56671+ .mode = 0600,
56672+ .proc_handler = &proc_dointvec,
56673+ },
56674+#endif
56675+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56676+ {
56677+ .procname = "tpe_invert",
56678+ .data = &grsec_enable_tpe_invert,
56679+ .maxlen = sizeof(int),
56680+ .mode = 0600,
56681+ .proc_handler = &proc_dointvec,
56682+ },
56683+#endif
56684+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56685+ {
56686+ .procname = "tpe_restrict_all",
56687+ .data = &grsec_enable_tpe_all,
56688+ .maxlen = sizeof(int),
56689+ .mode = 0600,
56690+ .proc_handler = &proc_dointvec,
56691+ },
56692+#endif
56693+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56694+ {
56695+ .procname = "socket_all",
56696+ .data = &grsec_enable_socket_all,
56697+ .maxlen = sizeof(int),
56698+ .mode = 0600,
56699+ .proc_handler = &proc_dointvec,
56700+ },
56701+ {
56702+ .procname = "socket_all_gid",
56703+ .data = &grsec_socket_all_gid,
56704+ .maxlen = sizeof(int),
56705+ .mode = 0600,
56706+ .proc_handler = &proc_dointvec,
56707+ },
56708+#endif
56709+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56710+ {
56711+ .procname = "socket_client",
56712+ .data = &grsec_enable_socket_client,
56713+ .maxlen = sizeof(int),
56714+ .mode = 0600,
56715+ .proc_handler = &proc_dointvec,
56716+ },
56717+ {
56718+ .procname = "socket_client_gid",
56719+ .data = &grsec_socket_client_gid,
56720+ .maxlen = sizeof(int),
56721+ .mode = 0600,
56722+ .proc_handler = &proc_dointvec,
56723+ },
56724+#endif
56725+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56726+ {
56727+ .procname = "socket_server",
56728+ .data = &grsec_enable_socket_server,
56729+ .maxlen = sizeof(int),
56730+ .mode = 0600,
56731+ .proc_handler = &proc_dointvec,
56732+ },
56733+ {
56734+ .procname = "socket_server_gid",
56735+ .data = &grsec_socket_server_gid,
56736+ .maxlen = sizeof(int),
56737+ .mode = 0600,
56738+ .proc_handler = &proc_dointvec,
56739+ },
56740+#endif
56741+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56742+ {
56743+ .procname = "audit_group",
56744+ .data = &grsec_enable_group,
56745+ .maxlen = sizeof(int),
56746+ .mode = 0600,
56747+ .proc_handler = &proc_dointvec,
56748+ },
56749+ {
56750+ .procname = "audit_gid",
56751+ .data = &grsec_audit_gid,
56752+ .maxlen = sizeof(int),
56753+ .mode = 0600,
56754+ .proc_handler = &proc_dointvec,
56755+ },
56756+#endif
56757+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56758+ {
56759+ .procname = "audit_chdir",
56760+ .data = &grsec_enable_chdir,
56761+ .maxlen = sizeof(int),
56762+ .mode = 0600,
56763+ .proc_handler = &proc_dointvec,
56764+ },
56765+#endif
56766+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56767+ {
56768+ .procname = "audit_mount",
56769+ .data = &grsec_enable_mount,
56770+ .maxlen = sizeof(int),
56771+ .mode = 0600,
56772+ .proc_handler = &proc_dointvec,
56773+ },
56774+#endif
56775+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56776+ {
56777+ .procname = "audit_textrel",
56778+ .data = &grsec_enable_audit_textrel,
56779+ .maxlen = sizeof(int),
56780+ .mode = 0600,
56781+ .proc_handler = &proc_dointvec,
56782+ },
56783+#endif
56784+#ifdef CONFIG_GRKERNSEC_DMESG
56785+ {
56786+ .procname = "dmesg",
56787+ .data = &grsec_enable_dmesg,
56788+ .maxlen = sizeof(int),
56789+ .mode = 0600,
56790+ .proc_handler = &proc_dointvec,
56791+ },
56792+#endif
56793+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56794+ {
56795+ .procname = "chroot_findtask",
56796+ .data = &grsec_enable_chroot_findtask,
56797+ .maxlen = sizeof(int),
56798+ .mode = 0600,
56799+ .proc_handler = &proc_dointvec,
56800+ },
56801+#endif
56802+#ifdef CONFIG_GRKERNSEC_RESLOG
56803+ {
56804+ .procname = "resource_logging",
56805+ .data = &grsec_resource_logging,
56806+ .maxlen = sizeof(int),
56807+ .mode = 0600,
56808+ .proc_handler = &proc_dointvec,
56809+ },
56810+#endif
56811+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56812+ {
56813+ .procname = "audit_ptrace",
56814+ .data = &grsec_enable_audit_ptrace,
56815+ .maxlen = sizeof(int),
56816+ .mode = 0600,
56817+ .proc_handler = &proc_dointvec,
56818+ },
56819+#endif
56820+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56821+ {
56822+ .procname = "harden_ptrace",
56823+ .data = &grsec_enable_harden_ptrace,
56824+ .maxlen = sizeof(int),
56825+ .mode = 0600,
56826+ .proc_handler = &proc_dointvec,
56827+ },
56828+#endif
56829+ {
56830+ .procname = "grsec_lock",
56831+ .data = &grsec_lock,
56832+ .maxlen = sizeof(int),
56833+ .mode = 0600,
56834+ .proc_handler = &proc_dointvec,
56835+ },
56836+#endif
56837+#ifdef CONFIG_GRKERNSEC_ROFS
56838+ {
56839+ .procname = "romount_protect",
56840+ .data = &grsec_enable_rofs,
56841+ .maxlen = sizeof(int),
56842+ .mode = 0600,
56843+ .proc_handler = &proc_dointvec_minmax,
56844+ .extra1 = &one,
56845+ .extra2 = &one,
56846+ },
56847+#endif
56848+ { }
56849+};
56850+#endif
56851diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56852new file mode 100644
56853index 0000000..0dc13c3
56854--- /dev/null
56855+++ b/grsecurity/grsec_time.c
56856@@ -0,0 +1,16 @@
56857+#include <linux/kernel.h>
56858+#include <linux/sched.h>
56859+#include <linux/grinternal.h>
56860+#include <linux/module.h>
56861+
56862+void
56863+gr_log_timechange(void)
56864+{
56865+#ifdef CONFIG_GRKERNSEC_TIME
56866+ if (grsec_enable_time)
56867+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56868+#endif
56869+ return;
56870+}
56871+
56872+EXPORT_SYMBOL(gr_log_timechange);
56873diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56874new file mode 100644
56875index 0000000..07e0dc0
56876--- /dev/null
56877+++ b/grsecurity/grsec_tpe.c
56878@@ -0,0 +1,73 @@
56879+#include <linux/kernel.h>
56880+#include <linux/sched.h>
56881+#include <linux/file.h>
56882+#include <linux/fs.h>
56883+#include <linux/grinternal.h>
56884+
56885+extern int gr_acl_tpe_check(void);
56886+
56887+int
56888+gr_tpe_allow(const struct file *file)
56889+{
56890+#ifdef CONFIG_GRKERNSEC
56891+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56892+ const struct cred *cred = current_cred();
56893+ char *msg = NULL;
56894+ char *msg2 = NULL;
56895+
56896+ // never restrict root
56897+ if (!cred->uid)
56898+ return 1;
56899+
56900+ if (grsec_enable_tpe) {
56901+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56902+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
56903+ msg = "not being in trusted group";
56904+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
56905+ msg = "being in untrusted group";
56906+#else
56907+ if (in_group_p(grsec_tpe_gid))
56908+ msg = "being in untrusted group";
56909+#endif
56910+ }
56911+ if (!msg && gr_acl_tpe_check())
56912+ msg = "being in untrusted role";
56913+
56914+ // not in any affected group/role
56915+ if (!msg)
56916+ goto next_check;
56917+
56918+ if (inode->i_uid)
56919+ msg2 = "file in non-root-owned directory";
56920+ else if (inode->i_mode & S_IWOTH)
56921+ msg2 = "file in world-writable directory";
56922+ else if (inode->i_mode & S_IWGRP)
56923+ msg2 = "file in group-writable directory";
56924+
56925+ if (msg && msg2) {
56926+ char fullmsg[70] = {0};
56927+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
56928+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
56929+ return 0;
56930+ }
56931+ msg = NULL;
56932+next_check:
56933+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56934+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
56935+ return 1;
56936+
56937+ if (inode->i_uid && (inode->i_uid != cred->uid))
56938+ msg = "directory not owned by user";
56939+ else if (inode->i_mode & S_IWOTH)
56940+ msg = "file in world-writable directory";
56941+ else if (inode->i_mode & S_IWGRP)
56942+ msg = "file in group-writable directory";
56943+
56944+ if (msg) {
56945+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
56946+ return 0;
56947+ }
56948+#endif
56949+#endif
56950+ return 1;
56951+}
56952diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
56953new file mode 100644
56954index 0000000..9f7b1ac
56955--- /dev/null
56956+++ b/grsecurity/grsum.c
56957@@ -0,0 +1,61 @@
56958+#include <linux/err.h>
56959+#include <linux/kernel.h>
56960+#include <linux/sched.h>
56961+#include <linux/mm.h>
56962+#include <linux/scatterlist.h>
56963+#include <linux/crypto.h>
56964+#include <linux/gracl.h>
56965+
56966+
56967+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56968+#error "crypto and sha256 must be built into the kernel"
56969+#endif
56970+
56971+int
56972+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56973+{
56974+ char *p;
56975+ struct crypto_hash *tfm;
56976+ struct hash_desc desc;
56977+ struct scatterlist sg;
56978+ unsigned char temp_sum[GR_SHA_LEN];
56979+ volatile int retval = 0;
56980+ volatile int dummy = 0;
56981+ unsigned int i;
56982+
56983+ sg_init_table(&sg, 1);
56984+
56985+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56986+ if (IS_ERR(tfm)) {
56987+ /* should never happen, since sha256 should be built in */
56988+ return 1;
56989+ }
56990+
56991+ desc.tfm = tfm;
56992+ desc.flags = 0;
56993+
56994+ crypto_hash_init(&desc);
56995+
56996+ p = salt;
56997+ sg_set_buf(&sg, p, GR_SALT_LEN);
56998+ crypto_hash_update(&desc, &sg, sg.length);
56999+
57000+ p = entry->pw;
57001+ sg_set_buf(&sg, p, strlen(p));
57002+
57003+ crypto_hash_update(&desc, &sg, sg.length);
57004+
57005+ crypto_hash_final(&desc, temp_sum);
57006+
57007+ memset(entry->pw, 0, GR_PW_LEN);
57008+
57009+ for (i = 0; i < GR_SHA_LEN; i++)
57010+ if (sum[i] != temp_sum[i])
57011+ retval = 1;
57012+ else
57013+ dummy = 1; // waste a cycle
57014+
57015+ crypto_free_hash(tfm);
57016+
57017+ return retval;
57018+}
57019diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57020index 6cd5b64..f620d2d 100644
57021--- a/include/acpi/acpi_bus.h
57022+++ b/include/acpi/acpi_bus.h
57023@@ -107,7 +107,7 @@ struct acpi_device_ops {
57024 acpi_op_bind bind;
57025 acpi_op_unbind unbind;
57026 acpi_op_notify notify;
57027-};
57028+} __no_const;
57029
57030 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57031
57032diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57033index b7babf0..71e4e74 100644
57034--- a/include/asm-generic/atomic-long.h
57035+++ b/include/asm-generic/atomic-long.h
57036@@ -22,6 +22,12 @@
57037
57038 typedef atomic64_t atomic_long_t;
57039
57040+#ifdef CONFIG_PAX_REFCOUNT
57041+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57042+#else
57043+typedef atomic64_t atomic_long_unchecked_t;
57044+#endif
57045+
57046 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57047
57048 static inline long atomic_long_read(atomic_long_t *l)
57049@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57050 return (long)atomic64_read(v);
57051 }
57052
57053+#ifdef CONFIG_PAX_REFCOUNT
57054+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57055+{
57056+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57057+
57058+ return (long)atomic64_read_unchecked(v);
57059+}
57060+#endif
57061+
57062 static inline void atomic_long_set(atomic_long_t *l, long i)
57063 {
57064 atomic64_t *v = (atomic64_t *)l;
57065@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57066 atomic64_set(v, i);
57067 }
57068
57069+#ifdef CONFIG_PAX_REFCOUNT
57070+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57071+{
57072+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57073+
57074+ atomic64_set_unchecked(v, i);
57075+}
57076+#endif
57077+
57078 static inline void atomic_long_inc(atomic_long_t *l)
57079 {
57080 atomic64_t *v = (atomic64_t *)l;
57081@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57082 atomic64_inc(v);
57083 }
57084
57085+#ifdef CONFIG_PAX_REFCOUNT
57086+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57087+{
57088+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57089+
57090+ atomic64_inc_unchecked(v);
57091+}
57092+#endif
57093+
57094 static inline void atomic_long_dec(atomic_long_t *l)
57095 {
57096 atomic64_t *v = (atomic64_t *)l;
57097@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57098 atomic64_dec(v);
57099 }
57100
57101+#ifdef CONFIG_PAX_REFCOUNT
57102+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57103+{
57104+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57105+
57106+ atomic64_dec_unchecked(v);
57107+}
57108+#endif
57109+
57110 static inline void atomic_long_add(long i, atomic_long_t *l)
57111 {
57112 atomic64_t *v = (atomic64_t *)l;
57113@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57114 atomic64_add(i, v);
57115 }
57116
57117+#ifdef CONFIG_PAX_REFCOUNT
57118+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57119+{
57120+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57121+
57122+ atomic64_add_unchecked(i, v);
57123+}
57124+#endif
57125+
57126 static inline void atomic_long_sub(long i, atomic_long_t *l)
57127 {
57128 atomic64_t *v = (atomic64_t *)l;
57129@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57130 atomic64_sub(i, v);
57131 }
57132
57133+#ifdef CONFIG_PAX_REFCOUNT
57134+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57135+{
57136+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57137+
57138+ atomic64_sub_unchecked(i, v);
57139+}
57140+#endif
57141+
57142 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57143 {
57144 atomic64_t *v = (atomic64_t *)l;
57145@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57146 return (long)atomic64_inc_return(v);
57147 }
57148
57149+#ifdef CONFIG_PAX_REFCOUNT
57150+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57151+{
57152+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57153+
57154+ return (long)atomic64_inc_return_unchecked(v);
57155+}
57156+#endif
57157+
57158 static inline long atomic_long_dec_return(atomic_long_t *l)
57159 {
57160 atomic64_t *v = (atomic64_t *)l;
57161@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57162
57163 typedef atomic_t atomic_long_t;
57164
57165+#ifdef CONFIG_PAX_REFCOUNT
57166+typedef atomic_unchecked_t atomic_long_unchecked_t;
57167+#else
57168+typedef atomic_t atomic_long_unchecked_t;
57169+#endif
57170+
57171 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57172 static inline long atomic_long_read(atomic_long_t *l)
57173 {
57174@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57175 return (long)atomic_read(v);
57176 }
57177
57178+#ifdef CONFIG_PAX_REFCOUNT
57179+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57180+{
57181+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57182+
57183+ return (long)atomic_read_unchecked(v);
57184+}
57185+#endif
57186+
57187 static inline void atomic_long_set(atomic_long_t *l, long i)
57188 {
57189 atomic_t *v = (atomic_t *)l;
57190@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57191 atomic_set(v, i);
57192 }
57193
57194+#ifdef CONFIG_PAX_REFCOUNT
57195+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57196+{
57197+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57198+
57199+ atomic_set_unchecked(v, i);
57200+}
57201+#endif
57202+
57203 static inline void atomic_long_inc(atomic_long_t *l)
57204 {
57205 atomic_t *v = (atomic_t *)l;
57206@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57207 atomic_inc(v);
57208 }
57209
57210+#ifdef CONFIG_PAX_REFCOUNT
57211+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57212+{
57213+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57214+
57215+ atomic_inc_unchecked(v);
57216+}
57217+#endif
57218+
57219 static inline void atomic_long_dec(atomic_long_t *l)
57220 {
57221 atomic_t *v = (atomic_t *)l;
57222@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57223 atomic_dec(v);
57224 }
57225
57226+#ifdef CONFIG_PAX_REFCOUNT
57227+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57228+{
57229+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57230+
57231+ atomic_dec_unchecked(v);
57232+}
57233+#endif
57234+
57235 static inline void atomic_long_add(long i, atomic_long_t *l)
57236 {
57237 atomic_t *v = (atomic_t *)l;
57238@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57239 atomic_add(i, v);
57240 }
57241
57242+#ifdef CONFIG_PAX_REFCOUNT
57243+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57244+{
57245+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57246+
57247+ atomic_add_unchecked(i, v);
57248+}
57249+#endif
57250+
57251 static inline void atomic_long_sub(long i, atomic_long_t *l)
57252 {
57253 atomic_t *v = (atomic_t *)l;
57254@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57255 atomic_sub(i, v);
57256 }
57257
57258+#ifdef CONFIG_PAX_REFCOUNT
57259+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57260+{
57261+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57262+
57263+ atomic_sub_unchecked(i, v);
57264+}
57265+#endif
57266+
57267 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57268 {
57269 atomic_t *v = (atomic_t *)l;
57270@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57271 return (long)atomic_inc_return(v);
57272 }
57273
57274+#ifdef CONFIG_PAX_REFCOUNT
57275+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57276+{
57277+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57278+
57279+ return (long)atomic_inc_return_unchecked(v);
57280+}
57281+#endif
57282+
57283 static inline long atomic_long_dec_return(atomic_long_t *l)
57284 {
57285 atomic_t *v = (atomic_t *)l;
57286@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57287
57288 #endif /* BITS_PER_LONG == 64 */
57289
57290+#ifdef CONFIG_PAX_REFCOUNT
57291+static inline void pax_refcount_needs_these_functions(void)
57292+{
57293+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57294+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57295+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57296+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57297+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57298+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57299+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57300+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57301+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57302+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57303+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57304+
57305+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57306+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57307+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57308+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57309+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57310+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57311+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57312+}
57313+#else
57314+#define atomic_read_unchecked(v) atomic_read(v)
57315+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57316+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57317+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57318+#define atomic_inc_unchecked(v) atomic_inc(v)
57319+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57320+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57321+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57322+#define atomic_dec_unchecked(v) atomic_dec(v)
57323+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57324+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57325+
57326+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57327+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57328+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57329+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57330+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57331+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57332+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57333+#endif
57334+
57335 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57336diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57337index b18ce4f..2ee2843 100644
57338--- a/include/asm-generic/atomic64.h
57339+++ b/include/asm-generic/atomic64.h
57340@@ -16,6 +16,8 @@ typedef struct {
57341 long long counter;
57342 } atomic64_t;
57343
57344+typedef atomic64_t atomic64_unchecked_t;
57345+
57346 #define ATOMIC64_INIT(i) { (i) }
57347
57348 extern long long atomic64_read(const atomic64_t *v);
57349@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57350 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57351 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57352
57353+#define atomic64_read_unchecked(v) atomic64_read(v)
57354+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57355+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57356+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57357+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57358+#define atomic64_inc_unchecked(v) atomic64_inc(v)
57359+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57360+#define atomic64_dec_unchecked(v) atomic64_dec(v)
57361+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57362+
57363 #endif /* _ASM_GENERIC_ATOMIC64_H */
57364diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57365index 1bfcfe5..e04c5c9 100644
57366--- a/include/asm-generic/cache.h
57367+++ b/include/asm-generic/cache.h
57368@@ -6,7 +6,7 @@
57369 * cache lines need to provide their own cache.h.
57370 */
57371
57372-#define L1_CACHE_SHIFT 5
57373-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57374+#define L1_CACHE_SHIFT 5UL
57375+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57376
57377 #endif /* __ASM_GENERIC_CACHE_H */
57378diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57379index 1ca3efc..e3dc852 100644
57380--- a/include/asm-generic/int-l64.h
57381+++ b/include/asm-generic/int-l64.h
57382@@ -46,6 +46,8 @@ typedef unsigned int u32;
57383 typedef signed long s64;
57384 typedef unsigned long u64;
57385
57386+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57387+
57388 #define S8_C(x) x
57389 #define U8_C(x) x ## U
57390 #define S16_C(x) x
57391diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57392index f394147..b6152b9 100644
57393--- a/include/asm-generic/int-ll64.h
57394+++ b/include/asm-generic/int-ll64.h
57395@@ -51,6 +51,8 @@ typedef unsigned int u32;
57396 typedef signed long long s64;
57397 typedef unsigned long long u64;
57398
57399+typedef unsigned long long intoverflow_t;
57400+
57401 #define S8_C(x) x
57402 #define U8_C(x) x ## U
57403 #define S16_C(x) x
57404diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57405index 0232ccb..13d9165 100644
57406--- a/include/asm-generic/kmap_types.h
57407+++ b/include/asm-generic/kmap_types.h
57408@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57409 KMAP_D(17) KM_NMI,
57410 KMAP_D(18) KM_NMI_PTE,
57411 KMAP_D(19) KM_KDB,
57412+KMAP_D(20) KM_CLEARPAGE,
57413 /*
57414 * Remember to update debug_kmap_atomic() when adding new kmap types!
57415 */
57416-KMAP_D(20) KM_TYPE_NR
57417+KMAP_D(21) KM_TYPE_NR
57418 };
57419
57420 #undef KMAP_D
57421diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57422index 725612b..9cc513a 100644
57423--- a/include/asm-generic/pgtable-nopmd.h
57424+++ b/include/asm-generic/pgtable-nopmd.h
57425@@ -1,14 +1,19 @@
57426 #ifndef _PGTABLE_NOPMD_H
57427 #define _PGTABLE_NOPMD_H
57428
57429-#ifndef __ASSEMBLY__
57430-
57431 #include <asm-generic/pgtable-nopud.h>
57432
57433-struct mm_struct;
57434-
57435 #define __PAGETABLE_PMD_FOLDED
57436
57437+#define PMD_SHIFT PUD_SHIFT
57438+#define PTRS_PER_PMD 1
57439+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57440+#define PMD_MASK (~(PMD_SIZE-1))
57441+
57442+#ifndef __ASSEMBLY__
57443+
57444+struct mm_struct;
57445+
57446 /*
57447 * Having the pmd type consist of a pud gets the size right, and allows
57448 * us to conceptually access the pud entry that this pmd is folded into
57449@@ -16,11 +21,6 @@ struct mm_struct;
57450 */
57451 typedef struct { pud_t pud; } pmd_t;
57452
57453-#define PMD_SHIFT PUD_SHIFT
57454-#define PTRS_PER_PMD 1
57455-#define PMD_SIZE (1UL << PMD_SHIFT)
57456-#define PMD_MASK (~(PMD_SIZE-1))
57457-
57458 /*
57459 * The "pud_xxx()" functions here are trivial for a folded two-level
57460 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57461diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57462index 810431d..ccc3638 100644
57463--- a/include/asm-generic/pgtable-nopud.h
57464+++ b/include/asm-generic/pgtable-nopud.h
57465@@ -1,10 +1,15 @@
57466 #ifndef _PGTABLE_NOPUD_H
57467 #define _PGTABLE_NOPUD_H
57468
57469-#ifndef __ASSEMBLY__
57470-
57471 #define __PAGETABLE_PUD_FOLDED
57472
57473+#define PUD_SHIFT PGDIR_SHIFT
57474+#define PTRS_PER_PUD 1
57475+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57476+#define PUD_MASK (~(PUD_SIZE-1))
57477+
57478+#ifndef __ASSEMBLY__
57479+
57480 /*
57481 * Having the pud type consist of a pgd gets the size right, and allows
57482 * us to conceptually access the pgd entry that this pud is folded into
57483@@ -12,11 +17,6 @@
57484 */
57485 typedef struct { pgd_t pgd; } pud_t;
57486
57487-#define PUD_SHIFT PGDIR_SHIFT
57488-#define PTRS_PER_PUD 1
57489-#define PUD_SIZE (1UL << PUD_SHIFT)
57490-#define PUD_MASK (~(PUD_SIZE-1))
57491-
57492 /*
57493 * The "pgd_xxx()" functions here are trivial for a folded two-level
57494 * setup: the pud is never bad, and a pud always exists (as it's folded
57495diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57496index 76bff2b..c7a14e2 100644
57497--- a/include/asm-generic/pgtable.h
57498+++ b/include/asm-generic/pgtable.h
57499@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57500 #endif /* __HAVE_ARCH_PMD_WRITE */
57501 #endif
57502
57503+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57504+static inline unsigned long pax_open_kernel(void) { return 0; }
57505+#endif
57506+
57507+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57508+static inline unsigned long pax_close_kernel(void) { return 0; }
57509+#endif
57510+
57511 #endif /* !__ASSEMBLY__ */
57512
57513 #endif /* _ASM_GENERIC_PGTABLE_H */
57514diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57515index b5e2e4c..6a5373e 100644
57516--- a/include/asm-generic/vmlinux.lds.h
57517+++ b/include/asm-generic/vmlinux.lds.h
57518@@ -217,6 +217,7 @@
57519 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57520 VMLINUX_SYMBOL(__start_rodata) = .; \
57521 *(.rodata) *(.rodata.*) \
57522+ *(.data..read_only) \
57523 *(__vermagic) /* Kernel version magic */ \
57524 . = ALIGN(8); \
57525 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57526@@ -722,17 +723,18 @@
57527 * section in the linker script will go there too. @phdr should have
57528 * a leading colon.
57529 *
57530- * Note that this macros defines __per_cpu_load as an absolute symbol.
57531+ * Note that this macros defines per_cpu_load as an absolute symbol.
57532 * If there is no need to put the percpu section at a predetermined
57533 * address, use PERCPU_SECTION.
57534 */
57535 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57536- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57537- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57538+ per_cpu_load = .; \
57539+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57540 - LOAD_OFFSET) { \
57541+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57542 PERCPU_INPUT(cacheline) \
57543 } phdr \
57544- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57545+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57546
57547 /**
57548 * PERCPU_SECTION - define output section for percpu area, simple version
57549diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57550index bf4b2dc..2d0762f 100644
57551--- a/include/drm/drmP.h
57552+++ b/include/drm/drmP.h
57553@@ -72,6 +72,7 @@
57554 #include <linux/workqueue.h>
57555 #include <linux/poll.h>
57556 #include <asm/pgalloc.h>
57557+#include <asm/local.h>
57558 #include "drm.h"
57559
57560 #include <linux/idr.h>
57561@@ -1038,7 +1039,7 @@ struct drm_device {
57562
57563 /** \name Usage Counters */
57564 /*@{ */
57565- int open_count; /**< Outstanding files open */
57566+ local_t open_count; /**< Outstanding files open */
57567 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57568 atomic_t vma_count; /**< Outstanding vma areas open */
57569 int buf_use; /**< Buffers in use -- cannot alloc */
57570@@ -1049,7 +1050,7 @@ struct drm_device {
57571 /*@{ */
57572 unsigned long counters;
57573 enum drm_stat_type types[15];
57574- atomic_t counts[15];
57575+ atomic_unchecked_t counts[15];
57576 /*@} */
57577
57578 struct list_head filelist;
57579diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57580index 73b0712..0b7ef2f 100644
57581--- a/include/drm/drm_crtc_helper.h
57582+++ b/include/drm/drm_crtc_helper.h
57583@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57584
57585 /* disable crtc when not in use - more explicit than dpms off */
57586 void (*disable)(struct drm_crtc *crtc);
57587-};
57588+} __no_const;
57589
57590 struct drm_encoder_helper_funcs {
57591 void (*dpms)(struct drm_encoder *encoder, int mode);
57592@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57593 struct drm_connector *connector);
57594 /* disable encoder when not in use - more explicit than dpms off */
57595 void (*disable)(struct drm_encoder *encoder);
57596-};
57597+} __no_const;
57598
57599 struct drm_connector_helper_funcs {
57600 int (*get_modes)(struct drm_connector *connector);
57601diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57602index 26c1f78..6722682 100644
57603--- a/include/drm/ttm/ttm_memory.h
57604+++ b/include/drm/ttm/ttm_memory.h
57605@@ -47,7 +47,7 @@
57606
57607 struct ttm_mem_shrink {
57608 int (*do_shrink) (struct ttm_mem_shrink *);
57609-};
57610+} __no_const;
57611
57612 /**
57613 * struct ttm_mem_global - Global memory accounting structure.
57614diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57615index e86dfca..40cc55f 100644
57616--- a/include/linux/a.out.h
57617+++ b/include/linux/a.out.h
57618@@ -39,6 +39,14 @@ enum machine_type {
57619 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57620 };
57621
57622+/* Constants for the N_FLAGS field */
57623+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57624+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57625+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57626+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57627+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57628+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57629+
57630 #if !defined (N_MAGIC)
57631 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57632 #endif
57633diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57634index 49a83ca..df96b54 100644
57635--- a/include/linux/atmdev.h
57636+++ b/include/linux/atmdev.h
57637@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57638 #endif
57639
57640 struct k_atm_aal_stats {
57641-#define __HANDLE_ITEM(i) atomic_t i
57642+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57643 __AAL_STAT_ITEMS
57644 #undef __HANDLE_ITEM
57645 };
57646diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57647index fd88a39..8a801b4 100644
57648--- a/include/linux/binfmts.h
57649+++ b/include/linux/binfmts.h
57650@@ -18,7 +18,7 @@ struct pt_regs;
57651 #define BINPRM_BUF_SIZE 128
57652
57653 #ifdef __KERNEL__
57654-#include <linux/list.h>
57655+#include <linux/sched.h>
57656
57657 #define CORENAME_MAX_SIZE 128
57658
57659@@ -58,6 +58,7 @@ struct linux_binprm {
57660 unsigned interp_flags;
57661 unsigned interp_data;
57662 unsigned long loader, exec;
57663+ char tcomm[TASK_COMM_LEN];
57664 };
57665
57666 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57667@@ -88,6 +89,7 @@ struct linux_binfmt {
57668 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57669 int (*load_shlib)(struct file *);
57670 int (*core_dump)(struct coredump_params *cprm);
57671+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57672 unsigned long min_coredump; /* minimal dump size */
57673 };
57674
57675diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57676index 0ed1eb0..3ab569b 100644
57677--- a/include/linux/blkdev.h
57678+++ b/include/linux/blkdev.h
57679@@ -1315,7 +1315,7 @@ struct block_device_operations {
57680 /* this callback is with swap_lock and sometimes page table lock held */
57681 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57682 struct module *owner;
57683-};
57684+} __do_const;
57685
57686 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57687 unsigned long);
57688diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57689index 4d1a074..88f929a 100644
57690--- a/include/linux/blktrace_api.h
57691+++ b/include/linux/blktrace_api.h
57692@@ -162,7 +162,7 @@ struct blk_trace {
57693 struct dentry *dir;
57694 struct dentry *dropped_file;
57695 struct dentry *msg_file;
57696- atomic_t dropped;
57697+ atomic_unchecked_t dropped;
57698 };
57699
57700 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57701diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57702index 83195fb..0b0f77d 100644
57703--- a/include/linux/byteorder/little_endian.h
57704+++ b/include/linux/byteorder/little_endian.h
57705@@ -42,51 +42,51 @@
57706
57707 static inline __le64 __cpu_to_le64p(const __u64 *p)
57708 {
57709- return (__force __le64)*p;
57710+ return (__force const __le64)*p;
57711 }
57712 static inline __u64 __le64_to_cpup(const __le64 *p)
57713 {
57714- return (__force __u64)*p;
57715+ return (__force const __u64)*p;
57716 }
57717 static inline __le32 __cpu_to_le32p(const __u32 *p)
57718 {
57719- return (__force __le32)*p;
57720+ return (__force const __le32)*p;
57721 }
57722 static inline __u32 __le32_to_cpup(const __le32 *p)
57723 {
57724- return (__force __u32)*p;
57725+ return (__force const __u32)*p;
57726 }
57727 static inline __le16 __cpu_to_le16p(const __u16 *p)
57728 {
57729- return (__force __le16)*p;
57730+ return (__force const __le16)*p;
57731 }
57732 static inline __u16 __le16_to_cpup(const __le16 *p)
57733 {
57734- return (__force __u16)*p;
57735+ return (__force const __u16)*p;
57736 }
57737 static inline __be64 __cpu_to_be64p(const __u64 *p)
57738 {
57739- return (__force __be64)__swab64p(p);
57740+ return (__force const __be64)__swab64p(p);
57741 }
57742 static inline __u64 __be64_to_cpup(const __be64 *p)
57743 {
57744- return __swab64p((__u64 *)p);
57745+ return __swab64p((const __u64 *)p);
57746 }
57747 static inline __be32 __cpu_to_be32p(const __u32 *p)
57748 {
57749- return (__force __be32)__swab32p(p);
57750+ return (__force const __be32)__swab32p(p);
57751 }
57752 static inline __u32 __be32_to_cpup(const __be32 *p)
57753 {
57754- return __swab32p((__u32 *)p);
57755+ return __swab32p((const __u32 *)p);
57756 }
57757 static inline __be16 __cpu_to_be16p(const __u16 *p)
57758 {
57759- return (__force __be16)__swab16p(p);
57760+ return (__force const __be16)__swab16p(p);
57761 }
57762 static inline __u16 __be16_to_cpup(const __be16 *p)
57763 {
57764- return __swab16p((__u16 *)p);
57765+ return __swab16p((const __u16 *)p);
57766 }
57767 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57768 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57769diff --git a/include/linux/cache.h b/include/linux/cache.h
57770index 4c57065..4307975 100644
57771--- a/include/linux/cache.h
57772+++ b/include/linux/cache.h
57773@@ -16,6 +16,10 @@
57774 #define __read_mostly
57775 #endif
57776
57777+#ifndef __read_only
57778+#define __read_only __read_mostly
57779+#endif
57780+
57781 #ifndef ____cacheline_aligned
57782 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57783 #endif
57784diff --git a/include/linux/capability.h b/include/linux/capability.h
57785index a63d13d..069bfd5 100644
57786--- a/include/linux/capability.h
57787+++ b/include/linux/capability.h
57788@@ -548,6 +548,9 @@ extern bool capable(int cap);
57789 extern bool ns_capable(struct user_namespace *ns, int cap);
57790 extern bool task_ns_capable(struct task_struct *t, int cap);
57791 extern bool nsown_capable(int cap);
57792+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57793+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57794+extern bool capable_nolog(int cap);
57795
57796 /* audit system wants to get cap info from files as well */
57797 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57798diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57799index 04ffb2e..6799180 100644
57800--- a/include/linux/cleancache.h
57801+++ b/include/linux/cleancache.h
57802@@ -31,7 +31,7 @@ struct cleancache_ops {
57803 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57804 void (*flush_inode)(int, struct cleancache_filekey);
57805 void (*flush_fs)(int);
57806-};
57807+} __no_const;
57808
57809 extern struct cleancache_ops
57810 cleancache_register_ops(struct cleancache_ops *ops);
57811diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57812index dfadc96..c0e70c1 100644
57813--- a/include/linux/compiler-gcc4.h
57814+++ b/include/linux/compiler-gcc4.h
57815@@ -31,6 +31,12 @@
57816
57817
57818 #if __GNUC_MINOR__ >= 5
57819+
57820+#ifdef CONSTIFY_PLUGIN
57821+#define __no_const __attribute__((no_const))
57822+#define __do_const __attribute__((do_const))
57823+#endif
57824+
57825 /*
57826 * Mark a position in code as unreachable. This can be used to
57827 * suppress control flow warnings after asm blocks that transfer
57828@@ -46,6 +52,11 @@
57829 #define __noclone __attribute__((__noclone__))
57830
57831 #endif
57832+
57833+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57834+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57835+#define __bos0(ptr) __bos((ptr), 0)
57836+#define __bos1(ptr) __bos((ptr), 1)
57837 #endif
57838
57839 #if __GNUC_MINOR__ > 0
57840diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57841index 320d6c9..8573a1c 100644
57842--- a/include/linux/compiler.h
57843+++ b/include/linux/compiler.h
57844@@ -5,31 +5,62 @@
57845
57846 #ifdef __CHECKER__
57847 # define __user __attribute__((noderef, address_space(1)))
57848+# define __force_user __force __user
57849 # define __kernel __attribute__((address_space(0)))
57850+# define __force_kernel __force __kernel
57851 # define __safe __attribute__((safe))
57852 # define __force __attribute__((force))
57853 # define __nocast __attribute__((nocast))
57854 # define __iomem __attribute__((noderef, address_space(2)))
57855+# define __force_iomem __force __iomem
57856 # define __acquires(x) __attribute__((context(x,0,1)))
57857 # define __releases(x) __attribute__((context(x,1,0)))
57858 # define __acquire(x) __context__(x,1)
57859 # define __release(x) __context__(x,-1)
57860 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57861 # define __percpu __attribute__((noderef, address_space(3)))
57862+# define __force_percpu __force __percpu
57863 #ifdef CONFIG_SPARSE_RCU_POINTER
57864 # define __rcu __attribute__((noderef, address_space(4)))
57865+# define __force_rcu __force __rcu
57866 #else
57867 # define __rcu
57868+# define __force_rcu
57869 #endif
57870 extern void __chk_user_ptr(const volatile void __user *);
57871 extern void __chk_io_ptr(const volatile void __iomem *);
57872+#elif defined(CHECKER_PLUGIN)
57873+//# define __user
57874+//# define __force_user
57875+//# define __kernel
57876+//# define __force_kernel
57877+# define __safe
57878+# define __force
57879+# define __nocast
57880+# define __iomem
57881+# define __force_iomem
57882+# define __chk_user_ptr(x) (void)0
57883+# define __chk_io_ptr(x) (void)0
57884+# define __builtin_warning(x, y...) (1)
57885+# define __acquires(x)
57886+# define __releases(x)
57887+# define __acquire(x) (void)0
57888+# define __release(x) (void)0
57889+# define __cond_lock(x,c) (c)
57890+# define __percpu
57891+# define __force_percpu
57892+# define __rcu
57893+# define __force_rcu
57894 #else
57895 # define __user
57896+# define __force_user
57897 # define __kernel
57898+# define __force_kernel
57899 # define __safe
57900 # define __force
57901 # define __nocast
57902 # define __iomem
57903+# define __force_iomem
57904 # define __chk_user_ptr(x) (void)0
57905 # define __chk_io_ptr(x) (void)0
57906 # define __builtin_warning(x, y...) (1)
57907@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57908 # define __release(x) (void)0
57909 # define __cond_lock(x,c) (c)
57910 # define __percpu
57911+# define __force_percpu
57912 # define __rcu
57913+# define __force_rcu
57914 #endif
57915
57916 #ifdef __KERNEL__
57917@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57918 # define __attribute_const__ /* unimplemented */
57919 #endif
57920
57921+#ifndef __no_const
57922+# define __no_const
57923+#endif
57924+
57925+#ifndef __do_const
57926+# define __do_const
57927+#endif
57928+
57929 /*
57930 * Tell gcc if a function is cold. The compiler will assume any path
57931 * directly leading to the call is unlikely.
57932@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57933 #define __cold
57934 #endif
57935
57936+#ifndef __alloc_size
57937+#define __alloc_size(...)
57938+#endif
57939+
57940+#ifndef __bos
57941+#define __bos(ptr, arg)
57942+#endif
57943+
57944+#ifndef __bos0
57945+#define __bos0(ptr)
57946+#endif
57947+
57948+#ifndef __bos1
57949+#define __bos1(ptr)
57950+#endif
57951+
57952 /* Simple shorthand for a section definition */
57953 #ifndef __section
57954 # define __section(S) __attribute__ ((__section__(#S)))
57955@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57956 * use is to mediate communication between process-level code and irq/NMI
57957 * handlers, all running on the same CPU.
57958 */
57959-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57960+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57961+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57962
57963 #endif /* __LINUX_COMPILER_H */
57964diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
57965index e9eaec5..bfeb9bb 100644
57966--- a/include/linux/cpuset.h
57967+++ b/include/linux/cpuset.h
57968@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
57969 * nodemask.
57970 */
57971 smp_mb();
57972- --ACCESS_ONCE(current->mems_allowed_change_disable);
57973+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57974 }
57975
57976 static inline void set_mems_allowed(nodemask_t nodemask)
57977diff --git a/include/linux/cred.h b/include/linux/cred.h
57978index 4030896..8d6f342 100644
57979--- a/include/linux/cred.h
57980+++ b/include/linux/cred.h
57981@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
57982 static inline void validate_process_creds(void)
57983 {
57984 }
57985+static inline void validate_task_creds(struct task_struct *task)
57986+{
57987+}
57988 #endif
57989
57990 /**
57991diff --git a/include/linux/crypto.h b/include/linux/crypto.h
57992index 8a94217..15d49e3 100644
57993--- a/include/linux/crypto.h
57994+++ b/include/linux/crypto.h
57995@@ -365,7 +365,7 @@ struct cipher_tfm {
57996 const u8 *key, unsigned int keylen);
57997 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57998 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57999-};
58000+} __no_const;
58001
58002 struct hash_tfm {
58003 int (*init)(struct hash_desc *desc);
58004@@ -386,13 +386,13 @@ struct compress_tfm {
58005 int (*cot_decompress)(struct crypto_tfm *tfm,
58006 const u8 *src, unsigned int slen,
58007 u8 *dst, unsigned int *dlen);
58008-};
58009+} __no_const;
58010
58011 struct rng_tfm {
58012 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58013 unsigned int dlen);
58014 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58015-};
58016+} __no_const;
58017
58018 #define crt_ablkcipher crt_u.ablkcipher
58019 #define crt_aead crt_u.aead
58020diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58021index 7925bf0..d5143d2 100644
58022--- a/include/linux/decompress/mm.h
58023+++ b/include/linux/decompress/mm.h
58024@@ -77,7 +77,7 @@ static void free(void *where)
58025 * warnings when not needed (indeed large_malloc / large_free are not
58026 * needed by inflate */
58027
58028-#define malloc(a) kmalloc(a, GFP_KERNEL)
58029+#define malloc(a) kmalloc((a), GFP_KERNEL)
58030 #define free(a) kfree(a)
58031
58032 #define large_malloc(a) vmalloc(a)
58033diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58034index e13117c..e9fc938 100644
58035--- a/include/linux/dma-mapping.h
58036+++ b/include/linux/dma-mapping.h
58037@@ -46,7 +46,7 @@ struct dma_map_ops {
58038 u64 (*get_required_mask)(struct device *dev);
58039 #endif
58040 int is_phys;
58041-};
58042+} __do_const;
58043
58044 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58045
58046diff --git a/include/linux/efi.h b/include/linux/efi.h
58047index 2362a0b..cfaf8fcc 100644
58048--- a/include/linux/efi.h
58049+++ b/include/linux/efi.h
58050@@ -446,7 +446,7 @@ struct efivar_operations {
58051 efi_get_variable_t *get_variable;
58052 efi_get_next_variable_t *get_next_variable;
58053 efi_set_variable_t *set_variable;
58054-};
58055+} __no_const;
58056
58057 struct efivars {
58058 /*
58059diff --git a/include/linux/elf.h b/include/linux/elf.h
58060index 31f0508..5421c01 100644
58061--- a/include/linux/elf.h
58062+++ b/include/linux/elf.h
58063@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58064 #define PT_GNU_EH_FRAME 0x6474e550
58065
58066 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58067+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58068+
58069+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58070+
58071+/* Constants for the e_flags field */
58072+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58073+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58074+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58075+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58076+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58077+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58078
58079 /*
58080 * Extended Numbering
58081@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58082 #define DT_DEBUG 21
58083 #define DT_TEXTREL 22
58084 #define DT_JMPREL 23
58085+#define DT_FLAGS 30
58086+ #define DF_TEXTREL 0x00000004
58087 #define DT_ENCODING 32
58088 #define OLD_DT_LOOS 0x60000000
58089 #define DT_LOOS 0x6000000d
58090@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58091 #define PF_W 0x2
58092 #define PF_X 0x1
58093
58094+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58095+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58096+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58097+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58098+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58099+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58100+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58101+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58102+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58103+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58104+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58105+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58106+
58107 typedef struct elf32_phdr{
58108 Elf32_Word p_type;
58109 Elf32_Off p_offset;
58110@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58111 #define EI_OSABI 7
58112 #define EI_PAD 8
58113
58114+#define EI_PAX 14
58115+
58116 #define ELFMAG0 0x7f /* EI_MAG */
58117 #define ELFMAG1 'E'
58118 #define ELFMAG2 'L'
58119@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58120 #define elf_note elf32_note
58121 #define elf_addr_t Elf32_Off
58122 #define Elf_Half Elf32_Half
58123+#define elf_dyn Elf32_Dyn
58124
58125 #else
58126
58127@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58128 #define elf_note elf64_note
58129 #define elf_addr_t Elf64_Off
58130 #define Elf_Half Elf64_Half
58131+#define elf_dyn Elf64_Dyn
58132
58133 #endif
58134
58135diff --git a/include/linux/filter.h b/include/linux/filter.h
58136index 8eeb205..d59bfa2 100644
58137--- a/include/linux/filter.h
58138+++ b/include/linux/filter.h
58139@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58140
58141 struct sk_buff;
58142 struct sock;
58143+struct bpf_jit_work;
58144
58145 struct sk_filter
58146 {
58147@@ -141,6 +142,9 @@ struct sk_filter
58148 unsigned int len; /* Number of filter blocks */
58149 unsigned int (*bpf_func)(const struct sk_buff *skb,
58150 const struct sock_filter *filter);
58151+#ifdef CONFIG_BPF_JIT
58152+ struct bpf_jit_work *work;
58153+#endif
58154 struct rcu_head rcu;
58155 struct sock_filter insns[0];
58156 };
58157diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58158index 84ccf8e..2e9b14c 100644
58159--- a/include/linux/firewire.h
58160+++ b/include/linux/firewire.h
58161@@ -428,7 +428,7 @@ struct fw_iso_context {
58162 union {
58163 fw_iso_callback_t sc;
58164 fw_iso_mc_callback_t mc;
58165- } callback;
58166+ } __no_const callback;
58167 void *callback_data;
58168 };
58169
58170diff --git a/include/linux/fs.h b/include/linux/fs.h
58171index e0bc4ff..d79c2fa 100644
58172--- a/include/linux/fs.h
58173+++ b/include/linux/fs.h
58174@@ -1608,7 +1608,8 @@ struct file_operations {
58175 int (*setlease)(struct file *, long, struct file_lock **);
58176 long (*fallocate)(struct file *file, int mode, loff_t offset,
58177 loff_t len);
58178-};
58179+} __do_const;
58180+typedef struct file_operations __no_const file_operations_no_const;
58181
58182 struct inode_operations {
58183 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58184diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58185index 003dc0f..3c4ea97 100644
58186--- a/include/linux/fs_struct.h
58187+++ b/include/linux/fs_struct.h
58188@@ -6,7 +6,7 @@
58189 #include <linux/seqlock.h>
58190
58191 struct fs_struct {
58192- int users;
58193+ atomic_t users;
58194 spinlock_t lock;
58195 seqcount_t seq;
58196 int umask;
58197diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58198index ce31408..b1ad003 100644
58199--- a/include/linux/fscache-cache.h
58200+++ b/include/linux/fscache-cache.h
58201@@ -102,7 +102,7 @@ struct fscache_operation {
58202 fscache_operation_release_t release;
58203 };
58204
58205-extern atomic_t fscache_op_debug_id;
58206+extern atomic_unchecked_t fscache_op_debug_id;
58207 extern void fscache_op_work_func(struct work_struct *work);
58208
58209 extern void fscache_enqueue_operation(struct fscache_operation *);
58210@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58211 {
58212 INIT_WORK(&op->work, fscache_op_work_func);
58213 atomic_set(&op->usage, 1);
58214- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58215+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58216 op->processor = processor;
58217 op->release = release;
58218 INIT_LIST_HEAD(&op->pend_link);
58219diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58220index 2a53f10..0187fdf 100644
58221--- a/include/linux/fsnotify.h
58222+++ b/include/linux/fsnotify.h
58223@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58224 */
58225 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58226 {
58227- return kstrdup(name, GFP_KERNEL);
58228+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58229 }
58230
58231 /*
58232diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58233index 91d0e0a3..035666b 100644
58234--- a/include/linux/fsnotify_backend.h
58235+++ b/include/linux/fsnotify_backend.h
58236@@ -105,6 +105,7 @@ struct fsnotify_ops {
58237 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58238 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58239 };
58240+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58241
58242 /*
58243 * A group is a "thing" that wants to receive notification about filesystem
58244diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58245index c3da42d..c70e0df 100644
58246--- a/include/linux/ftrace_event.h
58247+++ b/include/linux/ftrace_event.h
58248@@ -97,7 +97,7 @@ struct trace_event_functions {
58249 trace_print_func raw;
58250 trace_print_func hex;
58251 trace_print_func binary;
58252-};
58253+} __no_const;
58254
58255 struct trace_event {
58256 struct hlist_node node;
58257@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58258 extern int trace_add_event_call(struct ftrace_event_call *call);
58259 extern void trace_remove_event_call(struct ftrace_event_call *call);
58260
58261-#define is_signed_type(type) (((type)(-1)) < 0)
58262+#define is_signed_type(type) (((type)(-1)) < (type)1)
58263
58264 int trace_set_clr_event(const char *system, const char *event, int set);
58265
58266diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58267index 6d18f35..ab71e2c 100644
58268--- a/include/linux/genhd.h
58269+++ b/include/linux/genhd.h
58270@@ -185,7 +185,7 @@ struct gendisk {
58271 struct kobject *slave_dir;
58272
58273 struct timer_rand_state *random;
58274- atomic_t sync_io; /* RAID */
58275+ atomic_unchecked_t sync_io; /* RAID */
58276 struct disk_events *ev;
58277 #ifdef CONFIG_BLK_DEV_INTEGRITY
58278 struct blk_integrity *integrity;
58279diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58280new file mode 100644
58281index 0000000..0dc3943
58282--- /dev/null
58283+++ b/include/linux/gracl.h
58284@@ -0,0 +1,317 @@
58285+#ifndef GR_ACL_H
58286+#define GR_ACL_H
58287+
58288+#include <linux/grdefs.h>
58289+#include <linux/resource.h>
58290+#include <linux/capability.h>
58291+#include <linux/dcache.h>
58292+#include <asm/resource.h>
58293+
58294+/* Major status information */
58295+
58296+#define GR_VERSION "grsecurity 2.2.2"
58297+#define GRSECURITY_VERSION 0x2202
58298+
58299+enum {
58300+ GR_SHUTDOWN = 0,
58301+ GR_ENABLE = 1,
58302+ GR_SPROLE = 2,
58303+ GR_RELOAD = 3,
58304+ GR_SEGVMOD = 4,
58305+ GR_STATUS = 5,
58306+ GR_UNSPROLE = 6,
58307+ GR_PASSSET = 7,
58308+ GR_SPROLEPAM = 8,
58309+};
58310+
58311+/* Password setup definitions
58312+ * kernel/grhash.c */
58313+enum {
58314+ GR_PW_LEN = 128,
58315+ GR_SALT_LEN = 16,
58316+ GR_SHA_LEN = 32,
58317+};
58318+
58319+enum {
58320+ GR_SPROLE_LEN = 64,
58321+};
58322+
58323+enum {
58324+ GR_NO_GLOB = 0,
58325+ GR_REG_GLOB,
58326+ GR_CREATE_GLOB
58327+};
58328+
58329+#define GR_NLIMITS 32
58330+
58331+/* Begin Data Structures */
58332+
58333+struct sprole_pw {
58334+ unsigned char *rolename;
58335+ unsigned char salt[GR_SALT_LEN];
58336+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58337+};
58338+
58339+struct name_entry {
58340+ __u32 key;
58341+ ino_t inode;
58342+ dev_t device;
58343+ char *name;
58344+ __u16 len;
58345+ __u8 deleted;
58346+ struct name_entry *prev;
58347+ struct name_entry *next;
58348+};
58349+
58350+struct inodev_entry {
58351+ struct name_entry *nentry;
58352+ struct inodev_entry *prev;
58353+ struct inodev_entry *next;
58354+};
58355+
58356+struct acl_role_db {
58357+ struct acl_role_label **r_hash;
58358+ __u32 r_size;
58359+};
58360+
58361+struct inodev_db {
58362+ struct inodev_entry **i_hash;
58363+ __u32 i_size;
58364+};
58365+
58366+struct name_db {
58367+ struct name_entry **n_hash;
58368+ __u32 n_size;
58369+};
58370+
58371+struct crash_uid {
58372+ uid_t uid;
58373+ unsigned long expires;
58374+};
58375+
58376+struct gr_hash_struct {
58377+ void **table;
58378+ void **nametable;
58379+ void *first;
58380+ __u32 table_size;
58381+ __u32 used_size;
58382+ int type;
58383+};
58384+
58385+/* Userspace Grsecurity ACL data structures */
58386+
58387+struct acl_subject_label {
58388+ char *filename;
58389+ ino_t inode;
58390+ dev_t device;
58391+ __u32 mode;
58392+ kernel_cap_t cap_mask;
58393+ kernel_cap_t cap_lower;
58394+ kernel_cap_t cap_invert_audit;
58395+
58396+ struct rlimit res[GR_NLIMITS];
58397+ __u32 resmask;
58398+
58399+ __u8 user_trans_type;
58400+ __u8 group_trans_type;
58401+ uid_t *user_transitions;
58402+ gid_t *group_transitions;
58403+ __u16 user_trans_num;
58404+ __u16 group_trans_num;
58405+
58406+ __u32 sock_families[2];
58407+ __u32 ip_proto[8];
58408+ __u32 ip_type;
58409+ struct acl_ip_label **ips;
58410+ __u32 ip_num;
58411+ __u32 inaddr_any_override;
58412+
58413+ __u32 crashes;
58414+ unsigned long expires;
58415+
58416+ struct acl_subject_label *parent_subject;
58417+ struct gr_hash_struct *hash;
58418+ struct acl_subject_label *prev;
58419+ struct acl_subject_label *next;
58420+
58421+ struct acl_object_label **obj_hash;
58422+ __u32 obj_hash_size;
58423+ __u16 pax_flags;
58424+};
58425+
58426+struct role_allowed_ip {
58427+ __u32 addr;
58428+ __u32 netmask;
58429+
58430+ struct role_allowed_ip *prev;
58431+ struct role_allowed_ip *next;
58432+};
58433+
58434+struct role_transition {
58435+ char *rolename;
58436+
58437+ struct role_transition *prev;
58438+ struct role_transition *next;
58439+};
58440+
58441+struct acl_role_label {
58442+ char *rolename;
58443+ uid_t uidgid;
58444+ __u16 roletype;
58445+
58446+ __u16 auth_attempts;
58447+ unsigned long expires;
58448+
58449+ struct acl_subject_label *root_label;
58450+ struct gr_hash_struct *hash;
58451+
58452+ struct acl_role_label *prev;
58453+ struct acl_role_label *next;
58454+
58455+ struct role_transition *transitions;
58456+ struct role_allowed_ip *allowed_ips;
58457+ uid_t *domain_children;
58458+ __u16 domain_child_num;
58459+
58460+ struct acl_subject_label **subj_hash;
58461+ __u32 subj_hash_size;
58462+};
58463+
58464+struct user_acl_role_db {
58465+ struct acl_role_label **r_table;
58466+ __u32 num_pointers; /* Number of allocations to track */
58467+ __u32 num_roles; /* Number of roles */
58468+ __u32 num_domain_children; /* Number of domain children */
58469+ __u32 num_subjects; /* Number of subjects */
58470+ __u32 num_objects; /* Number of objects */
58471+};
58472+
58473+struct acl_object_label {
58474+ char *filename;
58475+ ino_t inode;
58476+ dev_t device;
58477+ __u32 mode;
58478+
58479+ struct acl_subject_label *nested;
58480+ struct acl_object_label *globbed;
58481+
58482+ /* next two structures not used */
58483+
58484+ struct acl_object_label *prev;
58485+ struct acl_object_label *next;
58486+};
58487+
58488+struct acl_ip_label {
58489+ char *iface;
58490+ __u32 addr;
58491+ __u32 netmask;
58492+ __u16 low, high;
58493+ __u8 mode;
58494+ __u32 type;
58495+ __u32 proto[8];
58496+
58497+ /* next two structures not used */
58498+
58499+ struct acl_ip_label *prev;
58500+ struct acl_ip_label *next;
58501+};
58502+
58503+struct gr_arg {
58504+ struct user_acl_role_db role_db;
58505+ unsigned char pw[GR_PW_LEN];
58506+ unsigned char salt[GR_SALT_LEN];
58507+ unsigned char sum[GR_SHA_LEN];
58508+ unsigned char sp_role[GR_SPROLE_LEN];
58509+ struct sprole_pw *sprole_pws;
58510+ dev_t segv_device;
58511+ ino_t segv_inode;
58512+ uid_t segv_uid;
58513+ __u16 num_sprole_pws;
58514+ __u16 mode;
58515+};
58516+
58517+struct gr_arg_wrapper {
58518+ struct gr_arg *arg;
58519+ __u32 version;
58520+ __u32 size;
58521+};
58522+
58523+struct subject_map {
58524+ struct acl_subject_label *user;
58525+ struct acl_subject_label *kernel;
58526+ struct subject_map *prev;
58527+ struct subject_map *next;
58528+};
58529+
58530+struct acl_subj_map_db {
58531+ struct subject_map **s_hash;
58532+ __u32 s_size;
58533+};
58534+
58535+/* End Data Structures Section */
58536+
58537+/* Hash functions generated by empirical testing by Brad Spengler
58538+ Makes good use of the low bits of the inode. Generally 0-1 times
58539+ in loop for successful match. 0-3 for unsuccessful match.
58540+ Shift/add algorithm with modulus of table size and an XOR*/
58541+
58542+static __inline__ unsigned int
58543+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58544+{
58545+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58546+}
58547+
58548+ static __inline__ unsigned int
58549+shash(const struct acl_subject_label *userp, const unsigned int sz)
58550+{
58551+ return ((const unsigned long)userp % sz);
58552+}
58553+
58554+static __inline__ unsigned int
58555+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58556+{
58557+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58558+}
58559+
58560+static __inline__ unsigned int
58561+nhash(const char *name, const __u16 len, const unsigned int sz)
58562+{
58563+ return full_name_hash((const unsigned char *)name, len) % sz;
58564+}
58565+
58566+#define FOR_EACH_ROLE_START(role) \
58567+ role = role_list; \
58568+ while (role) {
58569+
58570+#define FOR_EACH_ROLE_END(role) \
58571+ role = role->prev; \
58572+ }
58573+
58574+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58575+ subj = NULL; \
58576+ iter = 0; \
58577+ while (iter < role->subj_hash_size) { \
58578+ if (subj == NULL) \
58579+ subj = role->subj_hash[iter]; \
58580+ if (subj == NULL) { \
58581+ iter++; \
58582+ continue; \
58583+ }
58584+
58585+#define FOR_EACH_SUBJECT_END(subj,iter) \
58586+ subj = subj->next; \
58587+ if (subj == NULL) \
58588+ iter++; \
58589+ }
58590+
58591+
58592+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58593+ subj = role->hash->first; \
58594+ while (subj != NULL) {
58595+
58596+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58597+ subj = subj->next; \
58598+ }
58599+
58600+#endif
58601+
58602diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58603new file mode 100644
58604index 0000000..323ecf2
58605--- /dev/null
58606+++ b/include/linux/gralloc.h
58607@@ -0,0 +1,9 @@
58608+#ifndef __GRALLOC_H
58609+#define __GRALLOC_H
58610+
58611+void acl_free_all(void);
58612+int acl_alloc_stack_init(unsigned long size);
58613+void *acl_alloc(unsigned long len);
58614+void *acl_alloc_num(unsigned long num, unsigned long len);
58615+
58616+#endif
58617diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58618new file mode 100644
58619index 0000000..b30e9bc
58620--- /dev/null
58621+++ b/include/linux/grdefs.h
58622@@ -0,0 +1,140 @@
58623+#ifndef GRDEFS_H
58624+#define GRDEFS_H
58625+
58626+/* Begin grsecurity status declarations */
58627+
58628+enum {
58629+ GR_READY = 0x01,
58630+ GR_STATUS_INIT = 0x00 // disabled state
58631+};
58632+
58633+/* Begin ACL declarations */
58634+
58635+/* Role flags */
58636+
58637+enum {
58638+ GR_ROLE_USER = 0x0001,
58639+ GR_ROLE_GROUP = 0x0002,
58640+ GR_ROLE_DEFAULT = 0x0004,
58641+ GR_ROLE_SPECIAL = 0x0008,
58642+ GR_ROLE_AUTH = 0x0010,
58643+ GR_ROLE_NOPW = 0x0020,
58644+ GR_ROLE_GOD = 0x0040,
58645+ GR_ROLE_LEARN = 0x0080,
58646+ GR_ROLE_TPE = 0x0100,
58647+ GR_ROLE_DOMAIN = 0x0200,
58648+ GR_ROLE_PAM = 0x0400,
58649+ GR_ROLE_PERSIST = 0x0800
58650+};
58651+
58652+/* ACL Subject and Object mode flags */
58653+enum {
58654+ GR_DELETED = 0x80000000
58655+};
58656+
58657+/* ACL Object-only mode flags */
58658+enum {
58659+ GR_READ = 0x00000001,
58660+ GR_APPEND = 0x00000002,
58661+ GR_WRITE = 0x00000004,
58662+ GR_EXEC = 0x00000008,
58663+ GR_FIND = 0x00000010,
58664+ GR_INHERIT = 0x00000020,
58665+ GR_SETID = 0x00000040,
58666+ GR_CREATE = 0x00000080,
58667+ GR_DELETE = 0x00000100,
58668+ GR_LINK = 0x00000200,
58669+ GR_AUDIT_READ = 0x00000400,
58670+ GR_AUDIT_APPEND = 0x00000800,
58671+ GR_AUDIT_WRITE = 0x00001000,
58672+ GR_AUDIT_EXEC = 0x00002000,
58673+ GR_AUDIT_FIND = 0x00004000,
58674+ GR_AUDIT_INHERIT= 0x00008000,
58675+ GR_AUDIT_SETID = 0x00010000,
58676+ GR_AUDIT_CREATE = 0x00020000,
58677+ GR_AUDIT_DELETE = 0x00040000,
58678+ GR_AUDIT_LINK = 0x00080000,
58679+ GR_PTRACERD = 0x00100000,
58680+ GR_NOPTRACE = 0x00200000,
58681+ GR_SUPPRESS = 0x00400000,
58682+ GR_NOLEARN = 0x00800000,
58683+ GR_INIT_TRANSFER= 0x01000000
58684+};
58685+
58686+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58687+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58688+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58689+
58690+/* ACL subject-only mode flags */
58691+enum {
58692+ GR_KILL = 0x00000001,
58693+ GR_VIEW = 0x00000002,
58694+ GR_PROTECTED = 0x00000004,
58695+ GR_LEARN = 0x00000008,
58696+ GR_OVERRIDE = 0x00000010,
58697+ /* just a placeholder, this mode is only used in userspace */
58698+ GR_DUMMY = 0x00000020,
58699+ GR_PROTSHM = 0x00000040,
58700+ GR_KILLPROC = 0x00000080,
58701+ GR_KILLIPPROC = 0x00000100,
58702+ /* just a placeholder, this mode is only used in userspace */
58703+ GR_NOTROJAN = 0x00000200,
58704+ GR_PROTPROCFD = 0x00000400,
58705+ GR_PROCACCT = 0x00000800,
58706+ GR_RELAXPTRACE = 0x00001000,
58707+ GR_NESTED = 0x00002000,
58708+ GR_INHERITLEARN = 0x00004000,
58709+ GR_PROCFIND = 0x00008000,
58710+ GR_POVERRIDE = 0x00010000,
58711+ GR_KERNELAUTH = 0x00020000,
58712+ GR_ATSECURE = 0x00040000,
58713+ GR_SHMEXEC = 0x00080000
58714+};
58715+
58716+enum {
58717+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58718+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58719+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58720+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58721+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58722+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58723+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58724+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58725+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58726+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58727+};
58728+
58729+enum {
58730+ GR_ID_USER = 0x01,
58731+ GR_ID_GROUP = 0x02,
58732+};
58733+
58734+enum {
58735+ GR_ID_ALLOW = 0x01,
58736+ GR_ID_DENY = 0x02,
58737+};
58738+
58739+#define GR_CRASH_RES 31
58740+#define GR_UIDTABLE_MAX 500
58741+
58742+/* begin resource learning section */
58743+enum {
58744+ GR_RLIM_CPU_BUMP = 60,
58745+ GR_RLIM_FSIZE_BUMP = 50000,
58746+ GR_RLIM_DATA_BUMP = 10000,
58747+ GR_RLIM_STACK_BUMP = 1000,
58748+ GR_RLIM_CORE_BUMP = 10000,
58749+ GR_RLIM_RSS_BUMP = 500000,
58750+ GR_RLIM_NPROC_BUMP = 1,
58751+ GR_RLIM_NOFILE_BUMP = 5,
58752+ GR_RLIM_MEMLOCK_BUMP = 50000,
58753+ GR_RLIM_AS_BUMP = 500000,
58754+ GR_RLIM_LOCKS_BUMP = 2,
58755+ GR_RLIM_SIGPENDING_BUMP = 5,
58756+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58757+ GR_RLIM_NICE_BUMP = 1,
58758+ GR_RLIM_RTPRIO_BUMP = 1,
58759+ GR_RLIM_RTTIME_BUMP = 1000000
58760+};
58761+
58762+#endif
58763diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58764new file mode 100644
58765index 0000000..da390f1
58766--- /dev/null
58767+++ b/include/linux/grinternal.h
58768@@ -0,0 +1,221 @@
58769+#ifndef __GRINTERNAL_H
58770+#define __GRINTERNAL_H
58771+
58772+#ifdef CONFIG_GRKERNSEC
58773+
58774+#include <linux/fs.h>
58775+#include <linux/mnt_namespace.h>
58776+#include <linux/nsproxy.h>
58777+#include <linux/gracl.h>
58778+#include <linux/grdefs.h>
58779+#include <linux/grmsg.h>
58780+
58781+void gr_add_learn_entry(const char *fmt, ...)
58782+ __attribute__ ((format (printf, 1, 2)));
58783+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58784+ const struct vfsmount *mnt);
58785+__u32 gr_check_create(const struct dentry *new_dentry,
58786+ const struct dentry *parent,
58787+ const struct vfsmount *mnt, const __u32 mode);
58788+int gr_check_protected_task(const struct task_struct *task);
58789+__u32 to_gr_audit(const __u32 reqmode);
58790+int gr_set_acls(const int type);
58791+int gr_apply_subject_to_task(struct task_struct *task);
58792+int gr_acl_is_enabled(void);
58793+char gr_roletype_to_char(void);
58794+
58795+void gr_handle_alertkill(struct task_struct *task);
58796+char *gr_to_filename(const struct dentry *dentry,
58797+ const struct vfsmount *mnt);
58798+char *gr_to_filename1(const struct dentry *dentry,
58799+ const struct vfsmount *mnt);
58800+char *gr_to_filename2(const struct dentry *dentry,
58801+ const struct vfsmount *mnt);
58802+char *gr_to_filename3(const struct dentry *dentry,
58803+ const struct vfsmount *mnt);
58804+
58805+extern int grsec_enable_ptrace_readexec;
58806+extern int grsec_enable_harden_ptrace;
58807+extern int grsec_enable_link;
58808+extern int grsec_enable_fifo;
58809+extern int grsec_enable_execve;
58810+extern int grsec_enable_shm;
58811+extern int grsec_enable_execlog;
58812+extern int grsec_enable_signal;
58813+extern int grsec_enable_audit_ptrace;
58814+extern int grsec_enable_forkfail;
58815+extern int grsec_enable_time;
58816+extern int grsec_enable_rofs;
58817+extern int grsec_enable_chroot_shmat;
58818+extern int grsec_enable_chroot_mount;
58819+extern int grsec_enable_chroot_double;
58820+extern int grsec_enable_chroot_pivot;
58821+extern int grsec_enable_chroot_chdir;
58822+extern int grsec_enable_chroot_chmod;
58823+extern int grsec_enable_chroot_mknod;
58824+extern int grsec_enable_chroot_fchdir;
58825+extern int grsec_enable_chroot_nice;
58826+extern int grsec_enable_chroot_execlog;
58827+extern int grsec_enable_chroot_caps;
58828+extern int grsec_enable_chroot_sysctl;
58829+extern int grsec_enable_chroot_unix;
58830+extern int grsec_enable_tpe;
58831+extern int grsec_tpe_gid;
58832+extern int grsec_enable_tpe_all;
58833+extern int grsec_enable_tpe_invert;
58834+extern int grsec_enable_socket_all;
58835+extern int grsec_socket_all_gid;
58836+extern int grsec_enable_socket_client;
58837+extern int grsec_socket_client_gid;
58838+extern int grsec_enable_socket_server;
58839+extern int grsec_socket_server_gid;
58840+extern int grsec_audit_gid;
58841+extern int grsec_enable_group;
58842+extern int grsec_enable_audit_textrel;
58843+extern int grsec_enable_log_rwxmaps;
58844+extern int grsec_enable_mount;
58845+extern int grsec_enable_chdir;
58846+extern int grsec_resource_logging;
58847+extern int grsec_enable_blackhole;
58848+extern int grsec_lastack_retries;
58849+extern int grsec_enable_brute;
58850+extern int grsec_lock;
58851+
58852+extern spinlock_t grsec_alert_lock;
58853+extern unsigned long grsec_alert_wtime;
58854+extern unsigned long grsec_alert_fyet;
58855+
58856+extern spinlock_t grsec_audit_lock;
58857+
58858+extern rwlock_t grsec_exec_file_lock;
58859+
58860+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58861+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58862+ (tsk)->exec_file->f_vfsmnt) : "/")
58863+
58864+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58865+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58866+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58867+
58868+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58869+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58870+ (tsk)->exec_file->f_vfsmnt) : "/")
58871+
58872+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58873+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58874+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58875+
58876+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58877+
58878+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58879+
58880+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58881+ (task)->pid, (cred)->uid, \
58882+ (cred)->euid, (cred)->gid, (cred)->egid, \
58883+ gr_parent_task_fullpath(task), \
58884+ (task)->real_parent->comm, (task)->real_parent->pid, \
58885+ (pcred)->uid, (pcred)->euid, \
58886+ (pcred)->gid, (pcred)->egid
58887+
58888+#define GR_CHROOT_CAPS {{ \
58889+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58890+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58891+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58892+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58893+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58894+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58895+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58896+
58897+#define security_learn(normal_msg,args...) \
58898+({ \
58899+ read_lock(&grsec_exec_file_lock); \
58900+ gr_add_learn_entry(normal_msg "\n", ## args); \
58901+ read_unlock(&grsec_exec_file_lock); \
58902+})
58903+
58904+enum {
58905+ GR_DO_AUDIT,
58906+ GR_DONT_AUDIT,
58907+ /* used for non-audit messages that we shouldn't kill the task on */
58908+ GR_DONT_AUDIT_GOOD
58909+};
58910+
58911+enum {
58912+ GR_TTYSNIFF,
58913+ GR_RBAC,
58914+ GR_RBAC_STR,
58915+ GR_STR_RBAC,
58916+ GR_RBAC_MODE2,
58917+ GR_RBAC_MODE3,
58918+ GR_FILENAME,
58919+ GR_SYSCTL_HIDDEN,
58920+ GR_NOARGS,
58921+ GR_ONE_INT,
58922+ GR_ONE_INT_TWO_STR,
58923+ GR_ONE_STR,
58924+ GR_STR_INT,
58925+ GR_TWO_STR_INT,
58926+ GR_TWO_INT,
58927+ GR_TWO_U64,
58928+ GR_THREE_INT,
58929+ GR_FIVE_INT_TWO_STR,
58930+ GR_TWO_STR,
58931+ GR_THREE_STR,
58932+ GR_FOUR_STR,
58933+ GR_STR_FILENAME,
58934+ GR_FILENAME_STR,
58935+ GR_FILENAME_TWO_INT,
58936+ GR_FILENAME_TWO_INT_STR,
58937+ GR_TEXTREL,
58938+ GR_PTRACE,
58939+ GR_RESOURCE,
58940+ GR_CAP,
58941+ GR_SIG,
58942+ GR_SIG2,
58943+ GR_CRASH1,
58944+ GR_CRASH2,
58945+ GR_PSACCT,
58946+ GR_RWXMAP
58947+};
58948+
58949+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58950+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58951+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58952+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58953+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58954+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58955+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58956+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58957+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58958+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58959+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58960+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58961+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58962+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58963+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58964+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58965+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58966+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58967+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58968+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58969+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58970+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58971+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58972+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58973+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58974+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58975+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58976+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58977+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58978+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58979+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58980+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58981+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58982+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58983+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58984+
58985+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58986+
58987+#endif
58988+
58989+#endif
58990diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
58991new file mode 100644
58992index 0000000..8b9ed56
58993--- /dev/null
58994+++ b/include/linux/grmsg.h
58995@@ -0,0 +1,110 @@
58996+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58997+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58998+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58999+#define GR_STOPMOD_MSG "denied modification of module state by "
59000+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59001+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59002+#define GR_IOPERM_MSG "denied use of ioperm() by "
59003+#define GR_IOPL_MSG "denied use of iopl() by "
59004+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59005+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59006+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59007+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59008+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59009+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59010+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59011+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59012+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59013+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59014+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59015+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59016+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59017+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59018+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59019+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59020+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59021+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59022+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59023+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59024+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59025+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59026+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59027+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59028+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59029+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59030+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59031+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59032+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59033+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59034+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59035+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59036+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59037+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59038+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59039+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59040+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59041+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59042+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59043+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59044+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59045+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59046+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59047+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59048+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59049+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59050+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59051+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59052+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59053+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59054+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59055+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59056+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59057+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59058+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59059+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59060+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59061+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59062+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59063+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59064+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59065+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59066+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59067+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59068+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59069+#define GR_NICE_CHROOT_MSG "denied priority change by "
59070+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59071+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59072+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59073+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59074+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59075+#define GR_TIME_MSG "time set by "
59076+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59077+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59078+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59079+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59080+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59081+#define GR_BIND_MSG "denied bind() by "
59082+#define GR_CONNECT_MSG "denied connect() by "
59083+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59084+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59085+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59086+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59087+#define GR_CAP_ACL_MSG "use of %s denied for "
59088+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59089+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59090+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59091+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59092+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59093+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59094+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59095+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59096+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59097+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59098+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59099+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59100+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59101+#define GR_VM86_MSG "denied use of vm86 by "
59102+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59103+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59104+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59105+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59106diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59107new file mode 100644
59108index 0000000..10c8ced
59109--- /dev/null
59110+++ b/include/linux/grsecurity.h
59111@@ -0,0 +1,229 @@
59112+#ifndef GR_SECURITY_H
59113+#define GR_SECURITY_H
59114+#include <linux/fs.h>
59115+#include <linux/fs_struct.h>
59116+#include <linux/binfmts.h>
59117+#include <linux/gracl.h>
59118+
59119+/* notify of brain-dead configs */
59120+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59121+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59122+#endif
59123+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59124+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59125+#endif
59126+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59127+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59128+#endif
59129+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59130+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59131+#endif
59132+
59133+#include <linux/compat.h>
59134+
59135+struct user_arg_ptr {
59136+#ifdef CONFIG_COMPAT
59137+ bool is_compat;
59138+#endif
59139+ union {
59140+ const char __user *const __user *native;
59141+#ifdef CONFIG_COMPAT
59142+ compat_uptr_t __user *compat;
59143+#endif
59144+ } ptr;
59145+};
59146+
59147+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59148+void gr_handle_brute_check(void);
59149+void gr_handle_kernel_exploit(void);
59150+int gr_process_user_ban(void);
59151+
59152+char gr_roletype_to_char(void);
59153+
59154+int gr_acl_enable_at_secure(void);
59155+
59156+int gr_check_user_change(int real, int effective, int fs);
59157+int gr_check_group_change(int real, int effective, int fs);
59158+
59159+void gr_del_task_from_ip_table(struct task_struct *p);
59160+
59161+int gr_pid_is_chrooted(struct task_struct *p);
59162+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59163+int gr_handle_chroot_nice(void);
59164+int gr_handle_chroot_sysctl(const int op);
59165+int gr_handle_chroot_setpriority(struct task_struct *p,
59166+ const int niceval);
59167+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59168+int gr_handle_chroot_chroot(const struct dentry *dentry,
59169+ const struct vfsmount *mnt);
59170+void gr_handle_chroot_chdir(struct path *path);
59171+int gr_handle_chroot_chmod(const struct dentry *dentry,
59172+ const struct vfsmount *mnt, const int mode);
59173+int gr_handle_chroot_mknod(const struct dentry *dentry,
59174+ const struct vfsmount *mnt, const int mode);
59175+int gr_handle_chroot_mount(const struct dentry *dentry,
59176+ const struct vfsmount *mnt,
59177+ const char *dev_name);
59178+int gr_handle_chroot_pivot(void);
59179+int gr_handle_chroot_unix(const pid_t pid);
59180+
59181+int gr_handle_rawio(const struct inode *inode);
59182+
59183+void gr_handle_ioperm(void);
59184+void gr_handle_iopl(void);
59185+
59186+int gr_tpe_allow(const struct file *file);
59187+
59188+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59189+void gr_clear_chroot_entries(struct task_struct *task);
59190+
59191+void gr_log_forkfail(const int retval);
59192+void gr_log_timechange(void);
59193+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59194+void gr_log_chdir(const struct dentry *dentry,
59195+ const struct vfsmount *mnt);
59196+void gr_log_chroot_exec(const struct dentry *dentry,
59197+ const struct vfsmount *mnt);
59198+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59199+void gr_log_remount(const char *devname, const int retval);
59200+void gr_log_unmount(const char *devname, const int retval);
59201+void gr_log_mount(const char *from, const char *to, const int retval);
59202+void gr_log_textrel(struct vm_area_struct *vma);
59203+void gr_log_rwxmmap(struct file *file);
59204+void gr_log_rwxmprotect(struct file *file);
59205+
59206+int gr_handle_follow_link(const struct inode *parent,
59207+ const struct inode *inode,
59208+ const struct dentry *dentry,
59209+ const struct vfsmount *mnt);
59210+int gr_handle_fifo(const struct dentry *dentry,
59211+ const struct vfsmount *mnt,
59212+ const struct dentry *dir, const int flag,
59213+ const int acc_mode);
59214+int gr_handle_hardlink(const struct dentry *dentry,
59215+ const struct vfsmount *mnt,
59216+ struct inode *inode,
59217+ const int mode, const char *to);
59218+
59219+int gr_is_capable(const int cap);
59220+int gr_is_capable_nolog(const int cap);
59221+void gr_learn_resource(const struct task_struct *task, const int limit,
59222+ const unsigned long wanted, const int gt);
59223+void gr_copy_label(struct task_struct *tsk);
59224+void gr_handle_crash(struct task_struct *task, const int sig);
59225+int gr_handle_signal(const struct task_struct *p, const int sig);
59226+int gr_check_crash_uid(const uid_t uid);
59227+int gr_check_protected_task(const struct task_struct *task);
59228+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59229+int gr_acl_handle_mmap(const struct file *file,
59230+ const unsigned long prot);
59231+int gr_acl_handle_mprotect(const struct file *file,
59232+ const unsigned long prot);
59233+int gr_check_hidden_task(const struct task_struct *tsk);
59234+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59235+ const struct vfsmount *mnt);
59236+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59237+ const struct vfsmount *mnt);
59238+__u32 gr_acl_handle_access(const struct dentry *dentry,
59239+ const struct vfsmount *mnt, const int fmode);
59240+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59241+ const struct vfsmount *mnt, mode_t mode);
59242+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59243+ const struct vfsmount *mnt, mode_t mode);
59244+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59245+ const struct vfsmount *mnt);
59246+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59247+ const struct vfsmount *mnt);
59248+int gr_handle_ptrace(struct task_struct *task, const long request);
59249+int gr_handle_proc_ptrace(struct task_struct *task);
59250+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59251+ const struct vfsmount *mnt);
59252+int gr_check_crash_exec(const struct file *filp);
59253+int gr_acl_is_enabled(void);
59254+void gr_set_kernel_label(struct task_struct *task);
59255+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59256+ const gid_t gid);
59257+int gr_set_proc_label(const struct dentry *dentry,
59258+ const struct vfsmount *mnt,
59259+ const int unsafe_flags);
59260+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59261+ const struct vfsmount *mnt);
59262+__u32 gr_acl_handle_open(const struct dentry *dentry,
59263+ const struct vfsmount *mnt, int acc_mode);
59264+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59265+ const struct dentry *p_dentry,
59266+ const struct vfsmount *p_mnt,
59267+ int open_flags, int acc_mode, const int imode);
59268+void gr_handle_create(const struct dentry *dentry,
59269+ const struct vfsmount *mnt);
59270+void gr_handle_proc_create(const struct dentry *dentry,
59271+ const struct inode *inode);
59272+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59273+ const struct dentry *parent_dentry,
59274+ const struct vfsmount *parent_mnt,
59275+ const int mode);
59276+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59277+ const struct dentry *parent_dentry,
59278+ const struct vfsmount *parent_mnt);
59279+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59280+ const struct vfsmount *mnt);
59281+void gr_handle_delete(const ino_t ino, const dev_t dev);
59282+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59283+ const struct vfsmount *mnt);
59284+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59285+ const struct dentry *parent_dentry,
59286+ const struct vfsmount *parent_mnt,
59287+ const char *from);
59288+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59289+ const struct dentry *parent_dentry,
59290+ const struct vfsmount *parent_mnt,
59291+ const struct dentry *old_dentry,
59292+ const struct vfsmount *old_mnt, const char *to);
59293+int gr_acl_handle_rename(struct dentry *new_dentry,
59294+ struct dentry *parent_dentry,
59295+ const struct vfsmount *parent_mnt,
59296+ struct dentry *old_dentry,
59297+ struct inode *old_parent_inode,
59298+ struct vfsmount *old_mnt, const char *newname);
59299+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59300+ struct dentry *old_dentry,
59301+ struct dentry *new_dentry,
59302+ struct vfsmount *mnt, const __u8 replace);
59303+__u32 gr_check_link(const struct dentry *new_dentry,
59304+ const struct dentry *parent_dentry,
59305+ const struct vfsmount *parent_mnt,
59306+ const struct dentry *old_dentry,
59307+ const struct vfsmount *old_mnt);
59308+int gr_acl_handle_filldir(const struct file *file, const char *name,
59309+ const unsigned int namelen, const ino_t ino);
59310+
59311+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59312+ const struct vfsmount *mnt);
59313+void gr_acl_handle_exit(void);
59314+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59315+int gr_acl_handle_procpidmem(const struct task_struct *task);
59316+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59317+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59318+void gr_audit_ptrace(struct task_struct *task);
59319+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59320+
59321+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59322+
59323+#ifdef CONFIG_GRKERNSEC
59324+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59325+void gr_handle_vm86(void);
59326+void gr_handle_mem_readwrite(u64 from, u64 to);
59327+
59328+void gr_log_badprocpid(const char *entry);
59329+
59330+extern int grsec_enable_dmesg;
59331+extern int grsec_disable_privio;
59332+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59333+extern int grsec_enable_chroot_findtask;
59334+#endif
59335+#ifdef CONFIG_GRKERNSEC_SETXID
59336+extern int grsec_enable_setxid;
59337+#endif
59338+#endif
59339+
59340+#endif
59341diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59342new file mode 100644
59343index 0000000..e7ffaaf
59344--- /dev/null
59345+++ b/include/linux/grsock.h
59346@@ -0,0 +1,19 @@
59347+#ifndef __GRSOCK_H
59348+#define __GRSOCK_H
59349+
59350+extern void gr_attach_curr_ip(const struct sock *sk);
59351+extern int gr_handle_sock_all(const int family, const int type,
59352+ const int protocol);
59353+extern int gr_handle_sock_server(const struct sockaddr *sck);
59354+extern int gr_handle_sock_server_other(const struct sock *sck);
59355+extern int gr_handle_sock_client(const struct sockaddr *sck);
59356+extern int gr_search_connect(struct socket * sock,
59357+ struct sockaddr_in * addr);
59358+extern int gr_search_bind(struct socket * sock,
59359+ struct sockaddr_in * addr);
59360+extern int gr_search_listen(struct socket * sock);
59361+extern int gr_search_accept(struct socket * sock);
59362+extern int gr_search_socket(const int domain, const int type,
59363+ const int protocol);
59364+
59365+#endif
59366diff --git a/include/linux/hid.h b/include/linux/hid.h
59367index c235e4e..f0cf7a0 100644
59368--- a/include/linux/hid.h
59369+++ b/include/linux/hid.h
59370@@ -679,7 +679,7 @@ struct hid_ll_driver {
59371 unsigned int code, int value);
59372
59373 int (*parse)(struct hid_device *hdev);
59374-};
59375+} __no_const;
59376
59377 #define PM_HINT_FULLON 1<<5
59378 #define PM_HINT_NORMAL 1<<1
59379diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59380index 3a93f73..b19d0b3 100644
59381--- a/include/linux/highmem.h
59382+++ b/include/linux/highmem.h
59383@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59384 kunmap_atomic(kaddr, KM_USER0);
59385 }
59386
59387+static inline void sanitize_highpage(struct page *page)
59388+{
59389+ void *kaddr;
59390+ unsigned long flags;
59391+
59392+ local_irq_save(flags);
59393+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59394+ clear_page(kaddr);
59395+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59396+ local_irq_restore(flags);
59397+}
59398+
59399 static inline void zero_user_segments(struct page *page,
59400 unsigned start1, unsigned end1,
59401 unsigned start2, unsigned end2)
59402diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59403index 07d103a..04ec65b 100644
59404--- a/include/linux/i2c.h
59405+++ b/include/linux/i2c.h
59406@@ -364,6 +364,7 @@ struct i2c_algorithm {
59407 /* To determine what the adapter supports */
59408 u32 (*functionality) (struct i2c_adapter *);
59409 };
59410+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59411
59412 /*
59413 * i2c_adapter is the structure used to identify a physical i2c bus along
59414diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59415index a6deef4..c56a7f2 100644
59416--- a/include/linux/i2o.h
59417+++ b/include/linux/i2o.h
59418@@ -564,7 +564,7 @@ struct i2o_controller {
59419 struct i2o_device *exec; /* Executive */
59420 #if BITS_PER_LONG == 64
59421 spinlock_t context_list_lock; /* lock for context_list */
59422- atomic_t context_list_counter; /* needed for unique contexts */
59423+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59424 struct list_head context_list; /* list of context id's
59425 and pointers */
59426 #endif
59427diff --git a/include/linux/init.h b/include/linux/init.h
59428index 9146f39..885354d 100644
59429--- a/include/linux/init.h
59430+++ b/include/linux/init.h
59431@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59432
59433 /* Each module must use one module_init(). */
59434 #define module_init(initfn) \
59435- static inline initcall_t __inittest(void) \
59436+ static inline __used initcall_t __inittest(void) \
59437 { return initfn; } \
59438 int init_module(void) __attribute__((alias(#initfn)));
59439
59440 /* This is only required if you want to be unloadable. */
59441 #define module_exit(exitfn) \
59442- static inline exitcall_t __exittest(void) \
59443+ static inline __used exitcall_t __exittest(void) \
59444 { return exitfn; } \
59445 void cleanup_module(void) __attribute__((alias(#exitfn)));
59446
59447diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59448index 32574ee..00d4ef1 100644
59449--- a/include/linux/init_task.h
59450+++ b/include/linux/init_task.h
59451@@ -128,6 +128,12 @@ extern struct cred init_cred;
59452
59453 #define INIT_TASK_COMM "swapper"
59454
59455+#ifdef CONFIG_X86
59456+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59457+#else
59458+#define INIT_TASK_THREAD_INFO
59459+#endif
59460+
59461 /*
59462 * INIT_TASK is used to set up the first task table, touch at
59463 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59464@@ -166,6 +172,7 @@ extern struct cred init_cred;
59465 RCU_INIT_POINTER(.cred, &init_cred), \
59466 .comm = INIT_TASK_COMM, \
59467 .thread = INIT_THREAD, \
59468+ INIT_TASK_THREAD_INFO \
59469 .fs = &init_fs, \
59470 .files = &init_files, \
59471 .signal = &init_signals, \
59472diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59473index e6ca56d..8583707 100644
59474--- a/include/linux/intel-iommu.h
59475+++ b/include/linux/intel-iommu.h
59476@@ -296,7 +296,7 @@ struct iommu_flush {
59477 u8 fm, u64 type);
59478 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59479 unsigned int size_order, u64 type);
59480-};
59481+} __no_const;
59482
59483 enum {
59484 SR_DMAR_FECTL_REG,
59485diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59486index a64b00e..464d8bc 100644
59487--- a/include/linux/interrupt.h
59488+++ b/include/linux/interrupt.h
59489@@ -441,7 +441,7 @@ enum
59490 /* map softirq index to softirq name. update 'softirq_to_name' in
59491 * kernel/softirq.c when adding a new softirq.
59492 */
59493-extern char *softirq_to_name[NR_SOFTIRQS];
59494+extern const char * const softirq_to_name[NR_SOFTIRQS];
59495
59496 /* softirq mask and active fields moved to irq_cpustat_t in
59497 * asm/hardirq.h to get better cache usage. KAO
59498@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59499
59500 struct softirq_action
59501 {
59502- void (*action)(struct softirq_action *);
59503+ void (*action)(void);
59504 };
59505
59506 asmlinkage void do_softirq(void);
59507 asmlinkage void __do_softirq(void);
59508-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59509+extern void open_softirq(int nr, void (*action)(void));
59510 extern void softirq_init(void);
59511 static inline void __raise_softirq_irqoff(unsigned int nr)
59512 {
59513diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59514index 3875719..4cd454c 100644
59515--- a/include/linux/kallsyms.h
59516+++ b/include/linux/kallsyms.h
59517@@ -15,7 +15,8 @@
59518
59519 struct module;
59520
59521-#ifdef CONFIG_KALLSYMS
59522+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59523+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59524 /* Lookup the address for a symbol. Returns 0 if not found. */
59525 unsigned long kallsyms_lookup_name(const char *name);
59526
59527@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59528 /* Stupid that this does nothing, but I didn't create this mess. */
59529 #define __print_symbol(fmt, addr)
59530 #endif /*CONFIG_KALLSYMS*/
59531+#else /* when included by kallsyms.c, vsnprintf.c, or
59532+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59533+extern void __print_symbol(const char *fmt, unsigned long address);
59534+extern int sprint_backtrace(char *buffer, unsigned long address);
59535+extern int sprint_symbol(char *buffer, unsigned long address);
59536+const char *kallsyms_lookup(unsigned long addr,
59537+ unsigned long *symbolsize,
59538+ unsigned long *offset,
59539+ char **modname, char *namebuf);
59540+#endif
59541
59542 /* This macro allows us to keep printk typechecking */
59543 static __printf(1, 2)
59544diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59545index fa39183..40160be 100644
59546--- a/include/linux/kgdb.h
59547+++ b/include/linux/kgdb.h
59548@@ -53,7 +53,7 @@ extern int kgdb_connected;
59549 extern int kgdb_io_module_registered;
59550
59551 extern atomic_t kgdb_setting_breakpoint;
59552-extern atomic_t kgdb_cpu_doing_single_step;
59553+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59554
59555 extern struct task_struct *kgdb_usethread;
59556 extern struct task_struct *kgdb_contthread;
59557@@ -251,7 +251,7 @@ struct kgdb_arch {
59558 void (*disable_hw_break)(struct pt_regs *regs);
59559 void (*remove_all_hw_break)(void);
59560 void (*correct_hw_break)(void);
59561-};
59562+} __do_const;
59563
59564 /**
59565 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59566@@ -276,7 +276,7 @@ struct kgdb_io {
59567 void (*pre_exception) (void);
59568 void (*post_exception) (void);
59569 int is_console;
59570-};
59571+} __do_const;
59572
59573 extern struct kgdb_arch arch_kgdb_ops;
59574
59575diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59576index b16f653..eb908f4 100644
59577--- a/include/linux/kmod.h
59578+++ b/include/linux/kmod.h
59579@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59580 * usually useless though. */
59581 extern __printf(2, 3)
59582 int __request_module(bool wait, const char *name, ...);
59583+extern __printf(3, 4)
59584+int ___request_module(bool wait, char *param_name, const char *name, ...);
59585 #define request_module(mod...) __request_module(true, mod)
59586 #define request_module_nowait(mod...) __request_module(false, mod)
59587 #define try_then_request_module(x, mod...) \
59588diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59589index d526231..086e89b 100644
59590--- a/include/linux/kvm_host.h
59591+++ b/include/linux/kvm_host.h
59592@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59593 void vcpu_load(struct kvm_vcpu *vcpu);
59594 void vcpu_put(struct kvm_vcpu *vcpu);
59595
59596-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59597+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59598 struct module *module);
59599 void kvm_exit(void);
59600
59601@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59602 struct kvm_guest_debug *dbg);
59603 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59604
59605-int kvm_arch_init(void *opaque);
59606+int kvm_arch_init(const void *opaque);
59607 void kvm_arch_exit(void);
59608
59609 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59610diff --git a/include/linux/libata.h b/include/linux/libata.h
59611index cafc09a..d7e7829 100644
59612--- a/include/linux/libata.h
59613+++ b/include/linux/libata.h
59614@@ -909,7 +909,7 @@ struct ata_port_operations {
59615 * fields must be pointers.
59616 */
59617 const struct ata_port_operations *inherits;
59618-};
59619+} __do_const;
59620
59621 struct ata_port_info {
59622 unsigned long flags;
59623diff --git a/include/linux/mca.h b/include/linux/mca.h
59624index 3797270..7765ede 100644
59625--- a/include/linux/mca.h
59626+++ b/include/linux/mca.h
59627@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59628 int region);
59629 void * (*mca_transform_memory)(struct mca_device *,
59630 void *memory);
59631-};
59632+} __no_const;
59633
59634 struct mca_bus {
59635 u64 default_dma_mask;
59636diff --git a/include/linux/memory.h b/include/linux/memory.h
59637index 935699b..11042cc 100644
59638--- a/include/linux/memory.h
59639+++ b/include/linux/memory.h
59640@@ -144,7 +144,7 @@ struct memory_accessor {
59641 size_t count);
59642 ssize_t (*write)(struct memory_accessor *, const char *buf,
59643 off_t offset, size_t count);
59644-};
59645+} __no_const;
59646
59647 /*
59648 * Kernel text modification mutex, used for code patching. Users of this lock
59649diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59650index 9970337..9444122 100644
59651--- a/include/linux/mfd/abx500.h
59652+++ b/include/linux/mfd/abx500.h
59653@@ -188,6 +188,7 @@ struct abx500_ops {
59654 int (*event_registers_startup_state_get) (struct device *, u8 *);
59655 int (*startup_irq_enabled) (struct device *, unsigned int);
59656 };
59657+typedef struct abx500_ops __no_const abx500_ops_no_const;
59658
59659 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59660 void abx500_remove_ops(struct device *dev);
59661diff --git a/include/linux/mm.h b/include/linux/mm.h
59662index 4baadd1..2e0b45e 100644
59663--- a/include/linux/mm.h
59664+++ b/include/linux/mm.h
59665@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59666
59667 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59668 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59669+
59670+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59671+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59672+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59673+#else
59674 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59675+#endif
59676+
59677 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59678 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59679
59680@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59681 int set_page_dirty_lock(struct page *page);
59682 int clear_page_dirty_for_io(struct page *page);
59683
59684-/* Is the vma a continuation of the stack vma above it? */
59685-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59686-{
59687- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59688-}
59689-
59690-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59691- unsigned long addr)
59692-{
59693- return (vma->vm_flags & VM_GROWSDOWN) &&
59694- (vma->vm_start == addr) &&
59695- !vma_growsdown(vma->vm_prev, addr);
59696-}
59697-
59698-/* Is the vma a continuation of the stack vma below it? */
59699-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59700-{
59701- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59702-}
59703-
59704-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59705- unsigned long addr)
59706-{
59707- return (vma->vm_flags & VM_GROWSUP) &&
59708- (vma->vm_end == addr) &&
59709- !vma_growsup(vma->vm_next, addr);
59710-}
59711-
59712 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59713 unsigned long old_addr, struct vm_area_struct *new_vma,
59714 unsigned long new_addr, unsigned long len);
59715@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59716 }
59717 #endif
59718
59719+#ifdef CONFIG_MMU
59720+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59721+#else
59722+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59723+{
59724+ return __pgprot(0);
59725+}
59726+#endif
59727+
59728 int vma_wants_writenotify(struct vm_area_struct *vma);
59729
59730 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59731@@ -1419,6 +1407,7 @@ out:
59732 }
59733
59734 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59735+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59736
59737 extern unsigned long do_brk(unsigned long, unsigned long);
59738
59739@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59740 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59741 struct vm_area_struct **pprev);
59742
59743+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59744+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59745+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59746+
59747 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59748 NULL if none. Assume start_addr < end_addr. */
59749 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59750@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59751 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59752 }
59753
59754-#ifdef CONFIG_MMU
59755-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59756-#else
59757-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59758-{
59759- return __pgprot(0);
59760-}
59761-#endif
59762-
59763 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59764 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59765 unsigned long pfn, unsigned long size, pgprot_t);
59766@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59767 extern int sysctl_memory_failure_early_kill;
59768 extern int sysctl_memory_failure_recovery;
59769 extern void shake_page(struct page *p, int access);
59770-extern atomic_long_t mce_bad_pages;
59771+extern atomic_long_unchecked_t mce_bad_pages;
59772 extern int soft_offline_page(struct page *page, int flags);
59773
59774 extern void dump_page(struct page *page);
59775@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59776 unsigned int pages_per_huge_page);
59777 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59778
59779+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59780+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59781+#else
59782+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59783+#endif
59784+
59785 #endif /* __KERNEL__ */
59786 #endif /* _LINUX_MM_H */
59787diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59788index 5b42f1b..759e4b4 100644
59789--- a/include/linux/mm_types.h
59790+++ b/include/linux/mm_types.h
59791@@ -253,6 +253,8 @@ struct vm_area_struct {
59792 #ifdef CONFIG_NUMA
59793 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59794 #endif
59795+
59796+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59797 };
59798
59799 struct core_thread {
59800@@ -389,6 +391,24 @@ struct mm_struct {
59801 #ifdef CONFIG_CPUMASK_OFFSTACK
59802 struct cpumask cpumask_allocation;
59803 #endif
59804+
59805+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59806+ unsigned long pax_flags;
59807+#endif
59808+
59809+#ifdef CONFIG_PAX_DLRESOLVE
59810+ unsigned long call_dl_resolve;
59811+#endif
59812+
59813+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59814+ unsigned long call_syscall;
59815+#endif
59816+
59817+#ifdef CONFIG_PAX_ASLR
59818+ unsigned long delta_mmap; /* randomized offset */
59819+ unsigned long delta_stack; /* randomized offset */
59820+#endif
59821+
59822 };
59823
59824 static inline void mm_init_cpumask(struct mm_struct *mm)
59825diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59826index 1d1b1e1..2a13c78 100644
59827--- a/include/linux/mmu_notifier.h
59828+++ b/include/linux/mmu_notifier.h
59829@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59830 */
59831 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59832 ({ \
59833- pte_t __pte; \
59834+ pte_t ___pte; \
59835 struct vm_area_struct *___vma = __vma; \
59836 unsigned long ___address = __address; \
59837- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59838+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59839 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59840- __pte; \
59841+ ___pte; \
59842 })
59843
59844 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59845diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59846index 188cb2f..d78409b 100644
59847--- a/include/linux/mmzone.h
59848+++ b/include/linux/mmzone.h
59849@@ -369,7 +369,7 @@ struct zone {
59850 unsigned long flags; /* zone flags, see below */
59851
59852 /* Zone statistics */
59853- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59854+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59855
59856 /*
59857 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59858diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59859index 468819c..17b9db3 100644
59860--- a/include/linux/mod_devicetable.h
59861+++ b/include/linux/mod_devicetable.h
59862@@ -12,7 +12,7 @@
59863 typedef unsigned long kernel_ulong_t;
59864 #endif
59865
59866-#define PCI_ANY_ID (~0)
59867+#define PCI_ANY_ID ((__u16)~0)
59868
59869 struct pci_device_id {
59870 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59871@@ -131,7 +131,7 @@ struct usb_device_id {
59872 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59873 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59874
59875-#define HID_ANY_ID (~0)
59876+#define HID_ANY_ID (~0U)
59877
59878 struct hid_device_id {
59879 __u16 bus;
59880diff --git a/include/linux/module.h b/include/linux/module.h
59881index 3cb7839..511cb87 100644
59882--- a/include/linux/module.h
59883+++ b/include/linux/module.h
59884@@ -17,6 +17,7 @@
59885 #include <linux/moduleparam.h>
59886 #include <linux/tracepoint.h>
59887 #include <linux/export.h>
59888+#include <linux/fs.h>
59889
59890 #include <linux/percpu.h>
59891 #include <asm/module.h>
59892@@ -261,19 +262,16 @@ struct module
59893 int (*init)(void);
59894
59895 /* If this is non-NULL, vfree after init() returns */
59896- void *module_init;
59897+ void *module_init_rx, *module_init_rw;
59898
59899 /* Here is the actual code + data, vfree'd on unload. */
59900- void *module_core;
59901+ void *module_core_rx, *module_core_rw;
59902
59903 /* Here are the sizes of the init and core sections */
59904- unsigned int init_size, core_size;
59905+ unsigned int init_size_rw, core_size_rw;
59906
59907 /* The size of the executable code in each section. */
59908- unsigned int init_text_size, core_text_size;
59909-
59910- /* Size of RO sections of the module (text+rodata) */
59911- unsigned int init_ro_size, core_ro_size;
59912+ unsigned int init_size_rx, core_size_rx;
59913
59914 /* Arch-specific module values */
59915 struct mod_arch_specific arch;
59916@@ -329,6 +327,10 @@ struct module
59917 #ifdef CONFIG_EVENT_TRACING
59918 struct ftrace_event_call **trace_events;
59919 unsigned int num_trace_events;
59920+ struct file_operations trace_id;
59921+ struct file_operations trace_enable;
59922+ struct file_operations trace_format;
59923+ struct file_operations trace_filter;
59924 #endif
59925 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59926 unsigned int num_ftrace_callsites;
59927@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59928 bool is_module_percpu_address(unsigned long addr);
59929 bool is_module_text_address(unsigned long addr);
59930
59931+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59932+{
59933+
59934+#ifdef CONFIG_PAX_KERNEXEC
59935+ if (ktla_ktva(addr) >= (unsigned long)start &&
59936+ ktla_ktva(addr) < (unsigned long)start + size)
59937+ return 1;
59938+#endif
59939+
59940+ return ((void *)addr >= start && (void *)addr < start + size);
59941+}
59942+
59943+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59944+{
59945+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59946+}
59947+
59948+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59949+{
59950+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59951+}
59952+
59953+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59954+{
59955+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59956+}
59957+
59958+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59959+{
59960+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59961+}
59962+
59963 static inline int within_module_core(unsigned long addr, struct module *mod)
59964 {
59965- return (unsigned long)mod->module_core <= addr &&
59966- addr < (unsigned long)mod->module_core + mod->core_size;
59967+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59968 }
59969
59970 static inline int within_module_init(unsigned long addr, struct module *mod)
59971 {
59972- return (unsigned long)mod->module_init <= addr &&
59973- addr < (unsigned long)mod->module_init + mod->init_size;
59974+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59975 }
59976
59977 /* Search for module by name: must hold module_mutex. */
59978diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
59979index b2be02e..6a9fdb1 100644
59980--- a/include/linux/moduleloader.h
59981+++ b/include/linux/moduleloader.h
59982@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
59983 sections. Returns NULL on failure. */
59984 void *module_alloc(unsigned long size);
59985
59986+#ifdef CONFIG_PAX_KERNEXEC
59987+void *module_alloc_exec(unsigned long size);
59988+#else
59989+#define module_alloc_exec(x) module_alloc(x)
59990+#endif
59991+
59992 /* Free memory returned from module_alloc. */
59993 void module_free(struct module *mod, void *module_region);
59994
59995+#ifdef CONFIG_PAX_KERNEXEC
59996+void module_free_exec(struct module *mod, void *module_region);
59997+#else
59998+#define module_free_exec(x, y) module_free((x), (y))
59999+#endif
60000+
60001 /* Apply the given relocation to the (simplified) ELF. Return -error
60002 or 0. */
60003 int apply_relocate(Elf_Shdr *sechdrs,
60004diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60005index 7939f63..ec6df57 100644
60006--- a/include/linux/moduleparam.h
60007+++ b/include/linux/moduleparam.h
60008@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60009 * @len is usually just sizeof(string).
60010 */
60011 #define module_param_string(name, string, len, perm) \
60012- static const struct kparam_string __param_string_##name \
60013+ static const struct kparam_string __param_string_##name __used \
60014 = { len, string }; \
60015 __module_param_call(MODULE_PARAM_PREFIX, name, \
60016 &param_ops_string, \
60017@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60018 * module_param_named() for why this might be necessary.
60019 */
60020 #define module_param_array_named(name, array, type, nump, perm) \
60021- static const struct kparam_array __param_arr_##name \
60022+ static const struct kparam_array __param_arr_##name __used \
60023 = { .max = ARRAY_SIZE(array), .num = nump, \
60024 .ops = &param_ops_##type, \
60025 .elemsize = sizeof(array[0]), .elem = array }; \
60026diff --git a/include/linux/namei.h b/include/linux/namei.h
60027index ffc0213..2c1f2cb 100644
60028--- a/include/linux/namei.h
60029+++ b/include/linux/namei.h
60030@@ -24,7 +24,7 @@ struct nameidata {
60031 unsigned seq;
60032 int last_type;
60033 unsigned depth;
60034- char *saved_names[MAX_NESTED_LINKS + 1];
60035+ const char *saved_names[MAX_NESTED_LINKS + 1];
60036
60037 /* Intent data */
60038 union {
60039@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60040 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60041 extern void unlock_rename(struct dentry *, struct dentry *);
60042
60043-static inline void nd_set_link(struct nameidata *nd, char *path)
60044+static inline void nd_set_link(struct nameidata *nd, const char *path)
60045 {
60046 nd->saved_names[nd->depth] = path;
60047 }
60048
60049-static inline char *nd_get_link(struct nameidata *nd)
60050+static inline const char *nd_get_link(const struct nameidata *nd)
60051 {
60052 return nd->saved_names[nd->depth];
60053 }
60054diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60055index a82ad4d..90d15b7 100644
60056--- a/include/linux/netdevice.h
60057+++ b/include/linux/netdevice.h
60058@@ -949,6 +949,7 @@ struct net_device_ops {
60059 int (*ndo_set_features)(struct net_device *dev,
60060 u32 features);
60061 };
60062+typedef struct net_device_ops __no_const net_device_ops_no_const;
60063
60064 /*
60065 * The DEVICE structure.
60066@@ -1088,7 +1089,7 @@ struct net_device {
60067 int iflink;
60068
60069 struct net_device_stats stats;
60070- atomic_long_t rx_dropped; /* dropped packets by core network
60071+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60072 * Do not use this in drivers.
60073 */
60074
60075diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60076new file mode 100644
60077index 0000000..33f4af8
60078--- /dev/null
60079+++ b/include/linux/netfilter/xt_gradm.h
60080@@ -0,0 +1,9 @@
60081+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60082+#define _LINUX_NETFILTER_XT_GRADM_H 1
60083+
60084+struct xt_gradm_mtinfo {
60085+ __u16 flags;
60086+ __u16 invflags;
60087+};
60088+
60089+#endif
60090diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60091index c65a18a..0c05f3a 100644
60092--- a/include/linux/of_pdt.h
60093+++ b/include/linux/of_pdt.h
60094@@ -32,7 +32,7 @@ struct of_pdt_ops {
60095
60096 /* return 0 on success; fill in 'len' with number of bytes in path */
60097 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60098-};
60099+} __no_const;
60100
60101 extern void *prom_early_alloc(unsigned long size);
60102
60103diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60104index a4c5624..79d6d88 100644
60105--- a/include/linux/oprofile.h
60106+++ b/include/linux/oprofile.h
60107@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60108 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60109 char const * name, ulong * val);
60110
60111-/** Create a file for read-only access to an atomic_t. */
60112+/** Create a file for read-only access to an atomic_unchecked_t. */
60113 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60114- char const * name, atomic_t * val);
60115+ char const * name, atomic_unchecked_t * val);
60116
60117 /** create a directory */
60118 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60119diff --git a/include/linux/padata.h b/include/linux/padata.h
60120index 4633b2f..988bc08 100644
60121--- a/include/linux/padata.h
60122+++ b/include/linux/padata.h
60123@@ -129,7 +129,7 @@ struct parallel_data {
60124 struct padata_instance *pinst;
60125 struct padata_parallel_queue __percpu *pqueue;
60126 struct padata_serial_queue __percpu *squeue;
60127- atomic_t seq_nr;
60128+ atomic_unchecked_t seq_nr;
60129 atomic_t reorder_objects;
60130 atomic_t refcnt;
60131 unsigned int max_seq_nr;
60132diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60133index b1f8912..c955bff 100644
60134--- a/include/linux/perf_event.h
60135+++ b/include/linux/perf_event.h
60136@@ -748,8 +748,8 @@ struct perf_event {
60137
60138 enum perf_event_active_state state;
60139 unsigned int attach_state;
60140- local64_t count;
60141- atomic64_t child_count;
60142+ local64_t count; /* PaX: fix it one day */
60143+ atomic64_unchecked_t child_count;
60144
60145 /*
60146 * These are the total time in nanoseconds that the event
60147@@ -800,8 +800,8 @@ struct perf_event {
60148 * These accumulate total time (in nanoseconds) that children
60149 * events have been enabled and running, respectively.
60150 */
60151- atomic64_t child_total_time_enabled;
60152- atomic64_t child_total_time_running;
60153+ atomic64_unchecked_t child_total_time_enabled;
60154+ atomic64_unchecked_t child_total_time_running;
60155
60156 /*
60157 * Protect attach/detach and child_list:
60158diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60159index 77257c9..51d473a 100644
60160--- a/include/linux/pipe_fs_i.h
60161+++ b/include/linux/pipe_fs_i.h
60162@@ -46,9 +46,9 @@ struct pipe_buffer {
60163 struct pipe_inode_info {
60164 wait_queue_head_t wait;
60165 unsigned int nrbufs, curbuf, buffers;
60166- unsigned int readers;
60167- unsigned int writers;
60168- unsigned int waiting_writers;
60169+ atomic_t readers;
60170+ atomic_t writers;
60171+ atomic_t waiting_writers;
60172 unsigned int r_counter;
60173 unsigned int w_counter;
60174 struct page *tmp_page;
60175diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60176index d3085e7..fd01052 100644
60177--- a/include/linux/pm_runtime.h
60178+++ b/include/linux/pm_runtime.h
60179@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60180
60181 static inline void pm_runtime_mark_last_busy(struct device *dev)
60182 {
60183- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60184+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60185 }
60186
60187 #else /* !CONFIG_PM_RUNTIME */
60188diff --git a/include/linux/poison.h b/include/linux/poison.h
60189index 79159de..f1233a9 100644
60190--- a/include/linux/poison.h
60191+++ b/include/linux/poison.h
60192@@ -19,8 +19,8 @@
60193 * under normal circumstances, used to verify that nobody uses
60194 * non-initialized list entries.
60195 */
60196-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60197-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60198+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60199+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60200
60201 /********** include/linux/timer.h **********/
60202 /*
60203diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60204index 58969b2..ead129b 100644
60205--- a/include/linux/preempt.h
60206+++ b/include/linux/preempt.h
60207@@ -123,7 +123,7 @@ struct preempt_ops {
60208 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60209 void (*sched_out)(struct preempt_notifier *notifier,
60210 struct task_struct *next);
60211-};
60212+} __no_const;
60213
60214 /**
60215 * preempt_notifier - key for installing preemption notifiers
60216diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60217index 643b96c..ef55a9c 100644
60218--- a/include/linux/proc_fs.h
60219+++ b/include/linux/proc_fs.h
60220@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60221 return proc_create_data(name, mode, parent, proc_fops, NULL);
60222 }
60223
60224+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60225+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60226+{
60227+#ifdef CONFIG_GRKERNSEC_PROC_USER
60228+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60229+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60230+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60231+#else
60232+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60233+#endif
60234+}
60235+
60236+
60237 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60238 mode_t mode, struct proc_dir_entry *base,
60239 read_proc_t *read_proc, void * data)
60240@@ -258,7 +271,7 @@ union proc_op {
60241 int (*proc_show)(struct seq_file *m,
60242 struct pid_namespace *ns, struct pid *pid,
60243 struct task_struct *task);
60244-};
60245+} __no_const;
60246
60247 struct ctl_table_header;
60248 struct ctl_table;
60249diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60250index 800f113..e9ee2e3 100644
60251--- a/include/linux/ptrace.h
60252+++ b/include/linux/ptrace.h
60253@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60254 extern void exit_ptrace(struct task_struct *tracer);
60255 #define PTRACE_MODE_READ 1
60256 #define PTRACE_MODE_ATTACH 2
60257-/* Returns 0 on success, -errno on denial. */
60258-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60259 /* Returns true on success, false on denial. */
60260 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60261+/* Returns true on success, false on denial. */
60262+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60263+/* Returns true on success, false on denial. */
60264+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60265
60266 static inline int ptrace_reparented(struct task_struct *child)
60267 {
60268diff --git a/include/linux/random.h b/include/linux/random.h
60269index 8f74538..02a1012 100644
60270--- a/include/linux/random.h
60271+++ b/include/linux/random.h
60272@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60273
60274 u32 prandom32(struct rnd_state *);
60275
60276+static inline unsigned long pax_get_random_long(void)
60277+{
60278+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60279+}
60280+
60281 /*
60282 * Handle minimum values for seeds
60283 */
60284 static inline u32 __seed(u32 x, u32 m)
60285 {
60286- return (x < m) ? x + m : x;
60287+ return (x <= m) ? x + m + 1 : x;
60288 }
60289
60290 /**
60291diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60292index e0879a7..a12f962 100644
60293--- a/include/linux/reboot.h
60294+++ b/include/linux/reboot.h
60295@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60296 * Architecture-specific implementations of sys_reboot commands.
60297 */
60298
60299-extern void machine_restart(char *cmd);
60300-extern void machine_halt(void);
60301-extern void machine_power_off(void);
60302+extern void machine_restart(char *cmd) __noreturn;
60303+extern void machine_halt(void) __noreturn;
60304+extern void machine_power_off(void) __noreturn;
60305
60306 extern void machine_shutdown(void);
60307 struct pt_regs;
60308@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60309 */
60310
60311 extern void kernel_restart_prepare(char *cmd);
60312-extern void kernel_restart(char *cmd);
60313-extern void kernel_halt(void);
60314-extern void kernel_power_off(void);
60315+extern void kernel_restart(char *cmd) __noreturn;
60316+extern void kernel_halt(void) __noreturn;
60317+extern void kernel_power_off(void) __noreturn;
60318
60319 extern int C_A_D; /* for sysctl */
60320 void ctrl_alt_del(void);
60321@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60322 * Emergency restart, callable from an interrupt handler.
60323 */
60324
60325-extern void emergency_restart(void);
60326+extern void emergency_restart(void) __noreturn;
60327 #include <asm/emergency-restart.h>
60328
60329 #endif
60330diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60331index 96d465f..b084e05 100644
60332--- a/include/linux/reiserfs_fs.h
60333+++ b/include/linux/reiserfs_fs.h
60334@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60335 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60336
60337 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60338-#define get_generation(s) atomic_read (&fs_generation(s))
60339+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60340 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60341 #define __fs_changed(gen,s) (gen != get_generation (s))
60342 #define fs_changed(gen,s) \
60343diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60344index 52c83b6..18ed7eb 100644
60345--- a/include/linux/reiserfs_fs_sb.h
60346+++ b/include/linux/reiserfs_fs_sb.h
60347@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60348 /* Comment? -Hans */
60349 wait_queue_head_t s_wait;
60350 /* To be obsoleted soon by per buffer seals.. -Hans */
60351- atomic_t s_generation_counter; // increased by one every time the
60352+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60353 // tree gets re-balanced
60354 unsigned long s_properties; /* File system properties. Currently holds
60355 on-disk FS format */
60356diff --git a/include/linux/relay.h b/include/linux/relay.h
60357index 14a86bc..17d0700 100644
60358--- a/include/linux/relay.h
60359+++ b/include/linux/relay.h
60360@@ -159,7 +159,7 @@ struct rchan_callbacks
60361 * The callback should return 0 if successful, negative if not.
60362 */
60363 int (*remove_buf_file)(struct dentry *dentry);
60364-};
60365+} __no_const;
60366
60367 /*
60368 * CONFIG_RELAY kernel API, kernel/relay.c
60369diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60370index c6c6084..5bf1212 100644
60371--- a/include/linux/rfkill.h
60372+++ b/include/linux/rfkill.h
60373@@ -147,6 +147,7 @@ struct rfkill_ops {
60374 void (*query)(struct rfkill *rfkill, void *data);
60375 int (*set_block)(void *data, bool blocked);
60376 };
60377+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60378
60379 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60380 /**
60381diff --git a/include/linux/rio.h b/include/linux/rio.h
60382index 4d50611..c6858a2 100644
60383--- a/include/linux/rio.h
60384+++ b/include/linux/rio.h
60385@@ -315,7 +315,7 @@ struct rio_ops {
60386 int mbox, void *buffer, size_t len);
60387 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60388 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60389-};
60390+} __no_const;
60391
60392 #define RIO_RESOURCE_MEM 0x00000100
60393 #define RIO_RESOURCE_DOORBELL 0x00000200
60394diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60395index 2148b12..519b820 100644
60396--- a/include/linux/rmap.h
60397+++ b/include/linux/rmap.h
60398@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60399 void anon_vma_init(void); /* create anon_vma_cachep */
60400 int anon_vma_prepare(struct vm_area_struct *);
60401 void unlink_anon_vmas(struct vm_area_struct *);
60402-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60403-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60404+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60405+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60406 void __anon_vma_link(struct vm_area_struct *);
60407
60408 static inline void anon_vma_merge(struct vm_area_struct *vma,
60409diff --git a/include/linux/sched.h b/include/linux/sched.h
60410index 1c4f3e9..b4e4851 100644
60411--- a/include/linux/sched.h
60412+++ b/include/linux/sched.h
60413@@ -101,6 +101,7 @@ struct bio_list;
60414 struct fs_struct;
60415 struct perf_event_context;
60416 struct blk_plug;
60417+struct linux_binprm;
60418
60419 /*
60420 * List of flags we want to share for kernel threads,
60421@@ -380,10 +381,13 @@ struct user_namespace;
60422 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60423
60424 extern int sysctl_max_map_count;
60425+extern unsigned long sysctl_heap_stack_gap;
60426
60427 #include <linux/aio.h>
60428
60429 #ifdef CONFIG_MMU
60430+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60431+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60432 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60433 extern unsigned long
60434 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60435@@ -629,6 +633,17 @@ struct signal_struct {
60436 #ifdef CONFIG_TASKSTATS
60437 struct taskstats *stats;
60438 #endif
60439+
60440+#ifdef CONFIG_GRKERNSEC
60441+ u32 curr_ip;
60442+ u32 saved_ip;
60443+ u32 gr_saddr;
60444+ u32 gr_daddr;
60445+ u16 gr_sport;
60446+ u16 gr_dport;
60447+ u8 used_accept:1;
60448+#endif
60449+
60450 #ifdef CONFIG_AUDIT
60451 unsigned audit_tty;
60452 struct tty_audit_buf *tty_audit_buf;
60453@@ -710,6 +725,11 @@ struct user_struct {
60454 struct key *session_keyring; /* UID's default session keyring */
60455 #endif
60456
60457+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60458+ unsigned int banned;
60459+ unsigned long ban_expires;
60460+#endif
60461+
60462 /* Hash table maintenance information */
60463 struct hlist_node uidhash_node;
60464 uid_t uid;
60465@@ -1337,8 +1357,8 @@ struct task_struct {
60466 struct list_head thread_group;
60467
60468 struct completion *vfork_done; /* for vfork() */
60469- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60470- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60471+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60472+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60473
60474 cputime_t utime, stime, utimescaled, stimescaled;
60475 cputime_t gtime;
60476@@ -1354,13 +1374,6 @@ struct task_struct {
60477 struct task_cputime cputime_expires;
60478 struct list_head cpu_timers[3];
60479
60480-/* process credentials */
60481- const struct cred __rcu *real_cred; /* objective and real subjective task
60482- * credentials (COW) */
60483- const struct cred __rcu *cred; /* effective (overridable) subjective task
60484- * credentials (COW) */
60485- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60486-
60487 char comm[TASK_COMM_LEN]; /* executable name excluding path
60488 - access with [gs]et_task_comm (which lock
60489 it with task_lock())
60490@@ -1377,8 +1390,16 @@ struct task_struct {
60491 #endif
60492 /* CPU-specific state of this task */
60493 struct thread_struct thread;
60494+/* thread_info moved to task_struct */
60495+#ifdef CONFIG_X86
60496+ struct thread_info tinfo;
60497+#endif
60498 /* filesystem information */
60499 struct fs_struct *fs;
60500+
60501+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60502+ * credentials (COW) */
60503+
60504 /* open file information */
60505 struct files_struct *files;
60506 /* namespaces */
60507@@ -1425,6 +1446,11 @@ struct task_struct {
60508 struct rt_mutex_waiter *pi_blocked_on;
60509 #endif
60510
60511+/* process credentials */
60512+ const struct cred __rcu *real_cred; /* objective and real subjective task
60513+ * credentials (COW) */
60514+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60515+
60516 #ifdef CONFIG_DEBUG_MUTEXES
60517 /* mutex deadlock detection */
60518 struct mutex_waiter *blocked_on;
60519@@ -1540,6 +1566,27 @@ struct task_struct {
60520 unsigned long default_timer_slack_ns;
60521
60522 struct list_head *scm_work_list;
60523+
60524+#ifdef CONFIG_GRKERNSEC
60525+ /* grsecurity */
60526+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60527+ u64 exec_id;
60528+#endif
60529+#ifdef CONFIG_GRKERNSEC_SETXID
60530+ const struct cred *delayed_cred;
60531+#endif
60532+ struct dentry *gr_chroot_dentry;
60533+ struct acl_subject_label *acl;
60534+ struct acl_role_label *role;
60535+ struct file *exec_file;
60536+ u16 acl_role_id;
60537+ /* is this the task that authenticated to the special role */
60538+ u8 acl_sp_role;
60539+ u8 is_writable;
60540+ u8 brute;
60541+ u8 gr_is_chrooted;
60542+#endif
60543+
60544 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60545 /* Index of current stored address in ret_stack */
60546 int curr_ret_stack;
60547@@ -1574,6 +1621,51 @@ struct task_struct {
60548 #endif
60549 };
60550
60551+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60552+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60553+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60554+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60555+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60556+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60557+
60558+#ifdef CONFIG_PAX_SOFTMODE
60559+extern int pax_softmode;
60560+#endif
60561+
60562+extern int pax_check_flags(unsigned long *);
60563+
60564+/* if tsk != current then task_lock must be held on it */
60565+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60566+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60567+{
60568+ if (likely(tsk->mm))
60569+ return tsk->mm->pax_flags;
60570+ else
60571+ return 0UL;
60572+}
60573+
60574+/* if tsk != current then task_lock must be held on it */
60575+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60576+{
60577+ if (likely(tsk->mm)) {
60578+ tsk->mm->pax_flags = flags;
60579+ return 0;
60580+ }
60581+ return -EINVAL;
60582+}
60583+#endif
60584+
60585+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60586+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60587+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60588+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60589+#endif
60590+
60591+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60592+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60593+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60594+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60595+
60596 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60597 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60598
60599@@ -2081,7 +2173,9 @@ void yield(void);
60600 extern struct exec_domain default_exec_domain;
60601
60602 union thread_union {
60603+#ifndef CONFIG_X86
60604 struct thread_info thread_info;
60605+#endif
60606 unsigned long stack[THREAD_SIZE/sizeof(long)];
60607 };
60608
60609@@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60610 */
60611
60612 extern struct task_struct *find_task_by_vpid(pid_t nr);
60613+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60614 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60615 struct pid_namespace *ns);
60616
60617@@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60618 extern void mmput(struct mm_struct *);
60619 /* Grab a reference to a task's mm, if it is not already going away */
60620 extern struct mm_struct *get_task_mm(struct task_struct *task);
60621+/*
60622+ * Grab a reference to a task's mm, if it is not already going away
60623+ * and ptrace_may_access with the mode parameter passed to it
60624+ * succeeds.
60625+ */
60626+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60627 /* Remove the current tasks stale references to the old mm_struct */
60628 extern void mm_release(struct task_struct *, struct mm_struct *);
60629 /* Allocate a new mm structure and copy contents from tsk->mm */
60630@@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60631 extern void exit_itimers(struct signal_struct *);
60632 extern void flush_itimer_signals(void);
60633
60634-extern NORET_TYPE void do_group_exit(int);
60635+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60636
60637 extern void daemonize(const char *, ...);
60638 extern int allow_signal(int);
60639@@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60640
60641 #endif
60642
60643-static inline int object_is_on_stack(void *obj)
60644+static inline int object_starts_on_stack(void *obj)
60645 {
60646- void *stack = task_stack_page(current);
60647+ const void *stack = task_stack_page(current);
60648
60649 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60650 }
60651
60652+#ifdef CONFIG_PAX_USERCOPY
60653+extern int object_is_on_stack(const void *obj, unsigned long len);
60654+#endif
60655+
60656 extern void thread_info_cache_init(void);
60657
60658 #ifdef CONFIG_DEBUG_STACK_USAGE
60659diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60660index 899fbb4..1cb4138 100644
60661--- a/include/linux/screen_info.h
60662+++ b/include/linux/screen_info.h
60663@@ -43,7 +43,8 @@ struct screen_info {
60664 __u16 pages; /* 0x32 */
60665 __u16 vesa_attributes; /* 0x34 */
60666 __u32 capabilities; /* 0x36 */
60667- __u8 _reserved[6]; /* 0x3a */
60668+ __u16 vesapm_size; /* 0x3a */
60669+ __u8 _reserved[4]; /* 0x3c */
60670 } __attribute__((packed));
60671
60672 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60673diff --git a/include/linux/security.h b/include/linux/security.h
60674index e8c619d..e0cbd1c 100644
60675--- a/include/linux/security.h
60676+++ b/include/linux/security.h
60677@@ -37,6 +37,7 @@
60678 #include <linux/xfrm.h>
60679 #include <linux/slab.h>
60680 #include <linux/xattr.h>
60681+#include <linux/grsecurity.h>
60682 #include <net/flow.h>
60683
60684 /* Maximum number of letters for an LSM name string */
60685diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60686index 0b69a46..b2ffa4c 100644
60687--- a/include/linux/seq_file.h
60688+++ b/include/linux/seq_file.h
60689@@ -24,6 +24,9 @@ struct seq_file {
60690 struct mutex lock;
60691 const struct seq_operations *op;
60692 int poll_event;
60693+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60694+ u64 exec_id;
60695+#endif
60696 void *private;
60697 };
60698
60699@@ -33,6 +36,7 @@ struct seq_operations {
60700 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60701 int (*show) (struct seq_file *m, void *v);
60702 };
60703+typedef struct seq_operations __no_const seq_operations_no_const;
60704
60705 #define SEQ_SKIP 1
60706
60707diff --git a/include/linux/shm.h b/include/linux/shm.h
60708index 92808b8..c28cac4 100644
60709--- a/include/linux/shm.h
60710+++ b/include/linux/shm.h
60711@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60712
60713 /* The task created the shm object. NULL if the task is dead. */
60714 struct task_struct *shm_creator;
60715+#ifdef CONFIG_GRKERNSEC
60716+ time_t shm_createtime;
60717+ pid_t shm_lapid;
60718+#endif
60719 };
60720
60721 /* shm_mode upper byte flags */
60722diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60723index fe86488..1563c1c 100644
60724--- a/include/linux/skbuff.h
60725+++ b/include/linux/skbuff.h
60726@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60727 */
60728 static inline int skb_queue_empty(const struct sk_buff_head *list)
60729 {
60730- return list->next == (struct sk_buff *)list;
60731+ return list->next == (const struct sk_buff *)list;
60732 }
60733
60734 /**
60735@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60736 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60737 const struct sk_buff *skb)
60738 {
60739- return skb->next == (struct sk_buff *)list;
60740+ return skb->next == (const struct sk_buff *)list;
60741 }
60742
60743 /**
60744@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60745 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60746 const struct sk_buff *skb)
60747 {
60748- return skb->prev == (struct sk_buff *)list;
60749+ return skb->prev == (const struct sk_buff *)list;
60750 }
60751
60752 /**
60753@@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60754 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60755 */
60756 #ifndef NET_SKB_PAD
60757-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60758+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60759 #endif
60760
60761 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60762diff --git a/include/linux/slab.h b/include/linux/slab.h
60763index 573c809..e84c132 100644
60764--- a/include/linux/slab.h
60765+++ b/include/linux/slab.h
60766@@ -11,12 +11,20 @@
60767
60768 #include <linux/gfp.h>
60769 #include <linux/types.h>
60770+#include <linux/err.h>
60771
60772 /*
60773 * Flags to pass to kmem_cache_create().
60774 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60775 */
60776 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60777+
60778+#ifdef CONFIG_PAX_USERCOPY
60779+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60780+#else
60781+#define SLAB_USERCOPY 0x00000000UL
60782+#endif
60783+
60784 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60785 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60786 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60787@@ -87,10 +95,13 @@
60788 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60789 * Both make kfree a no-op.
60790 */
60791-#define ZERO_SIZE_PTR ((void *)16)
60792+#define ZERO_SIZE_PTR \
60793+({ \
60794+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60795+ (void *)(-MAX_ERRNO-1L); \
60796+})
60797
60798-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60799- (unsigned long)ZERO_SIZE_PTR)
60800+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60801
60802 /*
60803 * struct kmem_cache related prototypes
60804@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60805 void kfree(const void *);
60806 void kzfree(const void *);
60807 size_t ksize(const void *);
60808+void check_object_size(const void *ptr, unsigned long n, bool to);
60809
60810 /*
60811 * Allocator specific definitions. These are mainly used to establish optimized
60812@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60813
60814 void __init kmem_cache_init_late(void);
60815
60816+#define kmalloc(x, y) \
60817+({ \
60818+ void *___retval; \
60819+ intoverflow_t ___x = (intoverflow_t)x; \
60820+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60821+ ___retval = NULL; \
60822+ else \
60823+ ___retval = kmalloc((size_t)___x, (y)); \
60824+ ___retval; \
60825+})
60826+
60827+#define kmalloc_node(x, y, z) \
60828+({ \
60829+ void *___retval; \
60830+ intoverflow_t ___x = (intoverflow_t)x; \
60831+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60832+ ___retval = NULL; \
60833+ else \
60834+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60835+ ___retval; \
60836+})
60837+
60838+#define kzalloc(x, y) \
60839+({ \
60840+ void *___retval; \
60841+ intoverflow_t ___x = (intoverflow_t)x; \
60842+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60843+ ___retval = NULL; \
60844+ else \
60845+ ___retval = kzalloc((size_t)___x, (y)); \
60846+ ___retval; \
60847+})
60848+
60849+#define __krealloc(x, y, z) \
60850+({ \
60851+ void *___retval; \
60852+ intoverflow_t ___y = (intoverflow_t)y; \
60853+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60854+ ___retval = NULL; \
60855+ else \
60856+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60857+ ___retval; \
60858+})
60859+
60860+#define krealloc(x, y, z) \
60861+({ \
60862+ void *___retval; \
60863+ intoverflow_t ___y = (intoverflow_t)y; \
60864+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60865+ ___retval = NULL; \
60866+ else \
60867+ ___retval = krealloc((x), (size_t)___y, (z)); \
60868+ ___retval; \
60869+})
60870+
60871 #endif /* _LINUX_SLAB_H */
60872diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60873index d00e0ba..1b3bf7b 100644
60874--- a/include/linux/slab_def.h
60875+++ b/include/linux/slab_def.h
60876@@ -68,10 +68,10 @@ struct kmem_cache {
60877 unsigned long node_allocs;
60878 unsigned long node_frees;
60879 unsigned long node_overflow;
60880- atomic_t allochit;
60881- atomic_t allocmiss;
60882- atomic_t freehit;
60883- atomic_t freemiss;
60884+ atomic_unchecked_t allochit;
60885+ atomic_unchecked_t allocmiss;
60886+ atomic_unchecked_t freehit;
60887+ atomic_unchecked_t freemiss;
60888
60889 /*
60890 * If debugging is enabled, then the allocator can add additional
60891diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60892index a32bcfd..53b71f4 100644
60893--- a/include/linux/slub_def.h
60894+++ b/include/linux/slub_def.h
60895@@ -89,7 +89,7 @@ struct kmem_cache {
60896 struct kmem_cache_order_objects max;
60897 struct kmem_cache_order_objects min;
60898 gfp_t allocflags; /* gfp flags to use on each alloc */
60899- int refcount; /* Refcount for slab cache destroy */
60900+ atomic_t refcount; /* Refcount for slab cache destroy */
60901 void (*ctor)(void *);
60902 int inuse; /* Offset to metadata */
60903 int align; /* Alignment */
60904@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60905 }
60906
60907 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60908-void *__kmalloc(size_t size, gfp_t flags);
60909+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60910
60911 static __always_inline void *
60912 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60913diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60914index de8832d..0147b46 100644
60915--- a/include/linux/sonet.h
60916+++ b/include/linux/sonet.h
60917@@ -61,7 +61,7 @@ struct sonet_stats {
60918 #include <linux/atomic.h>
60919
60920 struct k_sonet_stats {
60921-#define __HANDLE_ITEM(i) atomic_t i
60922+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60923 __SONET_ITEMS
60924 #undef __HANDLE_ITEM
60925 };
60926diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60927index 3d8f9c4..69f1c0a 100644
60928--- a/include/linux/sunrpc/clnt.h
60929+++ b/include/linux/sunrpc/clnt.h
60930@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60931 {
60932 switch (sap->sa_family) {
60933 case AF_INET:
60934- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60935+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60936 case AF_INET6:
60937- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60938+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60939 }
60940 return 0;
60941 }
60942@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60943 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60944 const struct sockaddr *src)
60945 {
60946- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60947+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60948 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60949
60950 dsin->sin_family = ssin->sin_family;
60951@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60952 if (sa->sa_family != AF_INET6)
60953 return 0;
60954
60955- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60956+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60957 }
60958
60959 #endif /* __KERNEL__ */
60960diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60961index e775689..9e206d9 100644
60962--- a/include/linux/sunrpc/sched.h
60963+++ b/include/linux/sunrpc/sched.h
60964@@ -105,6 +105,7 @@ struct rpc_call_ops {
60965 void (*rpc_call_done)(struct rpc_task *, void *);
60966 void (*rpc_release)(void *);
60967 };
60968+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60969
60970 struct rpc_task_setup {
60971 struct rpc_task *task;
60972diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
60973index c14fe86..393245e 100644
60974--- a/include/linux/sunrpc/svc_rdma.h
60975+++ b/include/linux/sunrpc/svc_rdma.h
60976@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60977 extern unsigned int svcrdma_max_requests;
60978 extern unsigned int svcrdma_max_req_size;
60979
60980-extern atomic_t rdma_stat_recv;
60981-extern atomic_t rdma_stat_read;
60982-extern atomic_t rdma_stat_write;
60983-extern atomic_t rdma_stat_sq_starve;
60984-extern atomic_t rdma_stat_rq_starve;
60985-extern atomic_t rdma_stat_rq_poll;
60986-extern atomic_t rdma_stat_rq_prod;
60987-extern atomic_t rdma_stat_sq_poll;
60988-extern atomic_t rdma_stat_sq_prod;
60989+extern atomic_unchecked_t rdma_stat_recv;
60990+extern atomic_unchecked_t rdma_stat_read;
60991+extern atomic_unchecked_t rdma_stat_write;
60992+extern atomic_unchecked_t rdma_stat_sq_starve;
60993+extern atomic_unchecked_t rdma_stat_rq_starve;
60994+extern atomic_unchecked_t rdma_stat_rq_poll;
60995+extern atomic_unchecked_t rdma_stat_rq_prod;
60996+extern atomic_unchecked_t rdma_stat_sq_poll;
60997+extern atomic_unchecked_t rdma_stat_sq_prod;
60998
60999 #define RPCRDMA_VERSION 1
61000
61001diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61002index 703cfa3..0b8ca72ac 100644
61003--- a/include/linux/sysctl.h
61004+++ b/include/linux/sysctl.h
61005@@ -155,7 +155,11 @@ enum
61006 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61007 };
61008
61009-
61010+#ifdef CONFIG_PAX_SOFTMODE
61011+enum {
61012+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61013+};
61014+#endif
61015
61016 /* CTL_VM names: */
61017 enum
61018@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61019
61020 extern int proc_dostring(struct ctl_table *, int,
61021 void __user *, size_t *, loff_t *);
61022+extern int proc_dostring_modpriv(struct ctl_table *, int,
61023+ void __user *, size_t *, loff_t *);
61024 extern int proc_dointvec(struct ctl_table *, int,
61025 void __user *, size_t *, loff_t *);
61026 extern int proc_dointvec_minmax(struct ctl_table *, int,
61027diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61028index a71a292..51bd91d 100644
61029--- a/include/linux/tracehook.h
61030+++ b/include/linux/tracehook.h
61031@@ -54,12 +54,12 @@ struct linux_binprm;
61032 /*
61033 * ptrace report for syscall entry and exit looks identical.
61034 */
61035-static inline void ptrace_report_syscall(struct pt_regs *regs)
61036+static inline int ptrace_report_syscall(struct pt_regs *regs)
61037 {
61038 int ptrace = current->ptrace;
61039
61040 if (!(ptrace & PT_PTRACED))
61041- return;
61042+ return 0;
61043
61044 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61045
61046@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61047 send_sig(current->exit_code, current, 1);
61048 current->exit_code = 0;
61049 }
61050+
61051+ return fatal_signal_pending(current);
61052 }
61053
61054 /**
61055@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61056 static inline __must_check int tracehook_report_syscall_entry(
61057 struct pt_regs *regs)
61058 {
61059- ptrace_report_syscall(regs);
61060- return 0;
61061+ return ptrace_report_syscall(regs);
61062 }
61063
61064 /**
61065diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61066index ff7dc08..893e1bd 100644
61067--- a/include/linux/tty_ldisc.h
61068+++ b/include/linux/tty_ldisc.h
61069@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61070
61071 struct module *owner;
61072
61073- int refcount;
61074+ atomic_t refcount;
61075 };
61076
61077 struct tty_ldisc {
61078diff --git a/include/linux/types.h b/include/linux/types.h
61079index 57a9723..dbe234a 100644
61080--- a/include/linux/types.h
61081+++ b/include/linux/types.h
61082@@ -213,10 +213,26 @@ typedef struct {
61083 int counter;
61084 } atomic_t;
61085
61086+#ifdef CONFIG_PAX_REFCOUNT
61087+typedef struct {
61088+ int counter;
61089+} atomic_unchecked_t;
61090+#else
61091+typedef atomic_t atomic_unchecked_t;
61092+#endif
61093+
61094 #ifdef CONFIG_64BIT
61095 typedef struct {
61096 long counter;
61097 } atomic64_t;
61098+
61099+#ifdef CONFIG_PAX_REFCOUNT
61100+typedef struct {
61101+ long counter;
61102+} atomic64_unchecked_t;
61103+#else
61104+typedef atomic64_t atomic64_unchecked_t;
61105+#endif
61106 #endif
61107
61108 struct list_head {
61109diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61110index 5ca0951..ab496a5 100644
61111--- a/include/linux/uaccess.h
61112+++ b/include/linux/uaccess.h
61113@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61114 long ret; \
61115 mm_segment_t old_fs = get_fs(); \
61116 \
61117- set_fs(KERNEL_DS); \
61118 pagefault_disable(); \
61119- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61120- pagefault_enable(); \
61121+ set_fs(KERNEL_DS); \
61122+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61123 set_fs(old_fs); \
61124+ pagefault_enable(); \
61125 ret; \
61126 })
61127
61128diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61129index 99c1b4d..bb94261 100644
61130--- a/include/linux/unaligned/access_ok.h
61131+++ b/include/linux/unaligned/access_ok.h
61132@@ -6,32 +6,32 @@
61133
61134 static inline u16 get_unaligned_le16(const void *p)
61135 {
61136- return le16_to_cpup((__le16 *)p);
61137+ return le16_to_cpup((const __le16 *)p);
61138 }
61139
61140 static inline u32 get_unaligned_le32(const void *p)
61141 {
61142- return le32_to_cpup((__le32 *)p);
61143+ return le32_to_cpup((const __le32 *)p);
61144 }
61145
61146 static inline u64 get_unaligned_le64(const void *p)
61147 {
61148- return le64_to_cpup((__le64 *)p);
61149+ return le64_to_cpup((const __le64 *)p);
61150 }
61151
61152 static inline u16 get_unaligned_be16(const void *p)
61153 {
61154- return be16_to_cpup((__be16 *)p);
61155+ return be16_to_cpup((const __be16 *)p);
61156 }
61157
61158 static inline u32 get_unaligned_be32(const void *p)
61159 {
61160- return be32_to_cpup((__be32 *)p);
61161+ return be32_to_cpup((const __be32 *)p);
61162 }
61163
61164 static inline u64 get_unaligned_be64(const void *p)
61165 {
61166- return be64_to_cpup((__be64 *)p);
61167+ return be64_to_cpup((const __be64 *)p);
61168 }
61169
61170 static inline void put_unaligned_le16(u16 val, void *p)
61171diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61172index e5a40c3..20ab0f6 100644
61173--- a/include/linux/usb/renesas_usbhs.h
61174+++ b/include/linux/usb/renesas_usbhs.h
61175@@ -39,7 +39,7 @@ enum {
61176 */
61177 struct renesas_usbhs_driver_callback {
61178 int (*notify_hotplug)(struct platform_device *pdev);
61179-};
61180+} __no_const;
61181
61182 /*
61183 * callback functions for platform
61184@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61185 * VBUS control is needed for Host
61186 */
61187 int (*set_vbus)(struct platform_device *pdev, int enable);
61188-};
61189+} __no_const;
61190
61191 /*
61192 * parameters for renesas usbhs
61193diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61194index 6f8fbcf..8259001 100644
61195--- a/include/linux/vermagic.h
61196+++ b/include/linux/vermagic.h
61197@@ -25,9 +25,35 @@
61198 #define MODULE_ARCH_VERMAGIC ""
61199 #endif
61200
61201+#ifdef CONFIG_PAX_REFCOUNT
61202+#define MODULE_PAX_REFCOUNT "REFCOUNT "
61203+#else
61204+#define MODULE_PAX_REFCOUNT ""
61205+#endif
61206+
61207+#ifdef CONSTIFY_PLUGIN
61208+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61209+#else
61210+#define MODULE_CONSTIFY_PLUGIN ""
61211+#endif
61212+
61213+#ifdef STACKLEAK_PLUGIN
61214+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61215+#else
61216+#define MODULE_STACKLEAK_PLUGIN ""
61217+#endif
61218+
61219+#ifdef CONFIG_GRKERNSEC
61220+#define MODULE_GRSEC "GRSEC "
61221+#else
61222+#define MODULE_GRSEC ""
61223+#endif
61224+
61225 #define VERMAGIC_STRING \
61226 UTS_RELEASE " " \
61227 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61228 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61229- MODULE_ARCH_VERMAGIC
61230+ MODULE_ARCH_VERMAGIC \
61231+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61232+ MODULE_GRSEC
61233
61234diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61235index 4bde182..aec92c1 100644
61236--- a/include/linux/vmalloc.h
61237+++ b/include/linux/vmalloc.h
61238@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61239 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61240 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61241 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61242+
61243+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61244+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61245+#endif
61246+
61247 /* bits [20..32] reserved for arch specific ioremap internals */
61248
61249 /*
61250@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61251 # endif
61252 #endif
61253
61254+#define vmalloc(x) \
61255+({ \
61256+ void *___retval; \
61257+ intoverflow_t ___x = (intoverflow_t)x; \
61258+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61259+ ___retval = NULL; \
61260+ else \
61261+ ___retval = vmalloc((unsigned long)___x); \
61262+ ___retval; \
61263+})
61264+
61265+#define vzalloc(x) \
61266+({ \
61267+ void *___retval; \
61268+ intoverflow_t ___x = (intoverflow_t)x; \
61269+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61270+ ___retval = NULL; \
61271+ else \
61272+ ___retval = vzalloc((unsigned long)___x); \
61273+ ___retval; \
61274+})
61275+
61276+#define __vmalloc(x, y, z) \
61277+({ \
61278+ void *___retval; \
61279+ intoverflow_t ___x = (intoverflow_t)x; \
61280+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61281+ ___retval = NULL; \
61282+ else \
61283+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61284+ ___retval; \
61285+})
61286+
61287+#define vmalloc_user(x) \
61288+({ \
61289+ void *___retval; \
61290+ intoverflow_t ___x = (intoverflow_t)x; \
61291+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61292+ ___retval = NULL; \
61293+ else \
61294+ ___retval = vmalloc_user((unsigned long)___x); \
61295+ ___retval; \
61296+})
61297+
61298+#define vmalloc_exec(x) \
61299+({ \
61300+ void *___retval; \
61301+ intoverflow_t ___x = (intoverflow_t)x; \
61302+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61303+ ___retval = NULL; \
61304+ else \
61305+ ___retval = vmalloc_exec((unsigned long)___x); \
61306+ ___retval; \
61307+})
61308+
61309+#define vmalloc_node(x, y) \
61310+({ \
61311+ void *___retval; \
61312+ intoverflow_t ___x = (intoverflow_t)x; \
61313+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61314+ ___retval = NULL; \
61315+ else \
61316+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61317+ ___retval; \
61318+})
61319+
61320+#define vzalloc_node(x, y) \
61321+({ \
61322+ void *___retval; \
61323+ intoverflow_t ___x = (intoverflow_t)x; \
61324+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61325+ ___retval = NULL; \
61326+ else \
61327+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61328+ ___retval; \
61329+})
61330+
61331+#define vmalloc_32(x) \
61332+({ \
61333+ void *___retval; \
61334+ intoverflow_t ___x = (intoverflow_t)x; \
61335+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61336+ ___retval = NULL; \
61337+ else \
61338+ ___retval = vmalloc_32((unsigned long)___x); \
61339+ ___retval; \
61340+})
61341+
61342+#define vmalloc_32_user(x) \
61343+({ \
61344+void *___retval; \
61345+ intoverflow_t ___x = (intoverflow_t)x; \
61346+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61347+ ___retval = NULL; \
61348+ else \
61349+ ___retval = vmalloc_32_user((unsigned long)___x);\
61350+ ___retval; \
61351+})
61352+
61353 #endif /* _LINUX_VMALLOC_H */
61354diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61355index 65efb92..137adbb 100644
61356--- a/include/linux/vmstat.h
61357+++ b/include/linux/vmstat.h
61358@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61359 /*
61360 * Zone based page accounting with per cpu differentials.
61361 */
61362-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61363+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61364
61365 static inline void zone_page_state_add(long x, struct zone *zone,
61366 enum zone_stat_item item)
61367 {
61368- atomic_long_add(x, &zone->vm_stat[item]);
61369- atomic_long_add(x, &vm_stat[item]);
61370+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61371+ atomic_long_add_unchecked(x, &vm_stat[item]);
61372 }
61373
61374 static inline unsigned long global_page_state(enum zone_stat_item item)
61375 {
61376- long x = atomic_long_read(&vm_stat[item]);
61377+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61378 #ifdef CONFIG_SMP
61379 if (x < 0)
61380 x = 0;
61381@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61382 static inline unsigned long zone_page_state(struct zone *zone,
61383 enum zone_stat_item item)
61384 {
61385- long x = atomic_long_read(&zone->vm_stat[item]);
61386+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61387 #ifdef CONFIG_SMP
61388 if (x < 0)
61389 x = 0;
61390@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61391 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61392 enum zone_stat_item item)
61393 {
61394- long x = atomic_long_read(&zone->vm_stat[item]);
61395+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61396
61397 #ifdef CONFIG_SMP
61398 int cpu;
61399@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61400
61401 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61402 {
61403- atomic_long_inc(&zone->vm_stat[item]);
61404- atomic_long_inc(&vm_stat[item]);
61405+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61406+ atomic_long_inc_unchecked(&vm_stat[item]);
61407 }
61408
61409 static inline void __inc_zone_page_state(struct page *page,
61410@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61411
61412 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61413 {
61414- atomic_long_dec(&zone->vm_stat[item]);
61415- atomic_long_dec(&vm_stat[item]);
61416+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61417+ atomic_long_dec_unchecked(&vm_stat[item]);
61418 }
61419
61420 static inline void __dec_zone_page_state(struct page *page,
61421diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61422index e5d1220..ef6e406 100644
61423--- a/include/linux/xattr.h
61424+++ b/include/linux/xattr.h
61425@@ -57,6 +57,11 @@
61426 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61427 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61428
61429+/* User namespace */
61430+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61431+#define XATTR_PAX_FLAGS_SUFFIX "flags"
61432+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61433+
61434 #ifdef __KERNEL__
61435
61436 #include <linux/types.h>
61437diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61438index 4aeff96..b378cdc 100644
61439--- a/include/media/saa7146_vv.h
61440+++ b/include/media/saa7146_vv.h
61441@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61442 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61443
61444 /* the extension can override this */
61445- struct v4l2_ioctl_ops ops;
61446+ v4l2_ioctl_ops_no_const ops;
61447 /* pointer to the saa7146 core ops */
61448 const struct v4l2_ioctl_ops *core_ops;
61449
61450diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61451index c7c40f1..4f01585 100644
61452--- a/include/media/v4l2-dev.h
61453+++ b/include/media/v4l2-dev.h
61454@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61455
61456
61457 struct v4l2_file_operations {
61458- struct module *owner;
61459+ struct module * const owner;
61460 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61461 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61462 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61463@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61464 int (*open) (struct file *);
61465 int (*release) (struct file *);
61466 };
61467+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61468
61469 /*
61470 * Newer version of video_device, handled by videodev2.c
61471diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61472index 4d1c74a..65e1221 100644
61473--- a/include/media/v4l2-ioctl.h
61474+++ b/include/media/v4l2-ioctl.h
61475@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61476 long (*vidioc_default) (struct file *file, void *fh,
61477 bool valid_prio, int cmd, void *arg);
61478 };
61479-
61480+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61481
61482 /* v4l debugging and diagnostics */
61483
61484diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61485index 8d55251..dfe5b0a 100644
61486--- a/include/net/caif/caif_hsi.h
61487+++ b/include/net/caif/caif_hsi.h
61488@@ -98,7 +98,7 @@ struct cfhsi_drv {
61489 void (*rx_done_cb) (struct cfhsi_drv *drv);
61490 void (*wake_up_cb) (struct cfhsi_drv *drv);
61491 void (*wake_down_cb) (struct cfhsi_drv *drv);
61492-};
61493+} __no_const;
61494
61495 /* Structure implemented by HSI device. */
61496 struct cfhsi_dev {
61497diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61498index 9e5425b..8136ffc 100644
61499--- a/include/net/caif/cfctrl.h
61500+++ b/include/net/caif/cfctrl.h
61501@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61502 void (*radioset_rsp)(void);
61503 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61504 struct cflayer *client_layer);
61505-};
61506+} __no_const;
61507
61508 /* Link Setup Parameters for CAIF-Links. */
61509 struct cfctrl_link_param {
61510@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61511 struct cfctrl {
61512 struct cfsrvl serv;
61513 struct cfctrl_rsp res;
61514- atomic_t req_seq_no;
61515- atomic_t rsp_seq_no;
61516+ atomic_unchecked_t req_seq_no;
61517+ atomic_unchecked_t rsp_seq_no;
61518 struct list_head list;
61519 /* Protects from simultaneous access to first_req list */
61520 spinlock_t info_list_lock;
61521diff --git a/include/net/flow.h b/include/net/flow.h
61522index 57f15a7..0de26c6 100644
61523--- a/include/net/flow.h
61524+++ b/include/net/flow.h
61525@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61526
61527 extern void flow_cache_flush(void);
61528 extern void flow_cache_flush_deferred(void);
61529-extern atomic_t flow_cache_genid;
61530+extern atomic_unchecked_t flow_cache_genid;
61531
61532 #endif
61533diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61534index e9ff3fc..9d3e5c7 100644
61535--- a/include/net/inetpeer.h
61536+++ b/include/net/inetpeer.h
61537@@ -48,8 +48,8 @@ struct inet_peer {
61538 */
61539 union {
61540 struct {
61541- atomic_t rid; /* Frag reception counter */
61542- atomic_t ip_id_count; /* IP ID for the next packet */
61543+ atomic_unchecked_t rid; /* Frag reception counter */
61544+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61545 __u32 tcp_ts;
61546 __u32 tcp_ts_stamp;
61547 };
61548@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61549 more++;
61550 inet_peer_refcheck(p);
61551 do {
61552- old = atomic_read(&p->ip_id_count);
61553+ old = atomic_read_unchecked(&p->ip_id_count);
61554 new = old + more;
61555 if (!new)
61556 new = 1;
61557- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61558+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61559 return new;
61560 }
61561
61562diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61563index 10422ef..662570f 100644
61564--- a/include/net/ip_fib.h
61565+++ b/include/net/ip_fib.h
61566@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61567
61568 #define FIB_RES_SADDR(net, res) \
61569 ((FIB_RES_NH(res).nh_saddr_genid == \
61570- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61571+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61572 FIB_RES_NH(res).nh_saddr : \
61573 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61574 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61575diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61576index e5a7b9a..f4fc44b 100644
61577--- a/include/net/ip_vs.h
61578+++ b/include/net/ip_vs.h
61579@@ -509,7 +509,7 @@ struct ip_vs_conn {
61580 struct ip_vs_conn *control; /* Master control connection */
61581 atomic_t n_control; /* Number of controlled ones */
61582 struct ip_vs_dest *dest; /* real server */
61583- atomic_t in_pkts; /* incoming packet counter */
61584+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61585
61586 /* packet transmitter for different forwarding methods. If it
61587 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61588@@ -647,7 +647,7 @@ struct ip_vs_dest {
61589 __be16 port; /* port number of the server */
61590 union nf_inet_addr addr; /* IP address of the server */
61591 volatile unsigned flags; /* dest status flags */
61592- atomic_t conn_flags; /* flags to copy to conn */
61593+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61594 atomic_t weight; /* server weight */
61595
61596 atomic_t refcnt; /* reference counter */
61597diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61598index 69b610a..fe3962c 100644
61599--- a/include/net/irda/ircomm_core.h
61600+++ b/include/net/irda/ircomm_core.h
61601@@ -51,7 +51,7 @@ typedef struct {
61602 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61603 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61604 struct ircomm_info *);
61605-} call_t;
61606+} __no_const call_t;
61607
61608 struct ircomm_cb {
61609 irda_queue_t queue;
61610diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61611index 59ba38bc..d515662 100644
61612--- a/include/net/irda/ircomm_tty.h
61613+++ b/include/net/irda/ircomm_tty.h
61614@@ -35,6 +35,7 @@
61615 #include <linux/termios.h>
61616 #include <linux/timer.h>
61617 #include <linux/tty.h> /* struct tty_struct */
61618+#include <asm/local.h>
61619
61620 #include <net/irda/irias_object.h>
61621 #include <net/irda/ircomm_core.h>
61622@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61623 unsigned short close_delay;
61624 unsigned short closing_wait; /* time to wait before closing */
61625
61626- int open_count;
61627- int blocked_open; /* # of blocked opens */
61628+ local_t open_count;
61629+ local_t blocked_open; /* # of blocked opens */
61630
61631 /* Protect concurent access to :
61632 * o self->open_count
61633diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61634index f2419cf..473679f 100644
61635--- a/include/net/iucv/af_iucv.h
61636+++ b/include/net/iucv/af_iucv.h
61637@@ -139,7 +139,7 @@ struct iucv_sock {
61638 struct iucv_sock_list {
61639 struct hlist_head head;
61640 rwlock_t lock;
61641- atomic_t autobind_name;
61642+ atomic_unchecked_t autobind_name;
61643 };
61644
61645 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61646diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61647index 2720884..3aa5c25 100644
61648--- a/include/net/neighbour.h
61649+++ b/include/net/neighbour.h
61650@@ -122,7 +122,7 @@ struct neigh_ops {
61651 void (*error_report)(struct neighbour *, struct sk_buff *);
61652 int (*output)(struct neighbour *, struct sk_buff *);
61653 int (*connected_output)(struct neighbour *, struct sk_buff *);
61654-};
61655+} __do_const;
61656
61657 struct pneigh_entry {
61658 struct pneigh_entry *next;
61659diff --git a/include/net/netlink.h b/include/net/netlink.h
61660index cb1f350..3279d2c 100644
61661--- a/include/net/netlink.h
61662+++ b/include/net/netlink.h
61663@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61664 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61665 {
61666 if (mark)
61667- skb_trim(skb, (unsigned char *) mark - skb->data);
61668+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61669 }
61670
61671 /**
61672diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61673index d786b4f..4c3dd41 100644
61674--- a/include/net/netns/ipv4.h
61675+++ b/include/net/netns/ipv4.h
61676@@ -56,8 +56,8 @@ struct netns_ipv4 {
61677
61678 unsigned int sysctl_ping_group_range[2];
61679
61680- atomic_t rt_genid;
61681- atomic_t dev_addr_genid;
61682+ atomic_unchecked_t rt_genid;
61683+ atomic_unchecked_t dev_addr_genid;
61684
61685 #ifdef CONFIG_IP_MROUTE
61686 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61687diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61688index 6a72a58..e6a127d 100644
61689--- a/include/net/sctp/sctp.h
61690+++ b/include/net/sctp/sctp.h
61691@@ -318,9 +318,9 @@ do { \
61692
61693 #else /* SCTP_DEBUG */
61694
61695-#define SCTP_DEBUG_PRINTK(whatever...)
61696-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61697-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61698+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61699+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61700+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61701 #define SCTP_ENABLE_DEBUG
61702 #define SCTP_DISABLE_DEBUG
61703 #define SCTP_ASSERT(expr, str, func)
61704diff --git a/include/net/sock.h b/include/net/sock.h
61705index 32e3937..87a1dbc 100644
61706--- a/include/net/sock.h
61707+++ b/include/net/sock.h
61708@@ -277,7 +277,7 @@ struct sock {
61709 #ifdef CONFIG_RPS
61710 __u32 sk_rxhash;
61711 #endif
61712- atomic_t sk_drops;
61713+ atomic_unchecked_t sk_drops;
61714 int sk_rcvbuf;
61715
61716 struct sk_filter __rcu *sk_filter;
61717@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61718 }
61719
61720 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61721- char __user *from, char *to,
61722+ char __user *from, unsigned char *to,
61723 int copy, int offset)
61724 {
61725 if (skb->ip_summed == CHECKSUM_NONE) {
61726diff --git a/include/net/tcp.h b/include/net/tcp.h
61727index bb18c4d..bb87972 100644
61728--- a/include/net/tcp.h
61729+++ b/include/net/tcp.h
61730@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61731 char *name;
61732 sa_family_t family;
61733 const struct file_operations *seq_fops;
61734- struct seq_operations seq_ops;
61735+ seq_operations_no_const seq_ops;
61736 };
61737
61738 struct tcp_iter_state {
61739diff --git a/include/net/udp.h b/include/net/udp.h
61740index 3b285f4..0219639 100644
61741--- a/include/net/udp.h
61742+++ b/include/net/udp.h
61743@@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61744 sa_family_t family;
61745 struct udp_table *udp_table;
61746 const struct file_operations *seq_fops;
61747- struct seq_operations seq_ops;
61748+ seq_operations_no_const seq_ops;
61749 };
61750
61751 struct udp_iter_state {
61752diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61753index b203e14..1df3991 100644
61754--- a/include/net/xfrm.h
61755+++ b/include/net/xfrm.h
61756@@ -505,7 +505,7 @@ struct xfrm_policy {
61757 struct timer_list timer;
61758
61759 struct flow_cache_object flo;
61760- atomic_t genid;
61761+ atomic_unchecked_t genid;
61762 u32 priority;
61763 u32 index;
61764 struct xfrm_mark mark;
61765diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61766index 1a046b1..ee0bef0 100644
61767--- a/include/rdma/iw_cm.h
61768+++ b/include/rdma/iw_cm.h
61769@@ -122,7 +122,7 @@ struct iw_cm_verbs {
61770 int backlog);
61771
61772 int (*destroy_listen)(struct iw_cm_id *cm_id);
61773-};
61774+} __no_const;
61775
61776 /**
61777 * iw_create_cm_id - Create an IW CM identifier.
61778diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61779index 5d1a758..1dbf795 100644
61780--- a/include/scsi/libfc.h
61781+++ b/include/scsi/libfc.h
61782@@ -748,6 +748,7 @@ struct libfc_function_template {
61783 */
61784 void (*disc_stop_final) (struct fc_lport *);
61785 };
61786+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61787
61788 /**
61789 * struct fc_disc - Discovery context
61790@@ -851,7 +852,7 @@ struct fc_lport {
61791 struct fc_vport *vport;
61792
61793 /* Operational Information */
61794- struct libfc_function_template tt;
61795+ libfc_function_template_no_const tt;
61796 u8 link_up;
61797 u8 qfull;
61798 enum fc_lport_state state;
61799diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61800index 5591ed5..13eb457 100644
61801--- a/include/scsi/scsi_device.h
61802+++ b/include/scsi/scsi_device.h
61803@@ -161,9 +161,9 @@ struct scsi_device {
61804 unsigned int max_device_blocked; /* what device_blocked counts down from */
61805 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61806
61807- atomic_t iorequest_cnt;
61808- atomic_t iodone_cnt;
61809- atomic_t ioerr_cnt;
61810+ atomic_unchecked_t iorequest_cnt;
61811+ atomic_unchecked_t iodone_cnt;
61812+ atomic_unchecked_t ioerr_cnt;
61813
61814 struct device sdev_gendev,
61815 sdev_dev;
61816diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61817index 2a65167..91e01f8 100644
61818--- a/include/scsi/scsi_transport_fc.h
61819+++ b/include/scsi/scsi_transport_fc.h
61820@@ -711,7 +711,7 @@ struct fc_function_template {
61821 unsigned long show_host_system_hostname:1;
61822
61823 unsigned long disable_target_scan:1;
61824-};
61825+} __do_const;
61826
61827
61828 /**
61829diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61830index 030b87c..98a6954 100644
61831--- a/include/sound/ak4xxx-adda.h
61832+++ b/include/sound/ak4xxx-adda.h
61833@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61834 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61835 unsigned char val);
61836 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61837-};
61838+} __no_const;
61839
61840 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61841
61842diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61843index 8c05e47..2b5df97 100644
61844--- a/include/sound/hwdep.h
61845+++ b/include/sound/hwdep.h
61846@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61847 struct snd_hwdep_dsp_status *status);
61848 int (*dsp_load)(struct snd_hwdep *hw,
61849 struct snd_hwdep_dsp_image *image);
61850-};
61851+} __no_const;
61852
61853 struct snd_hwdep {
61854 struct snd_card *card;
61855diff --git a/include/sound/info.h b/include/sound/info.h
61856index 5492cc4..1a65278 100644
61857--- a/include/sound/info.h
61858+++ b/include/sound/info.h
61859@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61860 struct snd_info_buffer *buffer);
61861 void (*write)(struct snd_info_entry *entry,
61862 struct snd_info_buffer *buffer);
61863-};
61864+} __no_const;
61865
61866 struct snd_info_entry_ops {
61867 int (*open)(struct snd_info_entry *entry,
61868diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61869index 0cf91b2..b70cae4 100644
61870--- a/include/sound/pcm.h
61871+++ b/include/sound/pcm.h
61872@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61873 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61874 int (*ack)(struct snd_pcm_substream *substream);
61875 };
61876+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61877
61878 /*
61879 *
61880diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61881index af1b49e..a5d55a5 100644
61882--- a/include/sound/sb16_csp.h
61883+++ b/include/sound/sb16_csp.h
61884@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61885 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61886 int (*csp_stop) (struct snd_sb_csp * p);
61887 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61888-};
61889+} __no_const;
61890
61891 /*
61892 * CSP private data
61893diff --git a/include/sound/soc.h b/include/sound/soc.h
61894index 11cfb59..e3f93f4 100644
61895--- a/include/sound/soc.h
61896+++ b/include/sound/soc.h
61897@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61898 /* platform IO - used for platform DAPM */
61899 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61900 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61901-};
61902+} __do_const;
61903
61904 struct snd_soc_platform {
61905 const char *name;
61906diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61907index 444cd6b..3327cc5 100644
61908--- a/include/sound/ymfpci.h
61909+++ b/include/sound/ymfpci.h
61910@@ -358,7 +358,7 @@ struct snd_ymfpci {
61911 spinlock_t reg_lock;
61912 spinlock_t voice_lock;
61913 wait_queue_head_t interrupt_sleep;
61914- atomic_t interrupt_sleep_count;
61915+ atomic_unchecked_t interrupt_sleep_count;
61916 struct snd_info_entry *proc_entry;
61917 const struct firmware *dsp_microcode;
61918 const struct firmware *controller_microcode;
61919diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61920index a79886c..b483af6 100644
61921--- a/include/target/target_core_base.h
61922+++ b/include/target/target_core_base.h
61923@@ -346,7 +346,7 @@ struct t10_reservation_ops {
61924 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61925 int (*t10_pr_register)(struct se_cmd *);
61926 int (*t10_pr_clear)(struct se_cmd *);
61927-};
61928+} __no_const;
61929
61930 struct t10_reservation {
61931 /* Reservation effects all target ports */
61932@@ -465,8 +465,8 @@ struct se_cmd {
61933 atomic_t t_se_count;
61934 atomic_t t_task_cdbs_left;
61935 atomic_t t_task_cdbs_ex_left;
61936- atomic_t t_task_cdbs_sent;
61937- atomic_t t_transport_aborted;
61938+ atomic_unchecked_t t_task_cdbs_sent;
61939+ atomic_unchecked_t t_transport_aborted;
61940 atomic_t t_transport_active;
61941 atomic_t t_transport_complete;
61942 atomic_t t_transport_queue_active;
61943@@ -704,7 +704,7 @@ struct se_device {
61944 /* Active commands on this virtual SE device */
61945 atomic_t simple_cmds;
61946 atomic_t depth_left;
61947- atomic_t dev_ordered_id;
61948+ atomic_unchecked_t dev_ordered_id;
61949 atomic_t execute_tasks;
61950 atomic_t dev_ordered_sync;
61951 atomic_t dev_qf_count;
61952diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61953index 1c09820..7f5ec79 100644
61954--- a/include/trace/events/irq.h
61955+++ b/include/trace/events/irq.h
61956@@ -36,7 +36,7 @@ struct softirq_action;
61957 */
61958 TRACE_EVENT(irq_handler_entry,
61959
61960- TP_PROTO(int irq, struct irqaction *action),
61961+ TP_PROTO(int irq, const struct irqaction *action),
61962
61963 TP_ARGS(irq, action),
61964
61965@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61966 */
61967 TRACE_EVENT(irq_handler_exit,
61968
61969- TP_PROTO(int irq, struct irqaction *action, int ret),
61970+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61971
61972 TP_ARGS(irq, action, ret),
61973
61974diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61975index c41f308..6918de3 100644
61976--- a/include/video/udlfb.h
61977+++ b/include/video/udlfb.h
61978@@ -52,10 +52,10 @@ struct dlfb_data {
61979 u32 pseudo_palette[256];
61980 int blank_mode; /*one of FB_BLANK_ */
61981 /* blit-only rendering path metrics, exposed through sysfs */
61982- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61983- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61984- atomic_t bytes_sent; /* to usb, after compression including overhead */
61985- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61986+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61987+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61988+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61989+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61990 };
61991
61992 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61993diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
61994index 0993a22..32ba2fe 100644
61995--- a/include/video/uvesafb.h
61996+++ b/include/video/uvesafb.h
61997@@ -177,6 +177,7 @@ struct uvesafb_par {
61998 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61999 u8 pmi_setpal; /* PMI for palette changes */
62000 u16 *pmi_base; /* protected mode interface location */
62001+ u8 *pmi_code; /* protected mode code location */
62002 void *pmi_start;
62003 void *pmi_pal;
62004 u8 *vbe_state_orig; /*
62005diff --git a/init/Kconfig b/init/Kconfig
62006index 43298f9..2f56c12 100644
62007--- a/init/Kconfig
62008+++ b/init/Kconfig
62009@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62010
62011 config COMPAT_BRK
62012 bool "Disable heap randomization"
62013- default y
62014+ default n
62015 help
62016 Randomizing heap placement makes heap exploits harder, but it
62017 also breaks ancient binaries (including anything libc5 based).
62018diff --git a/init/do_mounts.c b/init/do_mounts.c
62019index db6e5ee..7677ff7 100644
62020--- a/init/do_mounts.c
62021+++ b/init/do_mounts.c
62022@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62023
62024 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62025 {
62026- int err = sys_mount(name, "/root", fs, flags, data);
62027+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62028 if (err)
62029 return err;
62030
62031- sys_chdir((const char __user __force *)"/root");
62032+ sys_chdir((const char __force_user*)"/root");
62033 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62034 printk(KERN_INFO
62035 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62036@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62037 va_start(args, fmt);
62038 vsprintf(buf, fmt, args);
62039 va_end(args);
62040- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62041+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62042 if (fd >= 0) {
62043 sys_ioctl(fd, FDEJECT, 0);
62044 sys_close(fd);
62045 }
62046 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62047- fd = sys_open("/dev/console", O_RDWR, 0);
62048+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62049 if (fd >= 0) {
62050 sys_ioctl(fd, TCGETS, (long)&termios);
62051 termios.c_lflag &= ~ICANON;
62052 sys_ioctl(fd, TCSETSF, (long)&termios);
62053- sys_read(fd, &c, 1);
62054+ sys_read(fd, (char __user *)&c, 1);
62055 termios.c_lflag |= ICANON;
62056 sys_ioctl(fd, TCSETSF, (long)&termios);
62057 sys_close(fd);
62058@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62059 mount_root();
62060 out:
62061 devtmpfs_mount("dev");
62062- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62063- sys_chroot((const char __user __force *)".");
62064+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62065+ sys_chroot((const char __force_user *)".");
62066 }
62067diff --git a/init/do_mounts.h b/init/do_mounts.h
62068index f5b978a..69dbfe8 100644
62069--- a/init/do_mounts.h
62070+++ b/init/do_mounts.h
62071@@ -15,15 +15,15 @@ extern int root_mountflags;
62072
62073 static inline int create_dev(char *name, dev_t dev)
62074 {
62075- sys_unlink(name);
62076- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62077+ sys_unlink((char __force_user *)name);
62078+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62079 }
62080
62081 #if BITS_PER_LONG == 32
62082 static inline u32 bstat(char *name)
62083 {
62084 struct stat64 stat;
62085- if (sys_stat64(name, &stat) != 0)
62086+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62087 return 0;
62088 if (!S_ISBLK(stat.st_mode))
62089 return 0;
62090@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62091 static inline u32 bstat(char *name)
62092 {
62093 struct stat stat;
62094- if (sys_newstat(name, &stat) != 0)
62095+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62096 return 0;
62097 if (!S_ISBLK(stat.st_mode))
62098 return 0;
62099diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62100index 3098a38..253064e 100644
62101--- a/init/do_mounts_initrd.c
62102+++ b/init/do_mounts_initrd.c
62103@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62104 create_dev("/dev/root.old", Root_RAM0);
62105 /* mount initrd on rootfs' /root */
62106 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62107- sys_mkdir("/old", 0700);
62108- root_fd = sys_open("/", 0, 0);
62109- old_fd = sys_open("/old", 0, 0);
62110+ sys_mkdir((const char __force_user *)"/old", 0700);
62111+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
62112+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62113 /* move initrd over / and chdir/chroot in initrd root */
62114- sys_chdir("/root");
62115- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62116- sys_chroot(".");
62117+ sys_chdir((const char __force_user *)"/root");
62118+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62119+ sys_chroot((const char __force_user *)".");
62120
62121 /*
62122 * In case that a resume from disk is carried out by linuxrc or one of
62123@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62124
62125 /* move initrd to rootfs' /old */
62126 sys_fchdir(old_fd);
62127- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62128+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62129 /* switch root and cwd back to / of rootfs */
62130 sys_fchdir(root_fd);
62131- sys_chroot(".");
62132+ sys_chroot((const char __force_user *)".");
62133 sys_close(old_fd);
62134 sys_close(root_fd);
62135
62136 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62137- sys_chdir("/old");
62138+ sys_chdir((const char __force_user *)"/old");
62139 return;
62140 }
62141
62142@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62143 mount_root();
62144
62145 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62146- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62147+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62148 if (!error)
62149 printk("okay\n");
62150 else {
62151- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62152+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62153 if (error == -ENOENT)
62154 printk("/initrd does not exist. Ignored.\n");
62155 else
62156 printk("failed\n");
62157 printk(KERN_NOTICE "Unmounting old root\n");
62158- sys_umount("/old", MNT_DETACH);
62159+ sys_umount((char __force_user *)"/old", MNT_DETACH);
62160 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62161 if (fd < 0) {
62162 error = fd;
62163@@ -116,11 +116,11 @@ int __init initrd_load(void)
62164 * mounted in the normal path.
62165 */
62166 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62167- sys_unlink("/initrd.image");
62168+ sys_unlink((const char __force_user *)"/initrd.image");
62169 handle_initrd();
62170 return 1;
62171 }
62172 }
62173- sys_unlink("/initrd.image");
62174+ sys_unlink((const char __force_user *)"/initrd.image");
62175 return 0;
62176 }
62177diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62178index 32c4799..c27ee74 100644
62179--- a/init/do_mounts_md.c
62180+++ b/init/do_mounts_md.c
62181@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62182 partitioned ? "_d" : "", minor,
62183 md_setup_args[ent].device_names);
62184
62185- fd = sys_open(name, 0, 0);
62186+ fd = sys_open((char __force_user *)name, 0, 0);
62187 if (fd < 0) {
62188 printk(KERN_ERR "md: open failed - cannot start "
62189 "array %s\n", name);
62190@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62191 * array without it
62192 */
62193 sys_close(fd);
62194- fd = sys_open(name, 0, 0);
62195+ fd = sys_open((char __force_user *)name, 0, 0);
62196 sys_ioctl(fd, BLKRRPART, 0);
62197 }
62198 sys_close(fd);
62199@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62200
62201 wait_for_device_probe();
62202
62203- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62204+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62205 if (fd >= 0) {
62206 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62207 sys_close(fd);
62208diff --git a/init/initramfs.c b/init/initramfs.c
62209index 2531811..040d4d4 100644
62210--- a/init/initramfs.c
62211+++ b/init/initramfs.c
62212@@ -74,7 +74,7 @@ static void __init free_hash(void)
62213 }
62214 }
62215
62216-static long __init do_utime(char __user *filename, time_t mtime)
62217+static long __init do_utime(__force char __user *filename, time_t mtime)
62218 {
62219 struct timespec t[2];
62220
62221@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62222 struct dir_entry *de, *tmp;
62223 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62224 list_del(&de->list);
62225- do_utime(de->name, de->mtime);
62226+ do_utime((char __force_user *)de->name, de->mtime);
62227 kfree(de->name);
62228 kfree(de);
62229 }
62230@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62231 if (nlink >= 2) {
62232 char *old = find_link(major, minor, ino, mode, collected);
62233 if (old)
62234- return (sys_link(old, collected) < 0) ? -1 : 1;
62235+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62236 }
62237 return 0;
62238 }
62239@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62240 {
62241 struct stat st;
62242
62243- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62244+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62245 if (S_ISDIR(st.st_mode))
62246- sys_rmdir(path);
62247+ sys_rmdir((char __force_user *)path);
62248 else
62249- sys_unlink(path);
62250+ sys_unlink((char __force_user *)path);
62251 }
62252 }
62253
62254@@ -305,7 +305,7 @@ static int __init do_name(void)
62255 int openflags = O_WRONLY|O_CREAT;
62256 if (ml != 1)
62257 openflags |= O_TRUNC;
62258- wfd = sys_open(collected, openflags, mode);
62259+ wfd = sys_open((char __force_user *)collected, openflags, mode);
62260
62261 if (wfd >= 0) {
62262 sys_fchown(wfd, uid, gid);
62263@@ -317,17 +317,17 @@ static int __init do_name(void)
62264 }
62265 }
62266 } else if (S_ISDIR(mode)) {
62267- sys_mkdir(collected, mode);
62268- sys_chown(collected, uid, gid);
62269- sys_chmod(collected, mode);
62270+ sys_mkdir((char __force_user *)collected, mode);
62271+ sys_chown((char __force_user *)collected, uid, gid);
62272+ sys_chmod((char __force_user *)collected, mode);
62273 dir_add(collected, mtime);
62274 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62275 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62276 if (maybe_link() == 0) {
62277- sys_mknod(collected, mode, rdev);
62278- sys_chown(collected, uid, gid);
62279- sys_chmod(collected, mode);
62280- do_utime(collected, mtime);
62281+ sys_mknod((char __force_user *)collected, mode, rdev);
62282+ sys_chown((char __force_user *)collected, uid, gid);
62283+ sys_chmod((char __force_user *)collected, mode);
62284+ do_utime((char __force_user *)collected, mtime);
62285 }
62286 }
62287 return 0;
62288@@ -336,15 +336,15 @@ static int __init do_name(void)
62289 static int __init do_copy(void)
62290 {
62291 if (count >= body_len) {
62292- sys_write(wfd, victim, body_len);
62293+ sys_write(wfd, (char __force_user *)victim, body_len);
62294 sys_close(wfd);
62295- do_utime(vcollected, mtime);
62296+ do_utime((char __force_user *)vcollected, mtime);
62297 kfree(vcollected);
62298 eat(body_len);
62299 state = SkipIt;
62300 return 0;
62301 } else {
62302- sys_write(wfd, victim, count);
62303+ sys_write(wfd, (char __force_user *)victim, count);
62304 body_len -= count;
62305 eat(count);
62306 return 1;
62307@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62308 {
62309 collected[N_ALIGN(name_len) + body_len] = '\0';
62310 clean_path(collected, 0);
62311- sys_symlink(collected + N_ALIGN(name_len), collected);
62312- sys_lchown(collected, uid, gid);
62313- do_utime(collected, mtime);
62314+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62315+ sys_lchown((char __force_user *)collected, uid, gid);
62316+ do_utime((char __force_user *)collected, mtime);
62317 state = SkipIt;
62318 next_state = Reset;
62319 return 0;
62320diff --git a/init/main.c b/init/main.c
62321index 217ed23..ec5406f 100644
62322--- a/init/main.c
62323+++ b/init/main.c
62324@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62325 extern void tc_init(void);
62326 #endif
62327
62328+extern void grsecurity_init(void);
62329+
62330 /*
62331 * Debug helper: via this flag we know that we are in 'early bootup code'
62332 * where only the boot processor is running with IRQ disabled. This means
62333@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62334
62335 __setup("reset_devices", set_reset_devices);
62336
62337+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62338+extern char pax_enter_kernel_user[];
62339+extern char pax_exit_kernel_user[];
62340+extern pgdval_t clone_pgd_mask;
62341+#endif
62342+
62343+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62344+static int __init setup_pax_nouderef(char *str)
62345+{
62346+#ifdef CONFIG_X86_32
62347+ unsigned int cpu;
62348+ struct desc_struct *gdt;
62349+
62350+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62351+ gdt = get_cpu_gdt_table(cpu);
62352+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62353+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62354+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62355+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62356+ }
62357+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62358+#else
62359+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62360+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62361+ clone_pgd_mask = ~(pgdval_t)0UL;
62362+#endif
62363+
62364+ return 0;
62365+}
62366+early_param("pax_nouderef", setup_pax_nouderef);
62367+#endif
62368+
62369+#ifdef CONFIG_PAX_SOFTMODE
62370+int pax_softmode;
62371+
62372+static int __init setup_pax_softmode(char *str)
62373+{
62374+ get_option(&str, &pax_softmode);
62375+ return 1;
62376+}
62377+__setup("pax_softmode=", setup_pax_softmode);
62378+#endif
62379+
62380 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62381 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62382 static const char *panic_later, *panic_param;
62383@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62384 {
62385 int count = preempt_count();
62386 int ret;
62387+ const char *msg1 = "", *msg2 = "";
62388
62389 if (initcall_debug)
62390 ret = do_one_initcall_debug(fn);
62391@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62392 sprintf(msgbuf, "error code %d ", ret);
62393
62394 if (preempt_count() != count) {
62395- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62396+ msg1 = " preemption imbalance";
62397 preempt_count() = count;
62398 }
62399 if (irqs_disabled()) {
62400- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62401+ msg2 = " disabled interrupts";
62402 local_irq_enable();
62403 }
62404- if (msgbuf[0]) {
62405- printk("initcall %pF returned with %s\n", fn, msgbuf);
62406+ if (msgbuf[0] || *msg1 || *msg2) {
62407+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62408 }
62409
62410 return ret;
62411@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62412 do_basic_setup();
62413
62414 /* Open the /dev/console on the rootfs, this should never fail */
62415- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62416+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62417 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62418
62419 (void) sys_dup(0);
62420@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62421 if (!ramdisk_execute_command)
62422 ramdisk_execute_command = "/init";
62423
62424- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62425+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62426 ramdisk_execute_command = NULL;
62427 prepare_namespace();
62428 }
62429
62430+ grsecurity_init();
62431+
62432 /*
62433 * Ok, we have completed the initial bootup, and
62434 * we're essentially up and running. Get rid of the
62435diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62436index 5b4293d..f179875 100644
62437--- a/ipc/mqueue.c
62438+++ b/ipc/mqueue.c
62439@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62440 mq_bytes = (mq_msg_tblsz +
62441 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62442
62443+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62444 spin_lock(&mq_lock);
62445 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62446 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62447diff --git a/ipc/msg.c b/ipc/msg.c
62448index 7385de2..a8180e0 100644
62449--- a/ipc/msg.c
62450+++ b/ipc/msg.c
62451@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62452 return security_msg_queue_associate(msq, msgflg);
62453 }
62454
62455+static struct ipc_ops msg_ops = {
62456+ .getnew = newque,
62457+ .associate = msg_security,
62458+ .more_checks = NULL
62459+};
62460+
62461 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62462 {
62463 struct ipc_namespace *ns;
62464- struct ipc_ops msg_ops;
62465 struct ipc_params msg_params;
62466
62467 ns = current->nsproxy->ipc_ns;
62468
62469- msg_ops.getnew = newque;
62470- msg_ops.associate = msg_security;
62471- msg_ops.more_checks = NULL;
62472-
62473 msg_params.key = key;
62474 msg_params.flg = msgflg;
62475
62476diff --git a/ipc/sem.c b/ipc/sem.c
62477index 5215a81..cfc0cac 100644
62478--- a/ipc/sem.c
62479+++ b/ipc/sem.c
62480@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62481 return 0;
62482 }
62483
62484+static struct ipc_ops sem_ops = {
62485+ .getnew = newary,
62486+ .associate = sem_security,
62487+ .more_checks = sem_more_checks
62488+};
62489+
62490 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62491 {
62492 struct ipc_namespace *ns;
62493- struct ipc_ops sem_ops;
62494 struct ipc_params sem_params;
62495
62496 ns = current->nsproxy->ipc_ns;
62497@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62498 if (nsems < 0 || nsems > ns->sc_semmsl)
62499 return -EINVAL;
62500
62501- sem_ops.getnew = newary;
62502- sem_ops.associate = sem_security;
62503- sem_ops.more_checks = sem_more_checks;
62504-
62505 sem_params.key = key;
62506 sem_params.flg = semflg;
62507 sem_params.u.nsems = nsems;
62508diff --git a/ipc/shm.c b/ipc/shm.c
62509index b76be5b..859e750 100644
62510--- a/ipc/shm.c
62511+++ b/ipc/shm.c
62512@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62513 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62514 #endif
62515
62516+#ifdef CONFIG_GRKERNSEC
62517+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62518+ const time_t shm_createtime, const uid_t cuid,
62519+ const int shmid);
62520+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62521+ const time_t shm_createtime);
62522+#endif
62523+
62524 void shm_init_ns(struct ipc_namespace *ns)
62525 {
62526 ns->shm_ctlmax = SHMMAX;
62527@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62528 shp->shm_lprid = 0;
62529 shp->shm_atim = shp->shm_dtim = 0;
62530 shp->shm_ctim = get_seconds();
62531+#ifdef CONFIG_GRKERNSEC
62532+ {
62533+ struct timespec timeval;
62534+ do_posix_clock_monotonic_gettime(&timeval);
62535+
62536+ shp->shm_createtime = timeval.tv_sec;
62537+ }
62538+#endif
62539 shp->shm_segsz = size;
62540 shp->shm_nattch = 0;
62541 shp->shm_file = file;
62542@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62543 return 0;
62544 }
62545
62546+static struct ipc_ops shm_ops = {
62547+ .getnew = newseg,
62548+ .associate = shm_security,
62549+ .more_checks = shm_more_checks
62550+};
62551+
62552 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62553 {
62554 struct ipc_namespace *ns;
62555- struct ipc_ops shm_ops;
62556 struct ipc_params shm_params;
62557
62558 ns = current->nsproxy->ipc_ns;
62559
62560- shm_ops.getnew = newseg;
62561- shm_ops.associate = shm_security;
62562- shm_ops.more_checks = shm_more_checks;
62563-
62564 shm_params.key = key;
62565 shm_params.flg = shmflg;
62566 shm_params.u.size = size;
62567@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62568 f_mode = FMODE_READ | FMODE_WRITE;
62569 }
62570 if (shmflg & SHM_EXEC) {
62571+
62572+#ifdef CONFIG_PAX_MPROTECT
62573+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
62574+ goto out;
62575+#endif
62576+
62577 prot |= PROT_EXEC;
62578 acc_mode |= S_IXUGO;
62579 }
62580@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62581 if (err)
62582 goto out_unlock;
62583
62584+#ifdef CONFIG_GRKERNSEC
62585+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62586+ shp->shm_perm.cuid, shmid) ||
62587+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62588+ err = -EACCES;
62589+ goto out_unlock;
62590+ }
62591+#endif
62592+
62593 path = shp->shm_file->f_path;
62594 path_get(&path);
62595 shp->shm_nattch++;
62596+#ifdef CONFIG_GRKERNSEC
62597+ shp->shm_lapid = current->pid;
62598+#endif
62599 size = i_size_read(path.dentry->d_inode);
62600 shm_unlock(shp);
62601
62602diff --git a/kernel/acct.c b/kernel/acct.c
62603index fa7eb3d..7faf116 100644
62604--- a/kernel/acct.c
62605+++ b/kernel/acct.c
62606@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62607 */
62608 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62609 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62610- file->f_op->write(file, (char *)&ac,
62611+ file->f_op->write(file, (char __force_user *)&ac,
62612 sizeof(acct_t), &file->f_pos);
62613 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62614 set_fs(fs);
62615diff --git a/kernel/audit.c b/kernel/audit.c
62616index 09fae26..ed71d5b 100644
62617--- a/kernel/audit.c
62618+++ b/kernel/audit.c
62619@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62620 3) suppressed due to audit_rate_limit
62621 4) suppressed due to audit_backlog_limit
62622 */
62623-static atomic_t audit_lost = ATOMIC_INIT(0);
62624+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62625
62626 /* The netlink socket. */
62627 static struct sock *audit_sock;
62628@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62629 unsigned long now;
62630 int print;
62631
62632- atomic_inc(&audit_lost);
62633+ atomic_inc_unchecked(&audit_lost);
62634
62635 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62636
62637@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62638 printk(KERN_WARNING
62639 "audit: audit_lost=%d audit_rate_limit=%d "
62640 "audit_backlog_limit=%d\n",
62641- atomic_read(&audit_lost),
62642+ atomic_read_unchecked(&audit_lost),
62643 audit_rate_limit,
62644 audit_backlog_limit);
62645 audit_panic(message);
62646@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62647 status_set.pid = audit_pid;
62648 status_set.rate_limit = audit_rate_limit;
62649 status_set.backlog_limit = audit_backlog_limit;
62650- status_set.lost = atomic_read(&audit_lost);
62651+ status_set.lost = atomic_read_unchecked(&audit_lost);
62652 status_set.backlog = skb_queue_len(&audit_skb_queue);
62653 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62654 &status_set, sizeof(status_set));
62655@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62656 avail = audit_expand(ab,
62657 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62658 if (!avail)
62659- goto out;
62660+ goto out_va_end;
62661 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62662 }
62663- va_end(args2);
62664 if (len > 0)
62665 skb_put(skb, len);
62666+out_va_end:
62667+ va_end(args2);
62668 out:
62669 return;
62670 }
62671diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62672index 47b7fc1..c003c33 100644
62673--- a/kernel/auditsc.c
62674+++ b/kernel/auditsc.c
62675@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62676 struct audit_buffer **ab,
62677 struct audit_aux_data_execve *axi)
62678 {
62679- int i;
62680- size_t len, len_sent = 0;
62681+ int i, len;
62682+ size_t len_sent = 0;
62683 const char __user *p;
62684 char *buf;
62685
62686@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62687 }
62688
62689 /* global counter which is incremented every time something logs in */
62690-static atomic_t session_id = ATOMIC_INIT(0);
62691+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62692
62693 /**
62694 * audit_set_loginuid - set a task's audit_context loginuid
62695@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62696 */
62697 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62698 {
62699- unsigned int sessionid = atomic_inc_return(&session_id);
62700+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62701 struct audit_context *context = task->audit_context;
62702
62703 if (context && context->in_syscall) {
62704diff --git a/kernel/capability.c b/kernel/capability.c
62705index b463871..fa3ea1f 100644
62706--- a/kernel/capability.c
62707+++ b/kernel/capability.c
62708@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62709 * before modification is attempted and the application
62710 * fails.
62711 */
62712+ if (tocopy > ARRAY_SIZE(kdata))
62713+ return -EFAULT;
62714+
62715 if (copy_to_user(dataptr, kdata, tocopy
62716 * sizeof(struct __user_cap_data_struct))) {
62717 return -EFAULT;
62718@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62719 BUG();
62720 }
62721
62722- if (security_capable(ns, current_cred(), cap) == 0) {
62723+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62724 current->flags |= PF_SUPERPRIV;
62725 return true;
62726 }
62727@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62728 }
62729 EXPORT_SYMBOL(ns_capable);
62730
62731+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62732+{
62733+ if (unlikely(!cap_valid(cap))) {
62734+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62735+ BUG();
62736+ }
62737+
62738+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62739+ current->flags |= PF_SUPERPRIV;
62740+ return true;
62741+ }
62742+ return false;
62743+}
62744+EXPORT_SYMBOL(ns_capable_nolog);
62745+
62746+bool capable_nolog(int cap)
62747+{
62748+ return ns_capable_nolog(&init_user_ns, cap);
62749+}
62750+EXPORT_SYMBOL(capable_nolog);
62751+
62752 /**
62753 * task_ns_capable - Determine whether current task has a superior
62754 * capability targeted at a specific task's user namespace.
62755@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62756 }
62757 EXPORT_SYMBOL(task_ns_capable);
62758
62759+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62760+{
62761+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62762+}
62763+EXPORT_SYMBOL(task_ns_capable_nolog);
62764+
62765 /**
62766 * nsown_capable - Check superior capability to one's own user_ns
62767 * @cap: The capability in question
62768diff --git a/kernel/compat.c b/kernel/compat.c
62769index f346ced..aa2b1f4 100644
62770--- a/kernel/compat.c
62771+++ b/kernel/compat.c
62772@@ -13,6 +13,7 @@
62773
62774 #include <linux/linkage.h>
62775 #include <linux/compat.h>
62776+#include <linux/module.h>
62777 #include <linux/errno.h>
62778 #include <linux/time.h>
62779 #include <linux/signal.h>
62780@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62781 mm_segment_t oldfs;
62782 long ret;
62783
62784- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62785+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62786 oldfs = get_fs();
62787 set_fs(KERNEL_DS);
62788 ret = hrtimer_nanosleep_restart(restart);
62789@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62790 oldfs = get_fs();
62791 set_fs(KERNEL_DS);
62792 ret = hrtimer_nanosleep(&tu,
62793- rmtp ? (struct timespec __user *)&rmt : NULL,
62794+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62795 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62796 set_fs(oldfs);
62797
62798@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62799 mm_segment_t old_fs = get_fs();
62800
62801 set_fs(KERNEL_DS);
62802- ret = sys_sigpending((old_sigset_t __user *) &s);
62803+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62804 set_fs(old_fs);
62805 if (ret == 0)
62806 ret = put_user(s, set);
62807@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62808 old_fs = get_fs();
62809 set_fs(KERNEL_DS);
62810 ret = sys_sigprocmask(how,
62811- set ? (old_sigset_t __user *) &s : NULL,
62812- oset ? (old_sigset_t __user *) &s : NULL);
62813+ set ? (old_sigset_t __force_user *) &s : NULL,
62814+ oset ? (old_sigset_t __force_user *) &s : NULL);
62815 set_fs(old_fs);
62816 if (ret == 0)
62817 if (oset)
62818@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62819 mm_segment_t old_fs = get_fs();
62820
62821 set_fs(KERNEL_DS);
62822- ret = sys_old_getrlimit(resource, &r);
62823+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62824 set_fs(old_fs);
62825
62826 if (!ret) {
62827@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62828 mm_segment_t old_fs = get_fs();
62829
62830 set_fs(KERNEL_DS);
62831- ret = sys_getrusage(who, (struct rusage __user *) &r);
62832+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62833 set_fs(old_fs);
62834
62835 if (ret)
62836@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62837 set_fs (KERNEL_DS);
62838 ret = sys_wait4(pid,
62839 (stat_addr ?
62840- (unsigned int __user *) &status : NULL),
62841- options, (struct rusage __user *) &r);
62842+ (unsigned int __force_user *) &status : NULL),
62843+ options, (struct rusage __force_user *) &r);
62844 set_fs (old_fs);
62845
62846 if (ret > 0) {
62847@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62848 memset(&info, 0, sizeof(info));
62849
62850 set_fs(KERNEL_DS);
62851- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62852- uru ? (struct rusage __user *)&ru : NULL);
62853+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62854+ uru ? (struct rusage __force_user *)&ru : NULL);
62855 set_fs(old_fs);
62856
62857 if ((ret < 0) || (info.si_signo == 0))
62858@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62859 oldfs = get_fs();
62860 set_fs(KERNEL_DS);
62861 err = sys_timer_settime(timer_id, flags,
62862- (struct itimerspec __user *) &newts,
62863- (struct itimerspec __user *) &oldts);
62864+ (struct itimerspec __force_user *) &newts,
62865+ (struct itimerspec __force_user *) &oldts);
62866 set_fs(oldfs);
62867 if (!err && old && put_compat_itimerspec(old, &oldts))
62868 return -EFAULT;
62869@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62870 oldfs = get_fs();
62871 set_fs(KERNEL_DS);
62872 err = sys_timer_gettime(timer_id,
62873- (struct itimerspec __user *) &ts);
62874+ (struct itimerspec __force_user *) &ts);
62875 set_fs(oldfs);
62876 if (!err && put_compat_itimerspec(setting, &ts))
62877 return -EFAULT;
62878@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62879 oldfs = get_fs();
62880 set_fs(KERNEL_DS);
62881 err = sys_clock_settime(which_clock,
62882- (struct timespec __user *) &ts);
62883+ (struct timespec __force_user *) &ts);
62884 set_fs(oldfs);
62885 return err;
62886 }
62887@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62888 oldfs = get_fs();
62889 set_fs(KERNEL_DS);
62890 err = sys_clock_gettime(which_clock,
62891- (struct timespec __user *) &ts);
62892+ (struct timespec __force_user *) &ts);
62893 set_fs(oldfs);
62894 if (!err && put_compat_timespec(&ts, tp))
62895 return -EFAULT;
62896@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62897
62898 oldfs = get_fs();
62899 set_fs(KERNEL_DS);
62900- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62901+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62902 set_fs(oldfs);
62903
62904 err = compat_put_timex(utp, &txc);
62905@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62906 oldfs = get_fs();
62907 set_fs(KERNEL_DS);
62908 err = sys_clock_getres(which_clock,
62909- (struct timespec __user *) &ts);
62910+ (struct timespec __force_user *) &ts);
62911 set_fs(oldfs);
62912 if (!err && tp && put_compat_timespec(&ts, tp))
62913 return -EFAULT;
62914@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62915 long err;
62916 mm_segment_t oldfs;
62917 struct timespec tu;
62918- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62919+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62920
62921- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62922+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62923 oldfs = get_fs();
62924 set_fs(KERNEL_DS);
62925 err = clock_nanosleep_restart(restart);
62926@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62927 oldfs = get_fs();
62928 set_fs(KERNEL_DS);
62929 err = sys_clock_nanosleep(which_clock, flags,
62930- (struct timespec __user *) &in,
62931- (struct timespec __user *) &out);
62932+ (struct timespec __force_user *) &in,
62933+ (struct timespec __force_user *) &out);
62934 set_fs(oldfs);
62935
62936 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62937diff --git a/kernel/configs.c b/kernel/configs.c
62938index 42e8fa0..9e7406b 100644
62939--- a/kernel/configs.c
62940+++ b/kernel/configs.c
62941@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62942 struct proc_dir_entry *entry;
62943
62944 /* create the current config file */
62945+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62946+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62947+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62948+ &ikconfig_file_ops);
62949+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62950+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62951+ &ikconfig_file_ops);
62952+#endif
62953+#else
62954 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62955 &ikconfig_file_ops);
62956+#endif
62957+
62958 if (!entry)
62959 return -ENOMEM;
62960
62961diff --git a/kernel/cred.c b/kernel/cred.c
62962index 5791612..a3c04dc 100644
62963--- a/kernel/cred.c
62964+++ b/kernel/cred.c
62965@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62966 validate_creds(cred);
62967 put_cred(cred);
62968 }
62969+
62970+#ifdef CONFIG_GRKERNSEC_SETXID
62971+ cred = (struct cred *) tsk->delayed_cred;
62972+ if (cred) {
62973+ tsk->delayed_cred = NULL;
62974+ validate_creds(cred);
62975+ put_cred(cred);
62976+ }
62977+#endif
62978 }
62979
62980 /**
62981@@ -470,7 +479,7 @@ error_put:
62982 * Always returns 0 thus allowing this function to be tail-called at the end
62983 * of, say, sys_setgid().
62984 */
62985-int commit_creds(struct cred *new)
62986+static int __commit_creds(struct cred *new)
62987 {
62988 struct task_struct *task = current;
62989 const struct cred *old = task->real_cred;
62990@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
62991
62992 get_cred(new); /* we will require a ref for the subj creds too */
62993
62994+ gr_set_role_label(task, new->uid, new->gid);
62995+
62996 /* dumpability changes */
62997 if (old->euid != new->euid ||
62998 old->egid != new->egid ||
62999@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63000 put_cred(old);
63001 return 0;
63002 }
63003+#ifdef CONFIG_GRKERNSEC_SETXID
63004+extern int set_user(struct cred *new);
63005+
63006+void gr_delayed_cred_worker(void)
63007+{
63008+ const struct cred *new = current->delayed_cred;
63009+ struct cred *ncred;
63010+
63011+ current->delayed_cred = NULL;
63012+
63013+ if (current_uid() && new != NULL) {
63014+ // from doing get_cred on it when queueing this
63015+ put_cred(new);
63016+ return;
63017+ } else if (new == NULL)
63018+ return;
63019+
63020+ ncred = prepare_creds();
63021+ if (!ncred)
63022+ goto die;
63023+ // uids
63024+ ncred->uid = new->uid;
63025+ ncred->euid = new->euid;
63026+ ncred->suid = new->suid;
63027+ ncred->fsuid = new->fsuid;
63028+ // gids
63029+ ncred->gid = new->gid;
63030+ ncred->egid = new->egid;
63031+ ncred->sgid = new->sgid;
63032+ ncred->fsgid = new->fsgid;
63033+ // groups
63034+ if (set_groups(ncred, new->group_info) < 0) {
63035+ abort_creds(ncred);
63036+ goto die;
63037+ }
63038+ // caps
63039+ ncred->securebits = new->securebits;
63040+ ncred->cap_inheritable = new->cap_inheritable;
63041+ ncred->cap_permitted = new->cap_permitted;
63042+ ncred->cap_effective = new->cap_effective;
63043+ ncred->cap_bset = new->cap_bset;
63044+
63045+ if (set_user(ncred)) {
63046+ abort_creds(ncred);
63047+ goto die;
63048+ }
63049+
63050+ // from doing get_cred on it when queueing this
63051+ put_cred(new);
63052+
63053+ __commit_creds(ncred);
63054+ return;
63055+die:
63056+ // from doing get_cred on it when queueing this
63057+ put_cred(new);
63058+ do_group_exit(SIGKILL);
63059+}
63060+#endif
63061+
63062+int commit_creds(struct cred *new)
63063+{
63064+#ifdef CONFIG_GRKERNSEC_SETXID
63065+ struct task_struct *t;
63066+
63067+ /* we won't get called with tasklist_lock held for writing
63068+ and interrupts disabled as the cred struct in that case is
63069+ init_cred
63070+ */
63071+ if (grsec_enable_setxid && !current_is_single_threaded() &&
63072+ !current_uid() && new->uid) {
63073+ rcu_read_lock();
63074+ read_lock(&tasklist_lock);
63075+ for (t = next_thread(current); t != current;
63076+ t = next_thread(t)) {
63077+ if (t->delayed_cred == NULL) {
63078+ t->delayed_cred = get_cred(new);
63079+ set_tsk_need_resched(t);
63080+ }
63081+ }
63082+ read_unlock(&tasklist_lock);
63083+ rcu_read_unlock();
63084+ }
63085+#endif
63086+ return __commit_creds(new);
63087+}
63088+
63089 EXPORT_SYMBOL(commit_creds);
63090
63091 /**
63092diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63093index 0d7c087..01b8cef 100644
63094--- a/kernel/debug/debug_core.c
63095+++ b/kernel/debug/debug_core.c
63096@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63097 */
63098 static atomic_t masters_in_kgdb;
63099 static atomic_t slaves_in_kgdb;
63100-static atomic_t kgdb_break_tasklet_var;
63101+static atomic_unchecked_t kgdb_break_tasklet_var;
63102 atomic_t kgdb_setting_breakpoint;
63103
63104 struct task_struct *kgdb_usethread;
63105@@ -129,7 +129,7 @@ int kgdb_single_step;
63106 static pid_t kgdb_sstep_pid;
63107
63108 /* to keep track of the CPU which is doing the single stepping*/
63109-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63110+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63111
63112 /*
63113 * If you are debugging a problem where roundup (the collection of
63114@@ -542,7 +542,7 @@ return_normal:
63115 * kernel will only try for the value of sstep_tries before
63116 * giving up and continuing on.
63117 */
63118- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63119+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63120 (kgdb_info[cpu].task &&
63121 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63122 atomic_set(&kgdb_active, -1);
63123@@ -636,8 +636,8 @@ cpu_master_loop:
63124 }
63125
63126 kgdb_restore:
63127- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63128- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63129+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63130+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63131 if (kgdb_info[sstep_cpu].task)
63132 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63133 else
63134@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63135 static void kgdb_tasklet_bpt(unsigned long ing)
63136 {
63137 kgdb_breakpoint();
63138- atomic_set(&kgdb_break_tasklet_var, 0);
63139+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63140 }
63141
63142 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63143
63144 void kgdb_schedule_breakpoint(void)
63145 {
63146- if (atomic_read(&kgdb_break_tasklet_var) ||
63147+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63148 atomic_read(&kgdb_active) != -1 ||
63149 atomic_read(&kgdb_setting_breakpoint))
63150 return;
63151- atomic_inc(&kgdb_break_tasklet_var);
63152+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
63153 tasklet_schedule(&kgdb_tasklet_breakpoint);
63154 }
63155 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63156diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63157index 63786e7..0780cac 100644
63158--- a/kernel/debug/kdb/kdb_main.c
63159+++ b/kernel/debug/kdb/kdb_main.c
63160@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63161 list_for_each_entry(mod, kdb_modules, list) {
63162
63163 kdb_printf("%-20s%8u 0x%p ", mod->name,
63164- mod->core_size, (void *)mod);
63165+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
63166 #ifdef CONFIG_MODULE_UNLOAD
63167 kdb_printf("%4d ", module_refcount(mod));
63168 #endif
63169@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63170 kdb_printf(" (Loading)");
63171 else
63172 kdb_printf(" (Live)");
63173- kdb_printf(" 0x%p", mod->module_core);
63174+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63175
63176 #ifdef CONFIG_MODULE_UNLOAD
63177 {
63178diff --git a/kernel/events/core.c b/kernel/events/core.c
63179index 58690af..d903d75 100644
63180--- a/kernel/events/core.c
63181+++ b/kernel/events/core.c
63182@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63183 return 0;
63184 }
63185
63186-static atomic64_t perf_event_id;
63187+static atomic64_unchecked_t perf_event_id;
63188
63189 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63190 enum event_type_t event_type);
63191@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63192
63193 static inline u64 perf_event_count(struct perf_event *event)
63194 {
63195- return local64_read(&event->count) + atomic64_read(&event->child_count);
63196+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63197 }
63198
63199 static u64 perf_event_read(struct perf_event *event)
63200@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63201 mutex_lock(&event->child_mutex);
63202 total += perf_event_read(event);
63203 *enabled += event->total_time_enabled +
63204- atomic64_read(&event->child_total_time_enabled);
63205+ atomic64_read_unchecked(&event->child_total_time_enabled);
63206 *running += event->total_time_running +
63207- atomic64_read(&event->child_total_time_running);
63208+ atomic64_read_unchecked(&event->child_total_time_running);
63209
63210 list_for_each_entry(child, &event->child_list, child_list) {
63211 total += perf_event_read(child);
63212@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63213 userpg->offset -= local64_read(&event->hw.prev_count);
63214
63215 userpg->time_enabled = enabled +
63216- atomic64_read(&event->child_total_time_enabled);
63217+ atomic64_read_unchecked(&event->child_total_time_enabled);
63218
63219 userpg->time_running = running +
63220- atomic64_read(&event->child_total_time_running);
63221+ atomic64_read_unchecked(&event->child_total_time_running);
63222
63223 barrier();
63224 ++userpg->lock;
63225@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63226 values[n++] = perf_event_count(event);
63227 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63228 values[n++] = enabled +
63229- atomic64_read(&event->child_total_time_enabled);
63230+ atomic64_read_unchecked(&event->child_total_time_enabled);
63231 }
63232 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63233 values[n++] = running +
63234- atomic64_read(&event->child_total_time_running);
63235+ atomic64_read_unchecked(&event->child_total_time_running);
63236 }
63237 if (read_format & PERF_FORMAT_ID)
63238 values[n++] = primary_event_id(event);
63239@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63240 * need to add enough zero bytes after the string to handle
63241 * the 64bit alignment we do later.
63242 */
63243- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63244+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
63245 if (!buf) {
63246 name = strncpy(tmp, "//enomem", sizeof(tmp));
63247 goto got_name;
63248 }
63249- name = d_path(&file->f_path, buf, PATH_MAX);
63250+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63251 if (IS_ERR(name)) {
63252 name = strncpy(tmp, "//toolong", sizeof(tmp));
63253 goto got_name;
63254@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63255 event->parent = parent_event;
63256
63257 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63258- event->id = atomic64_inc_return(&perf_event_id);
63259+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
63260
63261 event->state = PERF_EVENT_STATE_INACTIVE;
63262
63263@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63264 /*
63265 * Add back the child's count to the parent's count:
63266 */
63267- atomic64_add(child_val, &parent_event->child_count);
63268- atomic64_add(child_event->total_time_enabled,
63269+ atomic64_add_unchecked(child_val, &parent_event->child_count);
63270+ atomic64_add_unchecked(child_event->total_time_enabled,
63271 &parent_event->child_total_time_enabled);
63272- atomic64_add(child_event->total_time_running,
63273+ atomic64_add_unchecked(child_event->total_time_running,
63274 &parent_event->child_total_time_running);
63275
63276 /*
63277diff --git a/kernel/exit.c b/kernel/exit.c
63278index e6e01b9..619f837 100644
63279--- a/kernel/exit.c
63280+++ b/kernel/exit.c
63281@@ -57,6 +57,10 @@
63282 #include <asm/pgtable.h>
63283 #include <asm/mmu_context.h>
63284
63285+#ifdef CONFIG_GRKERNSEC
63286+extern rwlock_t grsec_exec_file_lock;
63287+#endif
63288+
63289 static void exit_mm(struct task_struct * tsk);
63290
63291 static void __unhash_process(struct task_struct *p, bool group_dead)
63292@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63293 struct task_struct *leader;
63294 int zap_leader;
63295 repeat:
63296+#ifdef CONFIG_NET
63297+ gr_del_task_from_ip_table(p);
63298+#endif
63299+
63300 /* don't need to get the RCU readlock here - the process is dead and
63301 * can't be modifying its own credentials. But shut RCU-lockdep up */
63302 rcu_read_lock();
63303@@ -380,7 +388,7 @@ int allow_signal(int sig)
63304 * know it'll be handled, so that they don't get converted to
63305 * SIGKILL or just silently dropped.
63306 */
63307- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63308+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63309 recalc_sigpending();
63310 spin_unlock_irq(&current->sighand->siglock);
63311 return 0;
63312@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63313 vsnprintf(current->comm, sizeof(current->comm), name, args);
63314 va_end(args);
63315
63316+#ifdef CONFIG_GRKERNSEC
63317+ write_lock(&grsec_exec_file_lock);
63318+ if (current->exec_file) {
63319+ fput(current->exec_file);
63320+ current->exec_file = NULL;
63321+ }
63322+ write_unlock(&grsec_exec_file_lock);
63323+#endif
63324+
63325+ gr_set_kernel_label(current);
63326+
63327 /*
63328 * If we were started as result of loading a module, close all of the
63329 * user space pages. We don't need them, and if we didn't close them
63330@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63331 struct task_struct *tsk = current;
63332 int group_dead;
63333
63334+ set_fs(USER_DS);
63335+
63336 profile_task_exit(tsk);
63337
63338 WARN_ON(blk_needs_flush_plug(tsk));
63339@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63340 * mm_release()->clear_child_tid() from writing to a user-controlled
63341 * kernel address.
63342 */
63343- set_fs(USER_DS);
63344
63345 ptrace_event(PTRACE_EVENT_EXIT, code);
63346
63347@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63348 tsk->exit_code = code;
63349 taskstats_exit(tsk, group_dead);
63350
63351+ gr_acl_handle_psacct(tsk, code);
63352+ gr_acl_handle_exit();
63353+
63354 exit_mm(tsk);
63355
63356 if (group_dead)
63357diff --git a/kernel/fork.c b/kernel/fork.c
63358index da4a6a1..0483b61 100644
63359--- a/kernel/fork.c
63360+++ b/kernel/fork.c
63361@@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63362 *stackend = STACK_END_MAGIC; /* for overflow detection */
63363
63364 #ifdef CONFIG_CC_STACKPROTECTOR
63365- tsk->stack_canary = get_random_int();
63366+ tsk->stack_canary = pax_get_random_long();
63367 #endif
63368
63369 /*
63370@@ -304,13 +304,77 @@ out:
63371 }
63372
63373 #ifdef CONFIG_MMU
63374+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63375+{
63376+ struct vm_area_struct *tmp;
63377+ unsigned long charge;
63378+ struct mempolicy *pol;
63379+ struct file *file;
63380+
63381+ charge = 0;
63382+ if (mpnt->vm_flags & VM_ACCOUNT) {
63383+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63384+ if (security_vm_enough_memory(len))
63385+ goto fail_nomem;
63386+ charge = len;
63387+ }
63388+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63389+ if (!tmp)
63390+ goto fail_nomem;
63391+ *tmp = *mpnt;
63392+ tmp->vm_mm = mm;
63393+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63394+ pol = mpol_dup(vma_policy(mpnt));
63395+ if (IS_ERR(pol))
63396+ goto fail_nomem_policy;
63397+ vma_set_policy(tmp, pol);
63398+ if (anon_vma_fork(tmp, mpnt))
63399+ goto fail_nomem_anon_vma_fork;
63400+ tmp->vm_flags &= ~VM_LOCKED;
63401+ tmp->vm_next = tmp->vm_prev = NULL;
63402+ tmp->vm_mirror = NULL;
63403+ file = tmp->vm_file;
63404+ if (file) {
63405+ struct inode *inode = file->f_path.dentry->d_inode;
63406+ struct address_space *mapping = file->f_mapping;
63407+
63408+ get_file(file);
63409+ if (tmp->vm_flags & VM_DENYWRITE)
63410+ atomic_dec(&inode->i_writecount);
63411+ mutex_lock(&mapping->i_mmap_mutex);
63412+ if (tmp->vm_flags & VM_SHARED)
63413+ mapping->i_mmap_writable++;
63414+ flush_dcache_mmap_lock(mapping);
63415+ /* insert tmp into the share list, just after mpnt */
63416+ vma_prio_tree_add(tmp, mpnt);
63417+ flush_dcache_mmap_unlock(mapping);
63418+ mutex_unlock(&mapping->i_mmap_mutex);
63419+ }
63420+
63421+ /*
63422+ * Clear hugetlb-related page reserves for children. This only
63423+ * affects MAP_PRIVATE mappings. Faults generated by the child
63424+ * are not guaranteed to succeed, even if read-only
63425+ */
63426+ if (is_vm_hugetlb_page(tmp))
63427+ reset_vma_resv_huge_pages(tmp);
63428+
63429+ return tmp;
63430+
63431+fail_nomem_anon_vma_fork:
63432+ mpol_put(pol);
63433+fail_nomem_policy:
63434+ kmem_cache_free(vm_area_cachep, tmp);
63435+fail_nomem:
63436+ vm_unacct_memory(charge);
63437+ return NULL;
63438+}
63439+
63440 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63441 {
63442 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63443 struct rb_node **rb_link, *rb_parent;
63444 int retval;
63445- unsigned long charge;
63446- struct mempolicy *pol;
63447
63448 down_write(&oldmm->mmap_sem);
63449 flush_cache_dup_mm(oldmm);
63450@@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63451 mm->locked_vm = 0;
63452 mm->mmap = NULL;
63453 mm->mmap_cache = NULL;
63454- mm->free_area_cache = oldmm->mmap_base;
63455- mm->cached_hole_size = ~0UL;
63456+ mm->free_area_cache = oldmm->free_area_cache;
63457+ mm->cached_hole_size = oldmm->cached_hole_size;
63458 mm->map_count = 0;
63459 cpumask_clear(mm_cpumask(mm));
63460 mm->mm_rb = RB_ROOT;
63461@@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63462
63463 prev = NULL;
63464 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63465- struct file *file;
63466-
63467 if (mpnt->vm_flags & VM_DONTCOPY) {
63468 long pages = vma_pages(mpnt);
63469 mm->total_vm -= pages;
63470@@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63471 -pages);
63472 continue;
63473 }
63474- charge = 0;
63475- if (mpnt->vm_flags & VM_ACCOUNT) {
63476- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63477- if (security_vm_enough_memory(len))
63478- goto fail_nomem;
63479- charge = len;
63480+ tmp = dup_vma(mm, mpnt);
63481+ if (!tmp) {
63482+ retval = -ENOMEM;
63483+ goto out;
63484 }
63485- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63486- if (!tmp)
63487- goto fail_nomem;
63488- *tmp = *mpnt;
63489- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63490- pol = mpol_dup(vma_policy(mpnt));
63491- retval = PTR_ERR(pol);
63492- if (IS_ERR(pol))
63493- goto fail_nomem_policy;
63494- vma_set_policy(tmp, pol);
63495- tmp->vm_mm = mm;
63496- if (anon_vma_fork(tmp, mpnt))
63497- goto fail_nomem_anon_vma_fork;
63498- tmp->vm_flags &= ~VM_LOCKED;
63499- tmp->vm_next = tmp->vm_prev = NULL;
63500- file = tmp->vm_file;
63501- if (file) {
63502- struct inode *inode = file->f_path.dentry->d_inode;
63503- struct address_space *mapping = file->f_mapping;
63504-
63505- get_file(file);
63506- if (tmp->vm_flags & VM_DENYWRITE)
63507- atomic_dec(&inode->i_writecount);
63508- mutex_lock(&mapping->i_mmap_mutex);
63509- if (tmp->vm_flags & VM_SHARED)
63510- mapping->i_mmap_writable++;
63511- flush_dcache_mmap_lock(mapping);
63512- /* insert tmp into the share list, just after mpnt */
63513- vma_prio_tree_add(tmp, mpnt);
63514- flush_dcache_mmap_unlock(mapping);
63515- mutex_unlock(&mapping->i_mmap_mutex);
63516- }
63517-
63518- /*
63519- * Clear hugetlb-related page reserves for children. This only
63520- * affects MAP_PRIVATE mappings. Faults generated by the child
63521- * are not guaranteed to succeed, even if read-only
63522- */
63523- if (is_vm_hugetlb_page(tmp))
63524- reset_vma_resv_huge_pages(tmp);
63525
63526 /*
63527 * Link in the new vma and copy the page table entries.
63528@@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63529 if (retval)
63530 goto out;
63531 }
63532+
63533+#ifdef CONFIG_PAX_SEGMEXEC
63534+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63535+ struct vm_area_struct *mpnt_m;
63536+
63537+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63538+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63539+
63540+ if (!mpnt->vm_mirror)
63541+ continue;
63542+
63543+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63544+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63545+ mpnt->vm_mirror = mpnt_m;
63546+ } else {
63547+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63548+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63549+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63550+ mpnt->vm_mirror->vm_mirror = mpnt;
63551+ }
63552+ }
63553+ BUG_ON(mpnt_m);
63554+ }
63555+#endif
63556+
63557 /* a new mm has just been created */
63558 arch_dup_mmap(oldmm, mm);
63559 retval = 0;
63560@@ -425,14 +470,6 @@ out:
63561 flush_tlb_mm(oldmm);
63562 up_write(&oldmm->mmap_sem);
63563 return retval;
63564-fail_nomem_anon_vma_fork:
63565- mpol_put(pol);
63566-fail_nomem_policy:
63567- kmem_cache_free(vm_area_cachep, tmp);
63568-fail_nomem:
63569- retval = -ENOMEM;
63570- vm_unacct_memory(charge);
63571- goto out;
63572 }
63573
63574 static inline int mm_alloc_pgd(struct mm_struct *mm)
63575@@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63576 }
63577 EXPORT_SYMBOL_GPL(get_task_mm);
63578
63579+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63580+{
63581+ struct mm_struct *mm;
63582+ int err;
63583+
63584+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63585+ if (err)
63586+ return ERR_PTR(err);
63587+
63588+ mm = get_task_mm(task);
63589+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63590+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63591+ mmput(mm);
63592+ mm = ERR_PTR(-EACCES);
63593+ }
63594+ mutex_unlock(&task->signal->cred_guard_mutex);
63595+
63596+ return mm;
63597+}
63598+
63599 /* Please note the differences between mmput and mm_release.
63600 * mmput is called whenever we stop holding onto a mm_struct,
63601 * error success whatever.
63602@@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63603 spin_unlock(&fs->lock);
63604 return -EAGAIN;
63605 }
63606- fs->users++;
63607+ atomic_inc(&fs->users);
63608 spin_unlock(&fs->lock);
63609 return 0;
63610 }
63611 tsk->fs = copy_fs_struct(fs);
63612 if (!tsk->fs)
63613 return -ENOMEM;
63614+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63615 return 0;
63616 }
63617
63618@@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63619 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63620 #endif
63621 retval = -EAGAIN;
63622+
63623+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63624+
63625 if (atomic_read(&p->real_cred->user->processes) >=
63626 task_rlimit(p, RLIMIT_NPROC)) {
63627 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63628@@ -1256,6 +1317,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63629 if (clone_flags & CLONE_THREAD)
63630 p->tgid = current->tgid;
63631
63632+ gr_copy_label(p);
63633+
63634+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63635+ p->exec_id = current->exec_id;
63636+#endif
63637+
63638 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63639 /*
63640 * Clear TID on mm_release()?
63641@@ -1418,6 +1485,8 @@ bad_fork_cleanup_count:
63642 bad_fork_free:
63643 free_task(p);
63644 fork_out:
63645+ gr_log_forkfail(retval);
63646+
63647 return ERR_PTR(retval);
63648 }
63649
63650@@ -1518,6 +1587,8 @@ long do_fork(unsigned long clone_flags,
63651 if (clone_flags & CLONE_PARENT_SETTID)
63652 put_user(nr, parent_tidptr);
63653
63654+ gr_handle_brute_check();
63655+
63656 if (clone_flags & CLONE_VFORK) {
63657 p->vfork_done = &vfork;
63658 init_completion(&vfork);
63659@@ -1627,7 +1698,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63660 return 0;
63661
63662 /* don't need lock here; in the worst case we'll do useless copy */
63663- if (fs->users == 1)
63664+ if (atomic_read(&fs->users) == 1)
63665 return 0;
63666
63667 *new_fsp = copy_fs_struct(fs);
63668@@ -1716,7 +1787,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63669 fs = current->fs;
63670 spin_lock(&fs->lock);
63671 current->fs = new_fs;
63672- if (--fs->users)
63673+ gr_set_chroot_entries(current, &current->fs->root);
63674+ if (atomic_dec_return(&fs->users))
63675 new_fs = NULL;
63676 else
63677 new_fs = fs;
63678diff --git a/kernel/futex.c b/kernel/futex.c
63679index 1614be2..37abc7e 100644
63680--- a/kernel/futex.c
63681+++ b/kernel/futex.c
63682@@ -54,6 +54,7 @@
63683 #include <linux/mount.h>
63684 #include <linux/pagemap.h>
63685 #include <linux/syscalls.h>
63686+#include <linux/ptrace.h>
63687 #include <linux/signal.h>
63688 #include <linux/export.h>
63689 #include <linux/magic.h>
63690@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63691 struct page *page, *page_head;
63692 int err, ro = 0;
63693
63694+#ifdef CONFIG_PAX_SEGMEXEC
63695+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63696+ return -EFAULT;
63697+#endif
63698+
63699 /*
63700 * The futex address must be "naturally" aligned.
63701 */
63702@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63703 if (!p)
63704 goto err_unlock;
63705 ret = -EPERM;
63706+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63707+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63708+ goto err_unlock;
63709+#endif
63710 pcred = __task_cred(p);
63711 /* If victim is in different user_ns, then uids are not
63712 comparable, so we must have CAP_SYS_PTRACE */
63713@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63714 {
63715 u32 curval;
63716 int i;
63717+ mm_segment_t oldfs;
63718
63719 /*
63720 * This will fail and we want it. Some arch implementations do
63721@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63722 * implementation, the non-functional ones will return
63723 * -ENOSYS.
63724 */
63725+ oldfs = get_fs();
63726+ set_fs(USER_DS);
63727 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63728 futex_cmpxchg_enabled = 1;
63729+ set_fs(oldfs);
63730
63731 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63732 plist_head_init(&futex_queues[i].chain);
63733diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63734index 5f9e689..582d46d 100644
63735--- a/kernel/futex_compat.c
63736+++ b/kernel/futex_compat.c
63737@@ -10,6 +10,7 @@
63738 #include <linux/compat.h>
63739 #include <linux/nsproxy.h>
63740 #include <linux/futex.h>
63741+#include <linux/ptrace.h>
63742
63743 #include <asm/uaccess.h>
63744
63745@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63746 {
63747 struct compat_robust_list_head __user *head;
63748 unsigned long ret;
63749- const struct cred *cred = current_cred(), *pcred;
63750+ const struct cred *cred = current_cred();
63751+ const struct cred *pcred;
63752
63753 if (!futex_cmpxchg_enabled)
63754 return -ENOSYS;
63755@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63756 if (!p)
63757 goto err_unlock;
63758 ret = -EPERM;
63759+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63760+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63761+ goto err_unlock;
63762+#endif
63763 pcred = __task_cred(p);
63764 /* If victim is in different user_ns, then uids are not
63765 comparable, so we must have CAP_SYS_PTRACE */
63766diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63767index 9b22d03..6295b62 100644
63768--- a/kernel/gcov/base.c
63769+++ b/kernel/gcov/base.c
63770@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63771 }
63772
63773 #ifdef CONFIG_MODULES
63774-static inline int within(void *addr, void *start, unsigned long size)
63775-{
63776- return ((addr >= start) && (addr < start + size));
63777-}
63778-
63779 /* Update list and generate events when modules are unloaded. */
63780 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63781 void *data)
63782@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63783 prev = NULL;
63784 /* Remove entries located in module from linked list. */
63785 for (info = gcov_info_head; info; info = info->next) {
63786- if (within(info, mod->module_core, mod->core_size)) {
63787+ if (within_module_core_rw((unsigned long)info, mod)) {
63788 if (prev)
63789 prev->next = info->next;
63790 else
63791diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63792index ae34bf5..4e2f3d0 100644
63793--- a/kernel/hrtimer.c
63794+++ b/kernel/hrtimer.c
63795@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63796 local_irq_restore(flags);
63797 }
63798
63799-static void run_hrtimer_softirq(struct softirq_action *h)
63800+static void run_hrtimer_softirq(void)
63801 {
63802 hrtimer_peek_ahead_timers();
63803 }
63804diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63805index 66ff710..05a5128 100644
63806--- a/kernel/jump_label.c
63807+++ b/kernel/jump_label.c
63808@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63809
63810 size = (((unsigned long)stop - (unsigned long)start)
63811 / sizeof(struct jump_entry));
63812+ pax_open_kernel();
63813 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63814+ pax_close_kernel();
63815 }
63816
63817 static void jump_label_update(struct jump_label_key *key, int enable);
63818@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63819 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63820 struct jump_entry *iter;
63821
63822+ pax_open_kernel();
63823 for (iter = iter_start; iter < iter_stop; iter++) {
63824 if (within_module_init(iter->code, mod))
63825 iter->code = 0;
63826 }
63827+ pax_close_kernel();
63828 }
63829
63830 static int
63831diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63832index 079f1d3..a407562 100644
63833--- a/kernel/kallsyms.c
63834+++ b/kernel/kallsyms.c
63835@@ -11,6 +11,9 @@
63836 * Changed the compression method from stem compression to "table lookup"
63837 * compression (see scripts/kallsyms.c for a more complete description)
63838 */
63839+#ifdef CONFIG_GRKERNSEC_HIDESYM
63840+#define __INCLUDED_BY_HIDESYM 1
63841+#endif
63842 #include <linux/kallsyms.h>
63843 #include <linux/module.h>
63844 #include <linux/init.h>
63845@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63846
63847 static inline int is_kernel_inittext(unsigned long addr)
63848 {
63849+ if (system_state != SYSTEM_BOOTING)
63850+ return 0;
63851+
63852 if (addr >= (unsigned long)_sinittext
63853 && addr <= (unsigned long)_einittext)
63854 return 1;
63855 return 0;
63856 }
63857
63858+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63859+#ifdef CONFIG_MODULES
63860+static inline int is_module_text(unsigned long addr)
63861+{
63862+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63863+ return 1;
63864+
63865+ addr = ktla_ktva(addr);
63866+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63867+}
63868+#else
63869+static inline int is_module_text(unsigned long addr)
63870+{
63871+ return 0;
63872+}
63873+#endif
63874+#endif
63875+
63876 static inline int is_kernel_text(unsigned long addr)
63877 {
63878 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63879@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63880
63881 static inline int is_kernel(unsigned long addr)
63882 {
63883+
63884+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63885+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63886+ return 1;
63887+
63888+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63889+#else
63890 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63891+#endif
63892+
63893 return 1;
63894 return in_gate_area_no_mm(addr);
63895 }
63896
63897 static int is_ksym_addr(unsigned long addr)
63898 {
63899+
63900+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63901+ if (is_module_text(addr))
63902+ return 0;
63903+#endif
63904+
63905 if (all_var)
63906 return is_kernel(addr);
63907
63908@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63909
63910 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63911 {
63912- iter->name[0] = '\0';
63913 iter->nameoff = get_symbol_offset(new_pos);
63914 iter->pos = new_pos;
63915 }
63916@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63917 {
63918 struct kallsym_iter *iter = m->private;
63919
63920+#ifdef CONFIG_GRKERNSEC_HIDESYM
63921+ if (current_uid())
63922+ return 0;
63923+#endif
63924+
63925 /* Some debugging symbols have no name. Ignore them. */
63926 if (!iter->name[0])
63927 return 0;
63928@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63929 struct kallsym_iter *iter;
63930 int ret;
63931
63932- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63933+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63934 if (!iter)
63935 return -ENOMEM;
63936 reset_iter(iter, 0);
63937diff --git a/kernel/kexec.c b/kernel/kexec.c
63938index dc7bc08..4601964 100644
63939--- a/kernel/kexec.c
63940+++ b/kernel/kexec.c
63941@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63942 unsigned long flags)
63943 {
63944 struct compat_kexec_segment in;
63945- struct kexec_segment out, __user *ksegments;
63946+ struct kexec_segment out;
63947+ struct kexec_segment __user *ksegments;
63948 unsigned long i, result;
63949
63950 /* Don't allow clients that don't understand the native
63951diff --git a/kernel/kmod.c b/kernel/kmod.c
63952index a4bea97..7a1ae9a 100644
63953--- a/kernel/kmod.c
63954+++ b/kernel/kmod.c
63955@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63956 * If module auto-loading support is disabled then this function
63957 * becomes a no-operation.
63958 */
63959-int __request_module(bool wait, const char *fmt, ...)
63960+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63961 {
63962- va_list args;
63963 char module_name[MODULE_NAME_LEN];
63964 unsigned int max_modprobes;
63965 int ret;
63966- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63967+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63968 static char *envp[] = { "HOME=/",
63969 "TERM=linux",
63970 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63971@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63972 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63973 static int kmod_loop_msg;
63974
63975- va_start(args, fmt);
63976- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63977- va_end(args);
63978+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63979 if (ret >= MODULE_NAME_LEN)
63980 return -ENAMETOOLONG;
63981
63982@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63983 if (ret)
63984 return ret;
63985
63986+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63987+ if (!current_uid()) {
63988+ /* hack to workaround consolekit/udisks stupidity */
63989+ read_lock(&tasklist_lock);
63990+ if (!strcmp(current->comm, "mount") &&
63991+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63992+ read_unlock(&tasklist_lock);
63993+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63994+ return -EPERM;
63995+ }
63996+ read_unlock(&tasklist_lock);
63997+ }
63998+#endif
63999+
64000 /* If modprobe needs a service that is in a module, we get a recursive
64001 * loop. Limit the number of running kmod threads to max_threads/2 or
64002 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64003@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64004 atomic_dec(&kmod_concurrent);
64005 return ret;
64006 }
64007+
64008+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64009+{
64010+ va_list args;
64011+ int ret;
64012+
64013+ va_start(args, fmt);
64014+ ret = ____request_module(wait, module_param, fmt, args);
64015+ va_end(args);
64016+
64017+ return ret;
64018+}
64019+
64020+int __request_module(bool wait, const char *fmt, ...)
64021+{
64022+ va_list args;
64023+ int ret;
64024+
64025+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64026+ if (current_uid()) {
64027+ char module_param[MODULE_NAME_LEN];
64028+
64029+ memset(module_param, 0, sizeof(module_param));
64030+
64031+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64032+
64033+ va_start(args, fmt);
64034+ ret = ____request_module(wait, module_param, fmt, args);
64035+ va_end(args);
64036+
64037+ return ret;
64038+ }
64039+#endif
64040+
64041+ va_start(args, fmt);
64042+ ret = ____request_module(wait, NULL, fmt, args);
64043+ va_end(args);
64044+
64045+ return ret;
64046+}
64047+
64048 EXPORT_SYMBOL(__request_module);
64049 #endif /* CONFIG_MODULES */
64050
64051@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64052 *
64053 * Thus the __user pointer cast is valid here.
64054 */
64055- sys_wait4(pid, (int __user *)&ret, 0, NULL);
64056+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64057
64058 /*
64059 * If ret is 0, either ____call_usermodehelper failed and the
64060diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64061index faa39d1..d7ad37e 100644
64062--- a/kernel/kprobes.c
64063+++ b/kernel/kprobes.c
64064@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64065 * kernel image and loaded module images reside. This is required
64066 * so x86_64 can correctly handle the %rip-relative fixups.
64067 */
64068- kip->insns = module_alloc(PAGE_SIZE);
64069+ kip->insns = module_alloc_exec(PAGE_SIZE);
64070 if (!kip->insns) {
64071 kfree(kip);
64072 return NULL;
64073@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64074 */
64075 if (!list_is_singular(&kip->list)) {
64076 list_del(&kip->list);
64077- module_free(NULL, kip->insns);
64078+ module_free_exec(NULL, kip->insns);
64079 kfree(kip);
64080 }
64081 return 1;
64082@@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64083 {
64084 int i, err = 0;
64085 unsigned long offset = 0, size = 0;
64086- char *modname, namebuf[128];
64087+ char *modname, namebuf[KSYM_NAME_LEN];
64088 const char *symbol_name;
64089 void *addr;
64090 struct kprobe_blackpoint *kb;
64091@@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64092 const char *sym = NULL;
64093 unsigned int i = *(loff_t *) v;
64094 unsigned long offset = 0;
64095- char *modname, namebuf[128];
64096+ char *modname, namebuf[KSYM_NAME_LEN];
64097
64098 head = &kprobe_table[i];
64099 preempt_disable();
64100diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64101index b2e08c9..01d8049 100644
64102--- a/kernel/lockdep.c
64103+++ b/kernel/lockdep.c
64104@@ -592,6 +592,10 @@ static int static_obj(void *obj)
64105 end = (unsigned long) &_end,
64106 addr = (unsigned long) obj;
64107
64108+#ifdef CONFIG_PAX_KERNEXEC
64109+ start = ktla_ktva(start);
64110+#endif
64111+
64112 /*
64113 * static variable?
64114 */
64115@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64116 if (!static_obj(lock->key)) {
64117 debug_locks_off();
64118 printk("INFO: trying to register non-static key.\n");
64119+ printk("lock:%pS key:%pS.\n", lock, lock->key);
64120 printk("the code is fine but needs lockdep annotation.\n");
64121 printk("turning off the locking correctness validator.\n");
64122 dump_stack();
64123@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64124 if (!class)
64125 return 0;
64126 }
64127- atomic_inc((atomic_t *)&class->ops);
64128+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64129 if (very_verbose(class)) {
64130 printk("\nacquire class [%p] %s", class->key, class->name);
64131 if (class->name_version > 1)
64132diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64133index 91c32a0..b2c71c5 100644
64134--- a/kernel/lockdep_proc.c
64135+++ b/kernel/lockdep_proc.c
64136@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64137
64138 static void print_name(struct seq_file *m, struct lock_class *class)
64139 {
64140- char str[128];
64141+ char str[KSYM_NAME_LEN];
64142 const char *name = class->name;
64143
64144 if (!name) {
64145diff --git a/kernel/module.c b/kernel/module.c
64146index 178333c..04e3408 100644
64147--- a/kernel/module.c
64148+++ b/kernel/module.c
64149@@ -58,6 +58,7 @@
64150 #include <linux/jump_label.h>
64151 #include <linux/pfn.h>
64152 #include <linux/bsearch.h>
64153+#include <linux/grsecurity.h>
64154
64155 #define CREATE_TRACE_POINTS
64156 #include <trace/events/module.h>
64157@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64158
64159 /* Bounds of module allocation, for speeding __module_address.
64160 * Protected by module_mutex. */
64161-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64162+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64163+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64164
64165 int register_module_notifier(struct notifier_block * nb)
64166 {
64167@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64168 return true;
64169
64170 list_for_each_entry_rcu(mod, &modules, list) {
64171- struct symsearch arr[] = {
64172+ struct symsearch modarr[] = {
64173 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64174 NOT_GPL_ONLY, false },
64175 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64176@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64177 #endif
64178 };
64179
64180- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64181+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64182 return true;
64183 }
64184 return false;
64185@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64186 static int percpu_modalloc(struct module *mod,
64187 unsigned long size, unsigned long align)
64188 {
64189- if (align > PAGE_SIZE) {
64190+ if (align-1 >= PAGE_SIZE) {
64191 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64192 mod->name, align, PAGE_SIZE);
64193 align = PAGE_SIZE;
64194@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64195 */
64196 #ifdef CONFIG_SYSFS
64197
64198-#ifdef CONFIG_KALLSYMS
64199+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64200 static inline bool sect_empty(const Elf_Shdr *sect)
64201 {
64202 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64203@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64204
64205 static void unset_module_core_ro_nx(struct module *mod)
64206 {
64207- set_page_attributes(mod->module_core + mod->core_text_size,
64208- mod->module_core + mod->core_size,
64209+ set_page_attributes(mod->module_core_rw,
64210+ mod->module_core_rw + mod->core_size_rw,
64211 set_memory_x);
64212- set_page_attributes(mod->module_core,
64213- mod->module_core + mod->core_ro_size,
64214+ set_page_attributes(mod->module_core_rx,
64215+ mod->module_core_rx + mod->core_size_rx,
64216 set_memory_rw);
64217 }
64218
64219 static void unset_module_init_ro_nx(struct module *mod)
64220 {
64221- set_page_attributes(mod->module_init + mod->init_text_size,
64222- mod->module_init + mod->init_size,
64223+ set_page_attributes(mod->module_init_rw,
64224+ mod->module_init_rw + mod->init_size_rw,
64225 set_memory_x);
64226- set_page_attributes(mod->module_init,
64227- mod->module_init + mod->init_ro_size,
64228+ set_page_attributes(mod->module_init_rx,
64229+ mod->module_init_rx + mod->init_size_rx,
64230 set_memory_rw);
64231 }
64232
64233@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64234
64235 mutex_lock(&module_mutex);
64236 list_for_each_entry_rcu(mod, &modules, list) {
64237- if ((mod->module_core) && (mod->core_text_size)) {
64238- set_page_attributes(mod->module_core,
64239- mod->module_core + mod->core_text_size,
64240+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64241+ set_page_attributes(mod->module_core_rx,
64242+ mod->module_core_rx + mod->core_size_rx,
64243 set_memory_rw);
64244 }
64245- if ((mod->module_init) && (mod->init_text_size)) {
64246- set_page_attributes(mod->module_init,
64247- mod->module_init + mod->init_text_size,
64248+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64249+ set_page_attributes(mod->module_init_rx,
64250+ mod->module_init_rx + mod->init_size_rx,
64251 set_memory_rw);
64252 }
64253 }
64254@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64255
64256 mutex_lock(&module_mutex);
64257 list_for_each_entry_rcu(mod, &modules, list) {
64258- if ((mod->module_core) && (mod->core_text_size)) {
64259- set_page_attributes(mod->module_core,
64260- mod->module_core + mod->core_text_size,
64261+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64262+ set_page_attributes(mod->module_core_rx,
64263+ mod->module_core_rx + mod->core_size_rx,
64264 set_memory_ro);
64265 }
64266- if ((mod->module_init) && (mod->init_text_size)) {
64267- set_page_attributes(mod->module_init,
64268- mod->module_init + mod->init_text_size,
64269+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64270+ set_page_attributes(mod->module_init_rx,
64271+ mod->module_init_rx + mod->init_size_rx,
64272 set_memory_ro);
64273 }
64274 }
64275@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64276
64277 /* This may be NULL, but that's OK */
64278 unset_module_init_ro_nx(mod);
64279- module_free(mod, mod->module_init);
64280+ module_free(mod, mod->module_init_rw);
64281+ module_free_exec(mod, mod->module_init_rx);
64282 kfree(mod->args);
64283 percpu_modfree(mod);
64284
64285 /* Free lock-classes: */
64286- lockdep_free_key_range(mod->module_core, mod->core_size);
64287+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64288+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64289
64290 /* Finally, free the core (containing the module structure) */
64291 unset_module_core_ro_nx(mod);
64292- module_free(mod, mod->module_core);
64293+ module_free_exec(mod, mod->module_core_rx);
64294+ module_free(mod, mod->module_core_rw);
64295
64296 #ifdef CONFIG_MPU
64297 update_protections(current->mm);
64298@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64299 unsigned int i;
64300 int ret = 0;
64301 const struct kernel_symbol *ksym;
64302+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64303+ int is_fs_load = 0;
64304+ int register_filesystem_found = 0;
64305+ char *p;
64306+
64307+ p = strstr(mod->args, "grsec_modharden_fs");
64308+ if (p) {
64309+ char *endptr = p + strlen("grsec_modharden_fs");
64310+ /* copy \0 as well */
64311+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64312+ is_fs_load = 1;
64313+ }
64314+#endif
64315
64316 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64317 const char *name = info->strtab + sym[i].st_name;
64318
64319+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64320+ /* it's a real shame this will never get ripped and copied
64321+ upstream! ;(
64322+ */
64323+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64324+ register_filesystem_found = 1;
64325+#endif
64326+
64327 switch (sym[i].st_shndx) {
64328 case SHN_COMMON:
64329 /* We compiled with -fno-common. These are not
64330@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64331 ksym = resolve_symbol_wait(mod, info, name);
64332 /* Ok if resolved. */
64333 if (ksym && !IS_ERR(ksym)) {
64334+ pax_open_kernel();
64335 sym[i].st_value = ksym->value;
64336+ pax_close_kernel();
64337 break;
64338 }
64339
64340@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64341 secbase = (unsigned long)mod_percpu(mod);
64342 else
64343 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64344+ pax_open_kernel();
64345 sym[i].st_value += secbase;
64346+ pax_close_kernel();
64347 break;
64348 }
64349 }
64350
64351+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64352+ if (is_fs_load && !register_filesystem_found) {
64353+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64354+ ret = -EPERM;
64355+ }
64356+#endif
64357+
64358 return ret;
64359 }
64360
64361@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64362 || s->sh_entsize != ~0UL
64363 || strstarts(sname, ".init"))
64364 continue;
64365- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64366+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64367+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64368+ else
64369+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64370 DEBUGP("\t%s\n", name);
64371 }
64372- switch (m) {
64373- case 0: /* executable */
64374- mod->core_size = debug_align(mod->core_size);
64375- mod->core_text_size = mod->core_size;
64376- break;
64377- case 1: /* RO: text and ro-data */
64378- mod->core_size = debug_align(mod->core_size);
64379- mod->core_ro_size = mod->core_size;
64380- break;
64381- case 3: /* whole core */
64382- mod->core_size = debug_align(mod->core_size);
64383- break;
64384- }
64385 }
64386
64387 DEBUGP("Init section allocation order:\n");
64388@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64389 || s->sh_entsize != ~0UL
64390 || !strstarts(sname, ".init"))
64391 continue;
64392- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64393- | INIT_OFFSET_MASK);
64394+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64395+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64396+ else
64397+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64398+ s->sh_entsize |= INIT_OFFSET_MASK;
64399 DEBUGP("\t%s\n", sname);
64400 }
64401- switch (m) {
64402- case 0: /* executable */
64403- mod->init_size = debug_align(mod->init_size);
64404- mod->init_text_size = mod->init_size;
64405- break;
64406- case 1: /* RO: text and ro-data */
64407- mod->init_size = debug_align(mod->init_size);
64408- mod->init_ro_size = mod->init_size;
64409- break;
64410- case 3: /* whole init */
64411- mod->init_size = debug_align(mod->init_size);
64412- break;
64413- }
64414 }
64415 }
64416
64417@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64418
64419 /* Put symbol section at end of init part of module. */
64420 symsect->sh_flags |= SHF_ALLOC;
64421- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64422+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64423 info->index.sym) | INIT_OFFSET_MASK;
64424 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64425
64426@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64427 }
64428
64429 /* Append room for core symbols at end of core part. */
64430- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64431- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64432+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64433+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64434
64435 /* Put string table section at end of init part of module. */
64436 strsect->sh_flags |= SHF_ALLOC;
64437- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64438+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64439 info->index.str) | INIT_OFFSET_MASK;
64440 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64441
64442 /* Append room for core symbols' strings at end of core part. */
64443- info->stroffs = mod->core_size;
64444+ info->stroffs = mod->core_size_rx;
64445 __set_bit(0, info->strmap);
64446- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64447+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64448 }
64449
64450 static void add_kallsyms(struct module *mod, const struct load_info *info)
64451@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64452 /* Make sure we get permanent strtab: don't use info->strtab. */
64453 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64454
64455+ pax_open_kernel();
64456+
64457 /* Set types up while we still have access to sections. */
64458 for (i = 0; i < mod->num_symtab; i++)
64459 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64460
64461- mod->core_symtab = dst = mod->module_core + info->symoffs;
64462+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64463 src = mod->symtab;
64464 *dst = *src;
64465 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64466@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64467 }
64468 mod->core_num_syms = ndst;
64469
64470- mod->core_strtab = s = mod->module_core + info->stroffs;
64471+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64472 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64473 if (test_bit(i, info->strmap))
64474 *++s = mod->strtab[i];
64475+
64476+ pax_close_kernel();
64477 }
64478 #else
64479 static inline void layout_symtab(struct module *mod, struct load_info *info)
64480@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64481 return size == 0 ? NULL : vmalloc_exec(size);
64482 }
64483
64484-static void *module_alloc_update_bounds(unsigned long size)
64485+static void *module_alloc_update_bounds_rw(unsigned long size)
64486 {
64487 void *ret = module_alloc(size);
64488
64489 if (ret) {
64490 mutex_lock(&module_mutex);
64491 /* Update module bounds. */
64492- if ((unsigned long)ret < module_addr_min)
64493- module_addr_min = (unsigned long)ret;
64494- if ((unsigned long)ret + size > module_addr_max)
64495- module_addr_max = (unsigned long)ret + size;
64496+ if ((unsigned long)ret < module_addr_min_rw)
64497+ module_addr_min_rw = (unsigned long)ret;
64498+ if ((unsigned long)ret + size > module_addr_max_rw)
64499+ module_addr_max_rw = (unsigned long)ret + size;
64500+ mutex_unlock(&module_mutex);
64501+ }
64502+ return ret;
64503+}
64504+
64505+static void *module_alloc_update_bounds_rx(unsigned long size)
64506+{
64507+ void *ret = module_alloc_exec(size);
64508+
64509+ if (ret) {
64510+ mutex_lock(&module_mutex);
64511+ /* Update module bounds. */
64512+ if ((unsigned long)ret < module_addr_min_rx)
64513+ module_addr_min_rx = (unsigned long)ret;
64514+ if ((unsigned long)ret + size > module_addr_max_rx)
64515+ module_addr_max_rx = (unsigned long)ret + size;
64516 mutex_unlock(&module_mutex);
64517 }
64518 return ret;
64519@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64520 static int check_modinfo(struct module *mod, struct load_info *info)
64521 {
64522 const char *modmagic = get_modinfo(info, "vermagic");
64523+ const char *license = get_modinfo(info, "license");
64524 int err;
64525
64526+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64527+ if (!license || !license_is_gpl_compatible(license))
64528+ return -ENOEXEC;
64529+#endif
64530+
64531 /* This is allowed: modprobe --force will invalidate it. */
64532 if (!modmagic) {
64533 err = try_to_force_load(mod, "bad vermagic");
64534@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64535 }
64536
64537 /* Set up license info based on the info section */
64538- set_license(mod, get_modinfo(info, "license"));
64539+ set_license(mod, license);
64540
64541 return 0;
64542 }
64543@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64544 void *ptr;
64545
64546 /* Do the allocs. */
64547- ptr = module_alloc_update_bounds(mod->core_size);
64548+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64549 /*
64550 * The pointer to this block is stored in the module structure
64551 * which is inside the block. Just mark it as not being a
64552@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64553 if (!ptr)
64554 return -ENOMEM;
64555
64556- memset(ptr, 0, mod->core_size);
64557- mod->module_core = ptr;
64558+ memset(ptr, 0, mod->core_size_rw);
64559+ mod->module_core_rw = ptr;
64560
64561- ptr = module_alloc_update_bounds(mod->init_size);
64562+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64563 /*
64564 * The pointer to this block is stored in the module structure
64565 * which is inside the block. This block doesn't need to be
64566 * scanned as it contains data and code that will be freed
64567 * after the module is initialized.
64568 */
64569- kmemleak_ignore(ptr);
64570- if (!ptr && mod->init_size) {
64571- module_free(mod, mod->module_core);
64572+ kmemleak_not_leak(ptr);
64573+ if (!ptr && mod->init_size_rw) {
64574+ module_free(mod, mod->module_core_rw);
64575 return -ENOMEM;
64576 }
64577- memset(ptr, 0, mod->init_size);
64578- mod->module_init = ptr;
64579+ memset(ptr, 0, mod->init_size_rw);
64580+ mod->module_init_rw = ptr;
64581+
64582+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64583+ kmemleak_not_leak(ptr);
64584+ if (!ptr) {
64585+ module_free(mod, mod->module_init_rw);
64586+ module_free(mod, mod->module_core_rw);
64587+ return -ENOMEM;
64588+ }
64589+
64590+ pax_open_kernel();
64591+ memset(ptr, 0, mod->core_size_rx);
64592+ pax_close_kernel();
64593+ mod->module_core_rx = ptr;
64594+
64595+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64596+ kmemleak_not_leak(ptr);
64597+ if (!ptr && mod->init_size_rx) {
64598+ module_free_exec(mod, mod->module_core_rx);
64599+ module_free(mod, mod->module_init_rw);
64600+ module_free(mod, mod->module_core_rw);
64601+ return -ENOMEM;
64602+ }
64603+
64604+ pax_open_kernel();
64605+ memset(ptr, 0, mod->init_size_rx);
64606+ pax_close_kernel();
64607+ mod->module_init_rx = ptr;
64608
64609 /* Transfer each section which specifies SHF_ALLOC */
64610 DEBUGP("final section addresses:\n");
64611@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64612 if (!(shdr->sh_flags & SHF_ALLOC))
64613 continue;
64614
64615- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64616- dest = mod->module_init
64617- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64618- else
64619- dest = mod->module_core + shdr->sh_entsize;
64620+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64621+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64622+ dest = mod->module_init_rw
64623+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64624+ else
64625+ dest = mod->module_init_rx
64626+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64627+ } else {
64628+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64629+ dest = mod->module_core_rw + shdr->sh_entsize;
64630+ else
64631+ dest = mod->module_core_rx + shdr->sh_entsize;
64632+ }
64633+
64634+ if (shdr->sh_type != SHT_NOBITS) {
64635+
64636+#ifdef CONFIG_PAX_KERNEXEC
64637+#ifdef CONFIG_X86_64
64638+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64639+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64640+#endif
64641+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64642+ pax_open_kernel();
64643+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64644+ pax_close_kernel();
64645+ } else
64646+#endif
64647
64648- if (shdr->sh_type != SHT_NOBITS)
64649 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64650+ }
64651 /* Update sh_addr to point to copy in image. */
64652- shdr->sh_addr = (unsigned long)dest;
64653+
64654+#ifdef CONFIG_PAX_KERNEXEC
64655+ if (shdr->sh_flags & SHF_EXECINSTR)
64656+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64657+ else
64658+#endif
64659+
64660+ shdr->sh_addr = (unsigned long)dest;
64661 DEBUGP("\t0x%lx %s\n",
64662 shdr->sh_addr, info->secstrings + shdr->sh_name);
64663 }
64664@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64665 * Do it before processing of module parameters, so the module
64666 * can provide parameter accessor functions of its own.
64667 */
64668- if (mod->module_init)
64669- flush_icache_range((unsigned long)mod->module_init,
64670- (unsigned long)mod->module_init
64671- + mod->init_size);
64672- flush_icache_range((unsigned long)mod->module_core,
64673- (unsigned long)mod->module_core + mod->core_size);
64674+ if (mod->module_init_rx)
64675+ flush_icache_range((unsigned long)mod->module_init_rx,
64676+ (unsigned long)mod->module_init_rx
64677+ + mod->init_size_rx);
64678+ flush_icache_range((unsigned long)mod->module_core_rx,
64679+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64680
64681 set_fs(old_fs);
64682 }
64683@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64684 {
64685 kfree(info->strmap);
64686 percpu_modfree(mod);
64687- module_free(mod, mod->module_init);
64688- module_free(mod, mod->module_core);
64689+ module_free_exec(mod, mod->module_init_rx);
64690+ module_free_exec(mod, mod->module_core_rx);
64691+ module_free(mod, mod->module_init_rw);
64692+ module_free(mod, mod->module_core_rw);
64693 }
64694
64695 int __weak module_finalize(const Elf_Ehdr *hdr,
64696@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64697 if (err)
64698 goto free_unload;
64699
64700+ /* Now copy in args */
64701+ mod->args = strndup_user(uargs, ~0UL >> 1);
64702+ if (IS_ERR(mod->args)) {
64703+ err = PTR_ERR(mod->args);
64704+ goto free_unload;
64705+ }
64706+
64707 /* Set up MODINFO_ATTR fields */
64708 setup_modinfo(mod, &info);
64709
64710+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64711+ {
64712+ char *p, *p2;
64713+
64714+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64715+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64716+ err = -EPERM;
64717+ goto free_modinfo;
64718+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64719+ p += strlen("grsec_modharden_normal");
64720+ p2 = strstr(p, "_");
64721+ if (p2) {
64722+ *p2 = '\0';
64723+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64724+ *p2 = '_';
64725+ }
64726+ err = -EPERM;
64727+ goto free_modinfo;
64728+ }
64729+ }
64730+#endif
64731+
64732 /* Fix up syms, so that st_value is a pointer to location. */
64733 err = simplify_symbols(mod, &info);
64734 if (err < 0)
64735@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64736
64737 flush_module_icache(mod);
64738
64739- /* Now copy in args */
64740- mod->args = strndup_user(uargs, ~0UL >> 1);
64741- if (IS_ERR(mod->args)) {
64742- err = PTR_ERR(mod->args);
64743- goto free_arch_cleanup;
64744- }
64745-
64746 /* Mark state as coming so strong_try_module_get() ignores us. */
64747 mod->state = MODULE_STATE_COMING;
64748
64749@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64750 unlock:
64751 mutex_unlock(&module_mutex);
64752 synchronize_sched();
64753- kfree(mod->args);
64754- free_arch_cleanup:
64755 module_arch_cleanup(mod);
64756 free_modinfo:
64757 free_modinfo(mod);
64758+ kfree(mod->args);
64759 free_unload:
64760 module_unload_free(mod);
64761 free_module:
64762@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64763 MODULE_STATE_COMING, mod);
64764
64765 /* Set RO and NX regions for core */
64766- set_section_ro_nx(mod->module_core,
64767- mod->core_text_size,
64768- mod->core_ro_size,
64769- mod->core_size);
64770+ set_section_ro_nx(mod->module_core_rx,
64771+ mod->core_size_rx,
64772+ mod->core_size_rx,
64773+ mod->core_size_rx);
64774
64775 /* Set RO and NX regions for init */
64776- set_section_ro_nx(mod->module_init,
64777- mod->init_text_size,
64778- mod->init_ro_size,
64779- mod->init_size);
64780+ set_section_ro_nx(mod->module_init_rx,
64781+ mod->init_size_rx,
64782+ mod->init_size_rx,
64783+ mod->init_size_rx);
64784
64785 do_mod_ctors(mod);
64786 /* Start the module */
64787@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64788 mod->strtab = mod->core_strtab;
64789 #endif
64790 unset_module_init_ro_nx(mod);
64791- module_free(mod, mod->module_init);
64792- mod->module_init = NULL;
64793- mod->init_size = 0;
64794- mod->init_ro_size = 0;
64795- mod->init_text_size = 0;
64796+ module_free(mod, mod->module_init_rw);
64797+ module_free_exec(mod, mod->module_init_rx);
64798+ mod->module_init_rw = NULL;
64799+ mod->module_init_rx = NULL;
64800+ mod->init_size_rw = 0;
64801+ mod->init_size_rx = 0;
64802 mutex_unlock(&module_mutex);
64803
64804 return 0;
64805@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64806 unsigned long nextval;
64807
64808 /* At worse, next value is at end of module */
64809- if (within_module_init(addr, mod))
64810- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64811+ if (within_module_init_rx(addr, mod))
64812+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64813+ else if (within_module_init_rw(addr, mod))
64814+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64815+ else if (within_module_core_rx(addr, mod))
64816+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64817+ else if (within_module_core_rw(addr, mod))
64818+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64819 else
64820- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64821+ return NULL;
64822
64823 /* Scan for closest preceding symbol, and next symbol. (ELF
64824 starts real symbols at 1). */
64825@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64826 char buf[8];
64827
64828 seq_printf(m, "%s %u",
64829- mod->name, mod->init_size + mod->core_size);
64830+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64831 print_unload_info(m, mod);
64832
64833 /* Informative for users. */
64834@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64835 mod->state == MODULE_STATE_COMING ? "Loading":
64836 "Live");
64837 /* Used by oprofile and other similar tools. */
64838- seq_printf(m, " 0x%pK", mod->module_core);
64839+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64840
64841 /* Taints info */
64842 if (mod->taints)
64843@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64844
64845 static int __init proc_modules_init(void)
64846 {
64847+#ifndef CONFIG_GRKERNSEC_HIDESYM
64848+#ifdef CONFIG_GRKERNSEC_PROC_USER
64849+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64850+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64851+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64852+#else
64853 proc_create("modules", 0, NULL, &proc_modules_operations);
64854+#endif
64855+#else
64856+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64857+#endif
64858 return 0;
64859 }
64860 module_init(proc_modules_init);
64861@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64862 {
64863 struct module *mod;
64864
64865- if (addr < module_addr_min || addr > module_addr_max)
64866+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64867+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64868 return NULL;
64869
64870 list_for_each_entry_rcu(mod, &modules, list)
64871- if (within_module_core(addr, mod)
64872- || within_module_init(addr, mod))
64873+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64874 return mod;
64875 return NULL;
64876 }
64877@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64878 */
64879 struct module *__module_text_address(unsigned long addr)
64880 {
64881- struct module *mod = __module_address(addr);
64882+ struct module *mod;
64883+
64884+#ifdef CONFIG_X86_32
64885+ addr = ktla_ktva(addr);
64886+#endif
64887+
64888+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64889+ return NULL;
64890+
64891+ mod = __module_address(addr);
64892+
64893 if (mod) {
64894 /* Make sure it's within the text section. */
64895- if (!within(addr, mod->module_init, mod->init_text_size)
64896- && !within(addr, mod->module_core, mod->core_text_size))
64897+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64898 mod = NULL;
64899 }
64900 return mod;
64901diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64902index 7e3443f..b2a1e6b 100644
64903--- a/kernel/mutex-debug.c
64904+++ b/kernel/mutex-debug.c
64905@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64906 }
64907
64908 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64909- struct thread_info *ti)
64910+ struct task_struct *task)
64911 {
64912 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64913
64914 /* Mark the current thread as blocked on the lock: */
64915- ti->task->blocked_on = waiter;
64916+ task->blocked_on = waiter;
64917 }
64918
64919 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64920- struct thread_info *ti)
64921+ struct task_struct *task)
64922 {
64923 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64924- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64925- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64926- ti->task->blocked_on = NULL;
64927+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64928+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64929+ task->blocked_on = NULL;
64930
64931 list_del_init(&waiter->list);
64932 waiter->task = NULL;
64933diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64934index 0799fd3..d06ae3b 100644
64935--- a/kernel/mutex-debug.h
64936+++ b/kernel/mutex-debug.h
64937@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64938 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64939 extern void debug_mutex_add_waiter(struct mutex *lock,
64940 struct mutex_waiter *waiter,
64941- struct thread_info *ti);
64942+ struct task_struct *task);
64943 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64944- struct thread_info *ti);
64945+ struct task_struct *task);
64946 extern void debug_mutex_unlock(struct mutex *lock);
64947 extern void debug_mutex_init(struct mutex *lock, const char *name,
64948 struct lock_class_key *key);
64949diff --git a/kernel/mutex.c b/kernel/mutex.c
64950index 89096dd..f91ebc5 100644
64951--- a/kernel/mutex.c
64952+++ b/kernel/mutex.c
64953@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64954 spin_lock_mutex(&lock->wait_lock, flags);
64955
64956 debug_mutex_lock_common(lock, &waiter);
64957- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64958+ debug_mutex_add_waiter(lock, &waiter, task);
64959
64960 /* add waiting tasks to the end of the waitqueue (FIFO): */
64961 list_add_tail(&waiter.list, &lock->wait_list);
64962@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64963 * TASK_UNINTERRUPTIBLE case.)
64964 */
64965 if (unlikely(signal_pending_state(state, task))) {
64966- mutex_remove_waiter(lock, &waiter,
64967- task_thread_info(task));
64968+ mutex_remove_waiter(lock, &waiter, task);
64969 mutex_release(&lock->dep_map, 1, ip);
64970 spin_unlock_mutex(&lock->wait_lock, flags);
64971
64972@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64973 done:
64974 lock_acquired(&lock->dep_map, ip);
64975 /* got the lock - rejoice! */
64976- mutex_remove_waiter(lock, &waiter, current_thread_info());
64977+ mutex_remove_waiter(lock, &waiter, task);
64978 mutex_set_owner(lock);
64979
64980 /* set it to 0 if there are no waiters left: */
64981diff --git a/kernel/padata.c b/kernel/padata.c
64982index b452599..5d68f4e 100644
64983--- a/kernel/padata.c
64984+++ b/kernel/padata.c
64985@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64986 padata->pd = pd;
64987 padata->cb_cpu = cb_cpu;
64988
64989- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64990- atomic_set(&pd->seq_nr, -1);
64991+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64992+ atomic_set_unchecked(&pd->seq_nr, -1);
64993
64994- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64995+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64996
64997 target_cpu = padata_cpu_hash(padata);
64998 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64999@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65000 padata_init_pqueues(pd);
65001 padata_init_squeues(pd);
65002 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65003- atomic_set(&pd->seq_nr, -1);
65004+ atomic_set_unchecked(&pd->seq_nr, -1);
65005 atomic_set(&pd->reorder_objects, 0);
65006 atomic_set(&pd->refcnt, 0);
65007 pd->pinst = pinst;
65008diff --git a/kernel/panic.c b/kernel/panic.c
65009index 3458469..342c500 100644
65010--- a/kernel/panic.c
65011+++ b/kernel/panic.c
65012@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65013 va_end(args);
65014 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65015 #ifdef CONFIG_DEBUG_BUGVERBOSE
65016- dump_stack();
65017+ /*
65018+ * Avoid nested stack-dumping if a panic occurs during oops processing
65019+ */
65020+ if (!oops_in_progress)
65021+ dump_stack();
65022 #endif
65023
65024 /*
65025@@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65026 const char *board;
65027
65028 printk(KERN_WARNING "------------[ cut here ]------------\n");
65029- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65030+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65031 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65032 if (board)
65033 printk(KERN_WARNING "Hardware name: %s\n", board);
65034@@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65035 */
65036 void __stack_chk_fail(void)
65037 {
65038- panic("stack-protector: Kernel stack is corrupted in: %p\n",
65039+ dump_stack();
65040+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65041 __builtin_return_address(0));
65042 }
65043 EXPORT_SYMBOL(__stack_chk_fail);
65044diff --git a/kernel/pid.c b/kernel/pid.c
65045index fa5f722..0c93e57 100644
65046--- a/kernel/pid.c
65047+++ b/kernel/pid.c
65048@@ -33,6 +33,7 @@
65049 #include <linux/rculist.h>
65050 #include <linux/bootmem.h>
65051 #include <linux/hash.h>
65052+#include <linux/security.h>
65053 #include <linux/pid_namespace.h>
65054 #include <linux/init_task.h>
65055 #include <linux/syscalls.h>
65056@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65057
65058 int pid_max = PID_MAX_DEFAULT;
65059
65060-#define RESERVED_PIDS 300
65061+#define RESERVED_PIDS 500
65062
65063 int pid_max_min = RESERVED_PIDS + 1;
65064 int pid_max_max = PID_MAX_LIMIT;
65065@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65066 */
65067 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65068 {
65069+ struct task_struct *task;
65070+
65071 rcu_lockdep_assert(rcu_read_lock_held(),
65072 "find_task_by_pid_ns() needs rcu_read_lock()"
65073 " protection");
65074- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65075+
65076+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65077+
65078+ if (gr_pid_is_chrooted(task))
65079+ return NULL;
65080+
65081+ return task;
65082 }
65083
65084 struct task_struct *find_task_by_vpid(pid_t vnr)
65085@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65086 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65087 }
65088
65089+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65090+{
65091+ rcu_lockdep_assert(rcu_read_lock_held(),
65092+ "find_task_by_pid_ns() needs rcu_read_lock()"
65093+ " protection");
65094+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65095+}
65096+
65097 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65098 {
65099 struct pid *pid;
65100diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65101index e7cb76d..75eceb3 100644
65102--- a/kernel/posix-cpu-timers.c
65103+++ b/kernel/posix-cpu-timers.c
65104@@ -6,6 +6,7 @@
65105 #include <linux/posix-timers.h>
65106 #include <linux/errno.h>
65107 #include <linux/math64.h>
65108+#include <linux/security.h>
65109 #include <asm/uaccess.h>
65110 #include <linux/kernel_stat.h>
65111 #include <trace/events/timer.h>
65112@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65113
65114 static __init int init_posix_cpu_timers(void)
65115 {
65116- struct k_clock process = {
65117+ static struct k_clock process = {
65118 .clock_getres = process_cpu_clock_getres,
65119 .clock_get = process_cpu_clock_get,
65120 .timer_create = process_cpu_timer_create,
65121 .nsleep = process_cpu_nsleep,
65122 .nsleep_restart = process_cpu_nsleep_restart,
65123 };
65124- struct k_clock thread = {
65125+ static struct k_clock thread = {
65126 .clock_getres = thread_cpu_clock_getres,
65127 .clock_get = thread_cpu_clock_get,
65128 .timer_create = thread_cpu_timer_create,
65129diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65130index 69185ae..cc2847a 100644
65131--- a/kernel/posix-timers.c
65132+++ b/kernel/posix-timers.c
65133@@ -43,6 +43,7 @@
65134 #include <linux/idr.h>
65135 #include <linux/posix-clock.h>
65136 #include <linux/posix-timers.h>
65137+#include <linux/grsecurity.h>
65138 #include <linux/syscalls.h>
65139 #include <linux/wait.h>
65140 #include <linux/workqueue.h>
65141@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65142 * which we beg off on and pass to do_sys_settimeofday().
65143 */
65144
65145-static struct k_clock posix_clocks[MAX_CLOCKS];
65146+static struct k_clock *posix_clocks[MAX_CLOCKS];
65147
65148 /*
65149 * These ones are defined below.
65150@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65151 */
65152 static __init int init_posix_timers(void)
65153 {
65154- struct k_clock clock_realtime = {
65155+ static struct k_clock clock_realtime = {
65156 .clock_getres = hrtimer_get_res,
65157 .clock_get = posix_clock_realtime_get,
65158 .clock_set = posix_clock_realtime_set,
65159@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65160 .timer_get = common_timer_get,
65161 .timer_del = common_timer_del,
65162 };
65163- struct k_clock clock_monotonic = {
65164+ static struct k_clock clock_monotonic = {
65165 .clock_getres = hrtimer_get_res,
65166 .clock_get = posix_ktime_get_ts,
65167 .nsleep = common_nsleep,
65168@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65169 .timer_get = common_timer_get,
65170 .timer_del = common_timer_del,
65171 };
65172- struct k_clock clock_monotonic_raw = {
65173+ static struct k_clock clock_monotonic_raw = {
65174 .clock_getres = hrtimer_get_res,
65175 .clock_get = posix_get_monotonic_raw,
65176 };
65177- struct k_clock clock_realtime_coarse = {
65178+ static struct k_clock clock_realtime_coarse = {
65179 .clock_getres = posix_get_coarse_res,
65180 .clock_get = posix_get_realtime_coarse,
65181 };
65182- struct k_clock clock_monotonic_coarse = {
65183+ static struct k_clock clock_monotonic_coarse = {
65184 .clock_getres = posix_get_coarse_res,
65185 .clock_get = posix_get_monotonic_coarse,
65186 };
65187- struct k_clock clock_boottime = {
65188+ static struct k_clock clock_boottime = {
65189 .clock_getres = hrtimer_get_res,
65190 .clock_get = posix_get_boottime,
65191 .nsleep = common_nsleep,
65192@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65193 return;
65194 }
65195
65196- posix_clocks[clock_id] = *new_clock;
65197+ posix_clocks[clock_id] = new_clock;
65198 }
65199 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65200
65201@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65202 return (id & CLOCKFD_MASK) == CLOCKFD ?
65203 &clock_posix_dynamic : &clock_posix_cpu;
65204
65205- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65206+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65207 return NULL;
65208- return &posix_clocks[id];
65209+ return posix_clocks[id];
65210 }
65211
65212 static int common_timer_create(struct k_itimer *new_timer)
65213@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65214 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65215 return -EFAULT;
65216
65217+ /* only the CLOCK_REALTIME clock can be set, all other clocks
65218+ have their clock_set fptr set to a nosettime dummy function
65219+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65220+ call common_clock_set, which calls do_sys_settimeofday, which
65221+ we hook
65222+ */
65223+
65224 return kc->clock_set(which_clock, &new_tp);
65225 }
65226
65227diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65228index d523593..68197a4 100644
65229--- a/kernel/power/poweroff.c
65230+++ b/kernel/power/poweroff.c
65231@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65232 .enable_mask = SYSRQ_ENABLE_BOOT,
65233 };
65234
65235-static int pm_sysrq_init(void)
65236+static int __init pm_sysrq_init(void)
65237 {
65238 register_sysrq_key('o', &sysrq_poweroff_op);
65239 return 0;
65240diff --git a/kernel/power/process.c b/kernel/power/process.c
65241index 3d4b954..11af930 100644
65242--- a/kernel/power/process.c
65243+++ b/kernel/power/process.c
65244@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65245 u64 elapsed_csecs64;
65246 unsigned int elapsed_csecs;
65247 bool wakeup = false;
65248+ bool timedout = false;
65249
65250 do_gettimeofday(&start);
65251
65252@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65253
65254 while (true) {
65255 todo = 0;
65256+ if (time_after(jiffies, end_time))
65257+ timedout = true;
65258 read_lock(&tasklist_lock);
65259 do_each_thread(g, p) {
65260 if (frozen(p) || !freezable(p))
65261@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65262 * try_to_stop() after schedule() in ptrace/signal
65263 * stop sees TIF_FREEZE.
65264 */
65265- if (!task_is_stopped_or_traced(p) &&
65266- !freezer_should_skip(p))
65267+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65268 todo++;
65269+ if (timedout) {
65270+ printk(KERN_ERR "Task refusing to freeze:\n");
65271+ sched_show_task(p);
65272+ }
65273+ }
65274 } while_each_thread(g, p);
65275 read_unlock(&tasklist_lock);
65276
65277@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65278 todo += wq_busy;
65279 }
65280
65281- if (!todo || time_after(jiffies, end_time))
65282+ if (!todo || timedout)
65283 break;
65284
65285 if (pm_wakeup_pending()) {
65286diff --git a/kernel/printk.c b/kernel/printk.c
65287index 7982a0a..2095fdc 100644
65288--- a/kernel/printk.c
65289+++ b/kernel/printk.c
65290@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65291 if (from_file && type != SYSLOG_ACTION_OPEN)
65292 return 0;
65293
65294+#ifdef CONFIG_GRKERNSEC_DMESG
65295+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65296+ return -EPERM;
65297+#endif
65298+
65299 if (syslog_action_restricted(type)) {
65300 if (capable(CAP_SYSLOG))
65301 return 0;
65302diff --git a/kernel/profile.c b/kernel/profile.c
65303index 76b8e77..a2930e8 100644
65304--- a/kernel/profile.c
65305+++ b/kernel/profile.c
65306@@ -39,7 +39,7 @@ struct profile_hit {
65307 /* Oprofile timer tick hook */
65308 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65309
65310-static atomic_t *prof_buffer;
65311+static atomic_unchecked_t *prof_buffer;
65312 static unsigned long prof_len, prof_shift;
65313
65314 int prof_on __read_mostly;
65315@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65316 hits[i].pc = 0;
65317 continue;
65318 }
65319- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65320+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65321 hits[i].hits = hits[i].pc = 0;
65322 }
65323 }
65324@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65325 * Add the current hit(s) and flush the write-queue out
65326 * to the global buffer:
65327 */
65328- atomic_add(nr_hits, &prof_buffer[pc]);
65329+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65330 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65331- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65332+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65333 hits[i].pc = hits[i].hits = 0;
65334 }
65335 out:
65336@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65337 {
65338 unsigned long pc;
65339 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65340- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65341+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65342 }
65343 #endif /* !CONFIG_SMP */
65344
65345@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65346 return -EFAULT;
65347 buf++; p++; count--; read++;
65348 }
65349- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65350+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65351 if (copy_to_user(buf, (void *)pnt, count))
65352 return -EFAULT;
65353 read += count;
65354@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65355 }
65356 #endif
65357 profile_discard_flip_buffers();
65358- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65359+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65360 return count;
65361 }
65362
65363diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65364index 78ab24a..332c915 100644
65365--- a/kernel/ptrace.c
65366+++ b/kernel/ptrace.c
65367@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65368 return ret;
65369 }
65370
65371-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65372+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65373+ unsigned int log)
65374 {
65375 const struct cred *cred = current_cred(), *tcred;
65376
65377@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65378 cred->gid == tcred->sgid &&
65379 cred->gid == tcred->gid))
65380 goto ok;
65381- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65382+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65383+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65384 goto ok;
65385 rcu_read_unlock();
65386 return -EPERM;
65387@@ -207,7 +209,9 @@ ok:
65388 smp_rmb();
65389 if (task->mm)
65390 dumpable = get_dumpable(task->mm);
65391- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65392+ if (!dumpable &&
65393+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65394+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65395 return -EPERM;
65396
65397 return security_ptrace_access_check(task, mode);
65398@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65399 {
65400 int err;
65401 task_lock(task);
65402- err = __ptrace_may_access(task, mode);
65403+ err = __ptrace_may_access(task, mode, 0);
65404+ task_unlock(task);
65405+ return !err;
65406+}
65407+
65408+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65409+{
65410+ return __ptrace_may_access(task, mode, 0);
65411+}
65412+
65413+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65414+{
65415+ int err;
65416+ task_lock(task);
65417+ err = __ptrace_may_access(task, mode, 1);
65418 task_unlock(task);
65419 return !err;
65420 }
65421@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65422 goto out;
65423
65424 task_lock(task);
65425- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65426+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65427 task_unlock(task);
65428 if (retval)
65429 goto unlock_creds;
65430@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65431 task->ptrace = PT_PTRACED;
65432 if (seize)
65433 task->ptrace |= PT_SEIZED;
65434- if (task_ns_capable(task, CAP_SYS_PTRACE))
65435+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65436 task->ptrace |= PT_PTRACE_CAP;
65437
65438 __ptrace_link(task, current);
65439@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65440 break;
65441 return -EIO;
65442 }
65443- if (copy_to_user(dst, buf, retval))
65444+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65445 return -EFAULT;
65446 copied += retval;
65447 src += retval;
65448@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65449 bool seized = child->ptrace & PT_SEIZED;
65450 int ret = -EIO;
65451 siginfo_t siginfo, *si;
65452- void __user *datavp = (void __user *) data;
65453+ void __user *datavp = (__force void __user *) data;
65454 unsigned long __user *datalp = datavp;
65455 unsigned long flags;
65456
65457@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65458 goto out;
65459 }
65460
65461+ if (gr_handle_ptrace(child, request)) {
65462+ ret = -EPERM;
65463+ goto out_put_task_struct;
65464+ }
65465+
65466 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65467 ret = ptrace_attach(child, request, data);
65468 /*
65469 * Some architectures need to do book-keeping after
65470 * a ptrace attach.
65471 */
65472- if (!ret)
65473+ if (!ret) {
65474 arch_ptrace_attach(child);
65475+ gr_audit_ptrace(child);
65476+ }
65477 goto out_put_task_struct;
65478 }
65479
65480@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65481 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65482 if (copied != sizeof(tmp))
65483 return -EIO;
65484- return put_user(tmp, (unsigned long __user *)data);
65485+ return put_user(tmp, (__force unsigned long __user *)data);
65486 }
65487
65488 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65489@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65490 goto out;
65491 }
65492
65493+ if (gr_handle_ptrace(child, request)) {
65494+ ret = -EPERM;
65495+ goto out_put_task_struct;
65496+ }
65497+
65498 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65499 ret = ptrace_attach(child, request, data);
65500 /*
65501 * Some architectures need to do book-keeping after
65502 * a ptrace attach.
65503 */
65504- if (!ret)
65505+ if (!ret) {
65506 arch_ptrace_attach(child);
65507+ gr_audit_ptrace(child);
65508+ }
65509 goto out_put_task_struct;
65510 }
65511
65512diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65513index 764825c..3aa6ac4 100644
65514--- a/kernel/rcutorture.c
65515+++ b/kernel/rcutorture.c
65516@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65517 { 0 };
65518 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65519 { 0 };
65520-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65521-static atomic_t n_rcu_torture_alloc;
65522-static atomic_t n_rcu_torture_alloc_fail;
65523-static atomic_t n_rcu_torture_free;
65524-static atomic_t n_rcu_torture_mberror;
65525-static atomic_t n_rcu_torture_error;
65526+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65527+static atomic_unchecked_t n_rcu_torture_alloc;
65528+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65529+static atomic_unchecked_t n_rcu_torture_free;
65530+static atomic_unchecked_t n_rcu_torture_mberror;
65531+static atomic_unchecked_t n_rcu_torture_error;
65532 static long n_rcu_torture_boost_ktrerror;
65533 static long n_rcu_torture_boost_rterror;
65534 static long n_rcu_torture_boost_failure;
65535@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65536
65537 spin_lock_bh(&rcu_torture_lock);
65538 if (list_empty(&rcu_torture_freelist)) {
65539- atomic_inc(&n_rcu_torture_alloc_fail);
65540+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65541 spin_unlock_bh(&rcu_torture_lock);
65542 return NULL;
65543 }
65544- atomic_inc(&n_rcu_torture_alloc);
65545+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65546 p = rcu_torture_freelist.next;
65547 list_del_init(p);
65548 spin_unlock_bh(&rcu_torture_lock);
65549@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65550 static void
65551 rcu_torture_free(struct rcu_torture *p)
65552 {
65553- atomic_inc(&n_rcu_torture_free);
65554+ atomic_inc_unchecked(&n_rcu_torture_free);
65555 spin_lock_bh(&rcu_torture_lock);
65556 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65557 spin_unlock_bh(&rcu_torture_lock);
65558@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65559 i = rp->rtort_pipe_count;
65560 if (i > RCU_TORTURE_PIPE_LEN)
65561 i = RCU_TORTURE_PIPE_LEN;
65562- atomic_inc(&rcu_torture_wcount[i]);
65563+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65564 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65565 rp->rtort_mbtest = 0;
65566 rcu_torture_free(rp);
65567@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65568 i = rp->rtort_pipe_count;
65569 if (i > RCU_TORTURE_PIPE_LEN)
65570 i = RCU_TORTURE_PIPE_LEN;
65571- atomic_inc(&rcu_torture_wcount[i]);
65572+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65573 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65574 rp->rtort_mbtest = 0;
65575 list_del(&rp->rtort_free);
65576@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65577 i = old_rp->rtort_pipe_count;
65578 if (i > RCU_TORTURE_PIPE_LEN)
65579 i = RCU_TORTURE_PIPE_LEN;
65580- atomic_inc(&rcu_torture_wcount[i]);
65581+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65582 old_rp->rtort_pipe_count++;
65583 cur_ops->deferred_free(old_rp);
65584 }
65585@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65586 return;
65587 }
65588 if (p->rtort_mbtest == 0)
65589- atomic_inc(&n_rcu_torture_mberror);
65590+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65591 spin_lock(&rand_lock);
65592 cur_ops->read_delay(&rand);
65593 n_rcu_torture_timers++;
65594@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65595 continue;
65596 }
65597 if (p->rtort_mbtest == 0)
65598- atomic_inc(&n_rcu_torture_mberror);
65599+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65600 cur_ops->read_delay(&rand);
65601 preempt_disable();
65602 pipe_count = p->rtort_pipe_count;
65603@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65604 rcu_torture_current,
65605 rcu_torture_current_version,
65606 list_empty(&rcu_torture_freelist),
65607- atomic_read(&n_rcu_torture_alloc),
65608- atomic_read(&n_rcu_torture_alloc_fail),
65609- atomic_read(&n_rcu_torture_free),
65610- atomic_read(&n_rcu_torture_mberror),
65611+ atomic_read_unchecked(&n_rcu_torture_alloc),
65612+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65613+ atomic_read_unchecked(&n_rcu_torture_free),
65614+ atomic_read_unchecked(&n_rcu_torture_mberror),
65615 n_rcu_torture_boost_ktrerror,
65616 n_rcu_torture_boost_rterror,
65617 n_rcu_torture_boost_failure,
65618 n_rcu_torture_boosts,
65619 n_rcu_torture_timers);
65620- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65621+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65622 n_rcu_torture_boost_ktrerror != 0 ||
65623 n_rcu_torture_boost_rterror != 0 ||
65624 n_rcu_torture_boost_failure != 0)
65625@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65626 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65627 if (i > 1) {
65628 cnt += sprintf(&page[cnt], "!!! ");
65629- atomic_inc(&n_rcu_torture_error);
65630+ atomic_inc_unchecked(&n_rcu_torture_error);
65631 WARN_ON_ONCE(1);
65632 }
65633 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65634@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65635 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65636 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65637 cnt += sprintf(&page[cnt], " %d",
65638- atomic_read(&rcu_torture_wcount[i]));
65639+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65640 }
65641 cnt += sprintf(&page[cnt], "\n");
65642 if (cur_ops->stats)
65643@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65644
65645 if (cur_ops->cleanup)
65646 cur_ops->cleanup();
65647- if (atomic_read(&n_rcu_torture_error))
65648+ if (atomic_read_unchecked(&n_rcu_torture_error))
65649 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65650 else
65651 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65652@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65653
65654 rcu_torture_current = NULL;
65655 rcu_torture_current_version = 0;
65656- atomic_set(&n_rcu_torture_alloc, 0);
65657- atomic_set(&n_rcu_torture_alloc_fail, 0);
65658- atomic_set(&n_rcu_torture_free, 0);
65659- atomic_set(&n_rcu_torture_mberror, 0);
65660- atomic_set(&n_rcu_torture_error, 0);
65661+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65662+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65663+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65664+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65665+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65666 n_rcu_torture_boost_ktrerror = 0;
65667 n_rcu_torture_boost_rterror = 0;
65668 n_rcu_torture_boost_failure = 0;
65669 n_rcu_torture_boosts = 0;
65670 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65671- atomic_set(&rcu_torture_wcount[i], 0);
65672+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65673 for_each_possible_cpu(cpu) {
65674 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65675 per_cpu(rcu_torture_count, cpu)[i] = 0;
65676diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65677index 6b76d81..7afc1b3 100644
65678--- a/kernel/rcutree.c
65679+++ b/kernel/rcutree.c
65680@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65681 trace_rcu_dyntick("Start");
65682 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65683 smp_mb__before_atomic_inc(); /* See above. */
65684- atomic_inc(&rdtp->dynticks);
65685+ atomic_inc_unchecked(&rdtp->dynticks);
65686 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65687- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65688+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65689 local_irq_restore(flags);
65690 }
65691
65692@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65693 return;
65694 }
65695 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65696- atomic_inc(&rdtp->dynticks);
65697+ atomic_inc_unchecked(&rdtp->dynticks);
65698 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65699 smp_mb__after_atomic_inc(); /* See above. */
65700- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65701+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65702 trace_rcu_dyntick("End");
65703 local_irq_restore(flags);
65704 }
65705@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65706 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65707
65708 if (rdtp->dynticks_nmi_nesting == 0 &&
65709- (atomic_read(&rdtp->dynticks) & 0x1))
65710+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65711 return;
65712 rdtp->dynticks_nmi_nesting++;
65713 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65714- atomic_inc(&rdtp->dynticks);
65715+ atomic_inc_unchecked(&rdtp->dynticks);
65716 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65717 smp_mb__after_atomic_inc(); /* See above. */
65718- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65719+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65720 }
65721
65722 /**
65723@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65724 return;
65725 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65726 smp_mb__before_atomic_inc(); /* See above. */
65727- atomic_inc(&rdtp->dynticks);
65728+ atomic_inc_unchecked(&rdtp->dynticks);
65729 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65730- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65731+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65732 }
65733
65734 /**
65735@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65736 */
65737 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65738 {
65739- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65740+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65741 return 0;
65742 }
65743
65744@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65745 unsigned int curr;
65746 unsigned int snap;
65747
65748- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65749+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65750 snap = (unsigned int)rdp->dynticks_snap;
65751
65752 /*
65753@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65754 /*
65755 * Do RCU core processing for the current CPU.
65756 */
65757-static void rcu_process_callbacks(struct softirq_action *unused)
65758+static void rcu_process_callbacks(void)
65759 {
65760 trace_rcu_utilization("Start RCU core");
65761 __rcu_process_callbacks(&rcu_sched_state,
65762diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65763index 849ce9e..74bc9de 100644
65764--- a/kernel/rcutree.h
65765+++ b/kernel/rcutree.h
65766@@ -86,7 +86,7 @@
65767 struct rcu_dynticks {
65768 int dynticks_nesting; /* Track irq/process nesting level. */
65769 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65770- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65771+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65772 };
65773
65774 /* RCU's kthread states for tracing. */
65775diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65776index 4b9b9f8..2326053 100644
65777--- a/kernel/rcutree_plugin.h
65778+++ b/kernel/rcutree_plugin.h
65779@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65780
65781 /* Clean up and exit. */
65782 smp_mb(); /* ensure expedited GP seen before counter increment. */
65783- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65784+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65785 unlock_mb_ret:
65786 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65787 mb_ret:
65788@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65789
65790 #else /* #ifndef CONFIG_SMP */
65791
65792-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65793-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65794+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65795+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65796
65797 static int synchronize_sched_expedited_cpu_stop(void *data)
65798 {
65799@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65800 int firstsnap, s, snap, trycount = 0;
65801
65802 /* Note that atomic_inc_return() implies full memory barrier. */
65803- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65804+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65805 get_online_cpus();
65806
65807 /*
65808@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65809 }
65810
65811 /* Check to see if someone else did our work for us. */
65812- s = atomic_read(&sync_sched_expedited_done);
65813+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65814 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65815 smp_mb(); /* ensure test happens before caller kfree */
65816 return;
65817@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65818 * grace period works for us.
65819 */
65820 get_online_cpus();
65821- snap = atomic_read(&sync_sched_expedited_started) - 1;
65822+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65823 smp_mb(); /* ensure read is before try_stop_cpus(). */
65824 }
65825
65826@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65827 * than we did beat us to the punch.
65828 */
65829 do {
65830- s = atomic_read(&sync_sched_expedited_done);
65831+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65832 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65833 smp_mb(); /* ensure test happens before caller kfree */
65834 break;
65835 }
65836- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65837+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65838
65839 put_online_cpus();
65840 }
65841@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65842 for_each_online_cpu(thatcpu) {
65843 if (thatcpu == cpu)
65844 continue;
65845- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65846+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65847 thatcpu).dynticks);
65848 smp_mb(); /* Order sampling of snap with end of grace period. */
65849 if ((snap & 0x1) != 0) {
65850diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65851index 9feffa4..54058df 100644
65852--- a/kernel/rcutree_trace.c
65853+++ b/kernel/rcutree_trace.c
65854@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65855 rdp->qs_pending);
65856 #ifdef CONFIG_NO_HZ
65857 seq_printf(m, " dt=%d/%d/%d df=%lu",
65858- atomic_read(&rdp->dynticks->dynticks),
65859+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65860 rdp->dynticks->dynticks_nesting,
65861 rdp->dynticks->dynticks_nmi_nesting,
65862 rdp->dynticks_fqs);
65863@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65864 rdp->qs_pending);
65865 #ifdef CONFIG_NO_HZ
65866 seq_printf(m, ",%d,%d,%d,%lu",
65867- atomic_read(&rdp->dynticks->dynticks),
65868+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65869 rdp->dynticks->dynticks_nesting,
65870 rdp->dynticks->dynticks_nmi_nesting,
65871 rdp->dynticks_fqs);
65872diff --git a/kernel/resource.c b/kernel/resource.c
65873index 7640b3a..5879283 100644
65874--- a/kernel/resource.c
65875+++ b/kernel/resource.c
65876@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65877
65878 static int __init ioresources_init(void)
65879 {
65880+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65881+#ifdef CONFIG_GRKERNSEC_PROC_USER
65882+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65883+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65884+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65885+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65886+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65887+#endif
65888+#else
65889 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65890 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65891+#endif
65892 return 0;
65893 }
65894 __initcall(ioresources_init);
65895diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65896index 3d9f31c..7fefc9e 100644
65897--- a/kernel/rtmutex-tester.c
65898+++ b/kernel/rtmutex-tester.c
65899@@ -20,7 +20,7 @@
65900 #define MAX_RT_TEST_MUTEXES 8
65901
65902 static spinlock_t rttest_lock;
65903-static atomic_t rttest_event;
65904+static atomic_unchecked_t rttest_event;
65905
65906 struct test_thread_data {
65907 int opcode;
65908@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65909
65910 case RTTEST_LOCKCONT:
65911 td->mutexes[td->opdata] = 1;
65912- td->event = atomic_add_return(1, &rttest_event);
65913+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65914 return 0;
65915
65916 case RTTEST_RESET:
65917@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65918 return 0;
65919
65920 case RTTEST_RESETEVENT:
65921- atomic_set(&rttest_event, 0);
65922+ atomic_set_unchecked(&rttest_event, 0);
65923 return 0;
65924
65925 default:
65926@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65927 return ret;
65928
65929 td->mutexes[id] = 1;
65930- td->event = atomic_add_return(1, &rttest_event);
65931+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65932 rt_mutex_lock(&mutexes[id]);
65933- td->event = atomic_add_return(1, &rttest_event);
65934+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65935 td->mutexes[id] = 4;
65936 return 0;
65937
65938@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65939 return ret;
65940
65941 td->mutexes[id] = 1;
65942- td->event = atomic_add_return(1, &rttest_event);
65943+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65944 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65945- td->event = atomic_add_return(1, &rttest_event);
65946+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65947 td->mutexes[id] = ret ? 0 : 4;
65948 return ret ? -EINTR : 0;
65949
65950@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65951 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65952 return ret;
65953
65954- td->event = atomic_add_return(1, &rttest_event);
65955+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65956 rt_mutex_unlock(&mutexes[id]);
65957- td->event = atomic_add_return(1, &rttest_event);
65958+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65959 td->mutexes[id] = 0;
65960 return 0;
65961
65962@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65963 break;
65964
65965 td->mutexes[dat] = 2;
65966- td->event = atomic_add_return(1, &rttest_event);
65967+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65968 break;
65969
65970 default:
65971@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65972 return;
65973
65974 td->mutexes[dat] = 3;
65975- td->event = atomic_add_return(1, &rttest_event);
65976+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65977 break;
65978
65979 case RTTEST_LOCKNOWAIT:
65980@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65981 return;
65982
65983 td->mutexes[dat] = 1;
65984- td->event = atomic_add_return(1, &rttest_event);
65985+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65986 return;
65987
65988 default:
65989diff --git a/kernel/sched.c b/kernel/sched.c
65990index d6b149c..896cbb8 100644
65991--- a/kernel/sched.c
65992+++ b/kernel/sched.c
65993@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65994 BUG(); /* the idle class will always have a runnable task */
65995 }
65996
65997+#ifdef CONFIG_GRKERNSEC_SETXID
65998+extern void gr_delayed_cred_worker(void);
65999+static inline void gr_cred_schedule(void)
66000+{
66001+ if (unlikely(current->delayed_cred))
66002+ gr_delayed_cred_worker();
66003+}
66004+#else
66005+static inline void gr_cred_schedule(void)
66006+{
66007+}
66008+#endif
66009+
66010 /*
66011 * __schedule() is the main scheduler function.
66012 */
66013@@ -4408,6 +4421,8 @@ need_resched:
66014
66015 schedule_debug(prev);
66016
66017+ gr_cred_schedule();
66018+
66019 if (sched_feat(HRTICK))
66020 hrtick_clear(rq);
66021
66022@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66023 /* convert nice value [19,-20] to rlimit style value [1,40] */
66024 int nice_rlim = 20 - nice;
66025
66026+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66027+
66028 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66029 capable(CAP_SYS_NICE));
66030 }
66031@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66032 if (nice > 19)
66033 nice = 19;
66034
66035- if (increment < 0 && !can_nice(current, nice))
66036+ if (increment < 0 && (!can_nice(current, nice) ||
66037+ gr_handle_chroot_nice()))
66038 return -EPERM;
66039
66040 retval = security_task_setnice(current, nice);
66041@@ -5288,6 +5306,7 @@ recheck:
66042 unsigned long rlim_rtprio =
66043 task_rlimit(p, RLIMIT_RTPRIO);
66044
66045+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66046 /* can't set/change the rt policy */
66047 if (policy != p->policy && !rlim_rtprio)
66048 return -EPERM;
66049diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66050index 429242f..d7cca82 100644
66051--- a/kernel/sched_autogroup.c
66052+++ b/kernel/sched_autogroup.c
66053@@ -7,7 +7,7 @@
66054
66055 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66056 static struct autogroup autogroup_default;
66057-static atomic_t autogroup_seq_nr;
66058+static atomic_unchecked_t autogroup_seq_nr;
66059
66060 static void __init autogroup_init(struct task_struct *init_task)
66061 {
66062@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66063
66064 kref_init(&ag->kref);
66065 init_rwsem(&ag->lock);
66066- ag->id = atomic_inc_return(&autogroup_seq_nr);
66067+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66068 ag->tg = tg;
66069 #ifdef CONFIG_RT_GROUP_SCHED
66070 /*
66071diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66072index 8a39fa3..34f3dbc 100644
66073--- a/kernel/sched_fair.c
66074+++ b/kernel/sched_fair.c
66075@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66076 * run_rebalance_domains is triggered when needed from the scheduler tick.
66077 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66078 */
66079-static void run_rebalance_domains(struct softirq_action *h)
66080+static void run_rebalance_domains(void)
66081 {
66082 int this_cpu = smp_processor_id();
66083 struct rq *this_rq = cpu_rq(this_cpu);
66084diff --git a/kernel/signal.c b/kernel/signal.c
66085index 2065515..aed2987 100644
66086--- a/kernel/signal.c
66087+++ b/kernel/signal.c
66088@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66089
66090 int print_fatal_signals __read_mostly;
66091
66092-static void __user *sig_handler(struct task_struct *t, int sig)
66093+static __sighandler_t sig_handler(struct task_struct *t, int sig)
66094 {
66095 return t->sighand->action[sig - 1].sa.sa_handler;
66096 }
66097
66098-static int sig_handler_ignored(void __user *handler, int sig)
66099+static int sig_handler_ignored(__sighandler_t handler, int sig)
66100 {
66101 /* Is it explicitly or implicitly ignored? */
66102 return handler == SIG_IGN ||
66103@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66104 static int sig_task_ignored(struct task_struct *t, int sig,
66105 int from_ancestor_ns)
66106 {
66107- void __user *handler;
66108+ __sighandler_t handler;
66109
66110 handler = sig_handler(t, sig);
66111
66112@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66113 atomic_inc(&user->sigpending);
66114 rcu_read_unlock();
66115
66116+ if (!override_rlimit)
66117+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66118+
66119 if (override_rlimit ||
66120 atomic_read(&user->sigpending) <=
66121 task_rlimit(t, RLIMIT_SIGPENDING)) {
66122@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66123
66124 int unhandled_signal(struct task_struct *tsk, int sig)
66125 {
66126- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66127+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66128 if (is_global_init(tsk))
66129 return 1;
66130 if (handler != SIG_IGN && handler != SIG_DFL)
66131@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66132 }
66133 }
66134
66135+ /* allow glibc communication via tgkill to other threads in our
66136+ thread group */
66137+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66138+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66139+ && gr_handle_signal(t, sig))
66140+ return -EPERM;
66141+
66142 return security_task_kill(t, info, sig, 0);
66143 }
66144
66145@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66146 return send_signal(sig, info, p, 1);
66147 }
66148
66149-static int
66150+int
66151 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66152 {
66153 return send_signal(sig, info, t, 0);
66154@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66155 unsigned long int flags;
66156 int ret, blocked, ignored;
66157 struct k_sigaction *action;
66158+ int is_unhandled = 0;
66159
66160 spin_lock_irqsave(&t->sighand->siglock, flags);
66161 action = &t->sighand->action[sig-1];
66162@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66163 }
66164 if (action->sa.sa_handler == SIG_DFL)
66165 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66166+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66167+ is_unhandled = 1;
66168 ret = specific_send_sig_info(sig, info, t);
66169 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66170
66171+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66172+ normal operation */
66173+ if (is_unhandled) {
66174+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66175+ gr_handle_crash(t, sig);
66176+ }
66177+
66178 return ret;
66179 }
66180
66181@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66182 ret = check_kill_permission(sig, info, p);
66183 rcu_read_unlock();
66184
66185- if (!ret && sig)
66186+ if (!ret && sig) {
66187 ret = do_send_sig_info(sig, info, p, true);
66188+ if (!ret)
66189+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66190+ }
66191
66192 return ret;
66193 }
66194@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66195 int error = -ESRCH;
66196
66197 rcu_read_lock();
66198- p = find_task_by_vpid(pid);
66199+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66200+ /* allow glibc communication via tgkill to other threads in our
66201+ thread group */
66202+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66203+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66204+ p = find_task_by_vpid_unrestricted(pid);
66205+ else
66206+#endif
66207+ p = find_task_by_vpid(pid);
66208 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66209 error = check_kill_permission(sig, info, p);
66210 /*
66211diff --git a/kernel/smp.c b/kernel/smp.c
66212index db197d6..17aef0b 100644
66213--- a/kernel/smp.c
66214+++ b/kernel/smp.c
66215@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66216 }
66217 EXPORT_SYMBOL(smp_call_function);
66218
66219-void ipi_call_lock(void)
66220+void ipi_call_lock(void) __acquires(call_function.lock)
66221 {
66222 raw_spin_lock(&call_function.lock);
66223 }
66224
66225-void ipi_call_unlock(void)
66226+void ipi_call_unlock(void) __releases(call_function.lock)
66227 {
66228 raw_spin_unlock(&call_function.lock);
66229 }
66230
66231-void ipi_call_lock_irq(void)
66232+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66233 {
66234 raw_spin_lock_irq(&call_function.lock);
66235 }
66236
66237-void ipi_call_unlock_irq(void)
66238+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66239 {
66240 raw_spin_unlock_irq(&call_function.lock);
66241 }
66242diff --git a/kernel/softirq.c b/kernel/softirq.c
66243index 2c71d91..1021f81 100644
66244--- a/kernel/softirq.c
66245+++ b/kernel/softirq.c
66246@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66247
66248 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66249
66250-char *softirq_to_name[NR_SOFTIRQS] = {
66251+const char * const softirq_to_name[NR_SOFTIRQS] = {
66252 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66253 "TASKLET", "SCHED", "HRTIMER", "RCU"
66254 };
66255@@ -235,7 +235,7 @@ restart:
66256 kstat_incr_softirqs_this_cpu(vec_nr);
66257
66258 trace_softirq_entry(vec_nr);
66259- h->action(h);
66260+ h->action();
66261 trace_softirq_exit(vec_nr);
66262 if (unlikely(prev_count != preempt_count())) {
66263 printk(KERN_ERR "huh, entered softirq %u %s %p"
66264@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66265 local_irq_restore(flags);
66266 }
66267
66268-void open_softirq(int nr, void (*action)(struct softirq_action *))
66269+void open_softirq(int nr, void (*action)(void))
66270 {
66271- softirq_vec[nr].action = action;
66272+ pax_open_kernel();
66273+ *(void **)&softirq_vec[nr].action = action;
66274+ pax_close_kernel();
66275 }
66276
66277 /*
66278@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66279
66280 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66281
66282-static void tasklet_action(struct softirq_action *a)
66283+static void tasklet_action(void)
66284 {
66285 struct tasklet_struct *list;
66286
66287@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66288 }
66289 }
66290
66291-static void tasklet_hi_action(struct softirq_action *a)
66292+static void tasklet_hi_action(void)
66293 {
66294 struct tasklet_struct *list;
66295
66296diff --git a/kernel/sys.c b/kernel/sys.c
66297index 481611f..0754d86 100644
66298--- a/kernel/sys.c
66299+++ b/kernel/sys.c
66300@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66301 error = -EACCES;
66302 goto out;
66303 }
66304+
66305+ if (gr_handle_chroot_setpriority(p, niceval)) {
66306+ error = -EACCES;
66307+ goto out;
66308+ }
66309+
66310 no_nice = security_task_setnice(p, niceval);
66311 if (no_nice) {
66312 error = no_nice;
66313@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66314 goto error;
66315 }
66316
66317+ if (gr_check_group_change(new->gid, new->egid, -1))
66318+ goto error;
66319+
66320 if (rgid != (gid_t) -1 ||
66321 (egid != (gid_t) -1 && egid != old->gid))
66322 new->sgid = new->egid;
66323@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66324 old = current_cred();
66325
66326 retval = -EPERM;
66327+
66328+ if (gr_check_group_change(gid, gid, gid))
66329+ goto error;
66330+
66331 if (nsown_capable(CAP_SETGID))
66332 new->gid = new->egid = new->sgid = new->fsgid = gid;
66333 else if (gid == old->gid || gid == old->sgid)
66334@@ -618,7 +631,7 @@ error:
66335 /*
66336 * change the user struct in a credentials set to match the new UID
66337 */
66338-static int set_user(struct cred *new)
66339+int set_user(struct cred *new)
66340 {
66341 struct user_struct *new_user;
66342
66343@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66344 goto error;
66345 }
66346
66347+ if (gr_check_user_change(new->uid, new->euid, -1))
66348+ goto error;
66349+
66350 if (new->uid != old->uid) {
66351 retval = set_user(new);
66352 if (retval < 0)
66353@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66354 old = current_cred();
66355
66356 retval = -EPERM;
66357+
66358+ if (gr_check_crash_uid(uid))
66359+ goto error;
66360+ if (gr_check_user_change(uid, uid, uid))
66361+ goto error;
66362+
66363 if (nsown_capable(CAP_SETUID)) {
66364 new->suid = new->uid = uid;
66365 if (uid != old->uid) {
66366@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66367 goto error;
66368 }
66369
66370+ if (gr_check_user_change(ruid, euid, -1))
66371+ goto error;
66372+
66373 if (ruid != (uid_t) -1) {
66374 new->uid = ruid;
66375 if (ruid != old->uid) {
66376@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66377 goto error;
66378 }
66379
66380+ if (gr_check_group_change(rgid, egid, -1))
66381+ goto error;
66382+
66383 if (rgid != (gid_t) -1)
66384 new->gid = rgid;
66385 if (egid != (gid_t) -1)
66386@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66387 old = current_cred();
66388 old_fsuid = old->fsuid;
66389
66390+ if (gr_check_user_change(-1, -1, uid))
66391+ goto error;
66392+
66393 if (uid == old->uid || uid == old->euid ||
66394 uid == old->suid || uid == old->fsuid ||
66395 nsown_capable(CAP_SETUID)) {
66396@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66397 }
66398 }
66399
66400+error:
66401 abort_creds(new);
66402 return old_fsuid;
66403
66404@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66405 if (gid == old->gid || gid == old->egid ||
66406 gid == old->sgid || gid == old->fsgid ||
66407 nsown_capable(CAP_SETGID)) {
66408+ if (gr_check_group_change(-1, -1, gid))
66409+ goto error;
66410+
66411 if (gid != old_fsgid) {
66412 new->fsgid = gid;
66413 goto change_okay;
66414 }
66415 }
66416
66417+error:
66418 abort_creds(new);
66419 return old_fsgid;
66420
66421@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66422 }
66423 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66424 snprintf(buf, len, "2.6.%u%s", v, rest);
66425- ret = copy_to_user(release, buf, len);
66426+ if (len > sizeof(buf))
66427+ ret = -EFAULT;
66428+ else
66429+ ret = copy_to_user(release, buf, len);
66430 }
66431 return ret;
66432 }
66433@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66434 return -EFAULT;
66435
66436 down_read(&uts_sem);
66437- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66438+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66439 __OLD_UTS_LEN);
66440 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66441- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66442+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66443 __OLD_UTS_LEN);
66444 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66445- error |= __copy_to_user(&name->release, &utsname()->release,
66446+ error |= __copy_to_user(name->release, &utsname()->release,
66447 __OLD_UTS_LEN);
66448 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66449- error |= __copy_to_user(&name->version, &utsname()->version,
66450+ error |= __copy_to_user(name->version, &utsname()->version,
66451 __OLD_UTS_LEN);
66452 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66453- error |= __copy_to_user(&name->machine, &utsname()->machine,
66454+ error |= __copy_to_user(name->machine, &utsname()->machine,
66455 __OLD_UTS_LEN);
66456 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66457 up_read(&uts_sem);
66458@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66459 error = get_dumpable(me->mm);
66460 break;
66461 case PR_SET_DUMPABLE:
66462- if (arg2 < 0 || arg2 > 1) {
66463+ if (arg2 > 1) {
66464 error = -EINVAL;
66465 break;
66466 }
66467diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66468index ae27196..7506d69 100644
66469--- a/kernel/sysctl.c
66470+++ b/kernel/sysctl.c
66471@@ -86,6 +86,13 @@
66472
66473
66474 #if defined(CONFIG_SYSCTL)
66475+#include <linux/grsecurity.h>
66476+#include <linux/grinternal.h>
66477+
66478+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66479+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66480+ const int op);
66481+extern int gr_handle_chroot_sysctl(const int op);
66482
66483 /* External variables not in a header file. */
66484 extern int sysctl_overcommit_memory;
66485@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66486 }
66487
66488 #endif
66489+extern struct ctl_table grsecurity_table[];
66490
66491 static struct ctl_table root_table[];
66492 static struct ctl_table_root sysctl_table_root;
66493@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66494 int sysctl_legacy_va_layout;
66495 #endif
66496
66497+#ifdef CONFIG_PAX_SOFTMODE
66498+static ctl_table pax_table[] = {
66499+ {
66500+ .procname = "softmode",
66501+ .data = &pax_softmode,
66502+ .maxlen = sizeof(unsigned int),
66503+ .mode = 0600,
66504+ .proc_handler = &proc_dointvec,
66505+ },
66506+
66507+ { }
66508+};
66509+#endif
66510+
66511 /* The default sysctl tables: */
66512
66513 static struct ctl_table root_table[] = {
66514@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66515 #endif
66516
66517 static struct ctl_table kern_table[] = {
66518+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66519+ {
66520+ .procname = "grsecurity",
66521+ .mode = 0500,
66522+ .child = grsecurity_table,
66523+ },
66524+#endif
66525+
66526+#ifdef CONFIG_PAX_SOFTMODE
66527+ {
66528+ .procname = "pax",
66529+ .mode = 0500,
66530+ .child = pax_table,
66531+ },
66532+#endif
66533+
66534 {
66535 .procname = "sched_child_runs_first",
66536 .data = &sysctl_sched_child_runs_first,
66537@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66538 .data = &modprobe_path,
66539 .maxlen = KMOD_PATH_LEN,
66540 .mode = 0644,
66541- .proc_handler = proc_dostring,
66542+ .proc_handler = proc_dostring_modpriv,
66543 },
66544 {
66545 .procname = "modules_disabled",
66546@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66547 .extra1 = &zero,
66548 .extra2 = &one,
66549 },
66550+#endif
66551 {
66552 .procname = "kptr_restrict",
66553 .data = &kptr_restrict,
66554 .maxlen = sizeof(int),
66555 .mode = 0644,
66556 .proc_handler = proc_dmesg_restrict,
66557+#ifdef CONFIG_GRKERNSEC_HIDESYM
66558+ .extra1 = &two,
66559+#else
66560 .extra1 = &zero,
66561+#endif
66562 .extra2 = &two,
66563 },
66564-#endif
66565 {
66566 .procname = "ngroups_max",
66567 .data = &ngroups_max,
66568@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66569 .proc_handler = proc_dointvec_minmax,
66570 .extra1 = &zero,
66571 },
66572+ {
66573+ .procname = "heap_stack_gap",
66574+ .data = &sysctl_heap_stack_gap,
66575+ .maxlen = sizeof(sysctl_heap_stack_gap),
66576+ .mode = 0644,
66577+ .proc_handler = proc_doulongvec_minmax,
66578+ },
66579 #else
66580 {
66581 .procname = "nr_trim_pages",
66582@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66583 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66584 {
66585 int mode;
66586+ int error;
66587+
66588+ if (table->parent != NULL && table->parent->procname != NULL &&
66589+ table->procname != NULL &&
66590+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66591+ return -EACCES;
66592+ if (gr_handle_chroot_sysctl(op))
66593+ return -EACCES;
66594+ error = gr_handle_sysctl(table, op);
66595+ if (error)
66596+ return error;
66597
66598 if (root->permissions)
66599 mode = root->permissions(root, current->nsproxy, table);
66600@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66601 buffer, lenp, ppos);
66602 }
66603
66604+int proc_dostring_modpriv(struct ctl_table *table, int write,
66605+ void __user *buffer, size_t *lenp, loff_t *ppos)
66606+{
66607+ if (write && !capable(CAP_SYS_MODULE))
66608+ return -EPERM;
66609+
66610+ return _proc_do_string(table->data, table->maxlen, write,
66611+ buffer, lenp, ppos);
66612+}
66613+
66614 static size_t proc_skip_spaces(char **buf)
66615 {
66616 size_t ret;
66617@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66618 len = strlen(tmp);
66619 if (len > *size)
66620 len = *size;
66621+ if (len > sizeof(tmp))
66622+ len = sizeof(tmp);
66623 if (copy_to_user(*buf, tmp, len))
66624 return -EFAULT;
66625 *size -= len;
66626@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66627 *i = val;
66628 } else {
66629 val = convdiv * (*i) / convmul;
66630- if (!first)
66631+ if (!first) {
66632 err = proc_put_char(&buffer, &left, '\t');
66633+ if (err)
66634+ break;
66635+ }
66636 err = proc_put_long(&buffer, &left, val, false);
66637 if (err)
66638 break;
66639@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66640 return -ENOSYS;
66641 }
66642
66643+int proc_dostring_modpriv(struct ctl_table *table, int write,
66644+ void __user *buffer, size_t *lenp, loff_t *ppos)
66645+{
66646+ return -ENOSYS;
66647+}
66648+
66649 int proc_dointvec(struct ctl_table *table, int write,
66650 void __user *buffer, size_t *lenp, loff_t *ppos)
66651 {
66652@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66653 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66654 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66655 EXPORT_SYMBOL(proc_dostring);
66656+EXPORT_SYMBOL(proc_dostring_modpriv);
66657 EXPORT_SYMBOL(proc_doulongvec_minmax);
66658 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66659 EXPORT_SYMBOL(register_sysctl_table);
66660diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66661index a650694..aaeeb20 100644
66662--- a/kernel/sysctl_binary.c
66663+++ b/kernel/sysctl_binary.c
66664@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66665 int i;
66666
66667 set_fs(KERNEL_DS);
66668- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66669+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66670 set_fs(old_fs);
66671 if (result < 0)
66672 goto out_kfree;
66673@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66674 }
66675
66676 set_fs(KERNEL_DS);
66677- result = vfs_write(file, buffer, str - buffer, &pos);
66678+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66679 set_fs(old_fs);
66680 if (result < 0)
66681 goto out_kfree;
66682@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66683 int i;
66684
66685 set_fs(KERNEL_DS);
66686- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66687+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66688 set_fs(old_fs);
66689 if (result < 0)
66690 goto out_kfree;
66691@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66692 }
66693
66694 set_fs(KERNEL_DS);
66695- result = vfs_write(file, buffer, str - buffer, &pos);
66696+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66697 set_fs(old_fs);
66698 if (result < 0)
66699 goto out_kfree;
66700@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66701 int i;
66702
66703 set_fs(KERNEL_DS);
66704- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66705+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66706 set_fs(old_fs);
66707 if (result < 0)
66708 goto out;
66709@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66710 __le16 dnaddr;
66711
66712 set_fs(KERNEL_DS);
66713- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66714+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66715 set_fs(old_fs);
66716 if (result < 0)
66717 goto out;
66718@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66719 le16_to_cpu(dnaddr) & 0x3ff);
66720
66721 set_fs(KERNEL_DS);
66722- result = vfs_write(file, buf, len, &pos);
66723+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66724 set_fs(old_fs);
66725 if (result < 0)
66726 goto out;
66727diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66728index 362da65..ab8ef8c 100644
66729--- a/kernel/sysctl_check.c
66730+++ b/kernel/sysctl_check.c
66731@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66732 set_fail(&fail, table, "Directory with extra2");
66733 } else {
66734 if ((table->proc_handler == proc_dostring) ||
66735+ (table->proc_handler == proc_dostring_modpriv) ||
66736 (table->proc_handler == proc_dointvec) ||
66737 (table->proc_handler == proc_dointvec_minmax) ||
66738 (table->proc_handler == proc_dointvec_jiffies) ||
66739diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66740index e660464..c8b9e67 100644
66741--- a/kernel/taskstats.c
66742+++ b/kernel/taskstats.c
66743@@ -27,9 +27,12 @@
66744 #include <linux/cgroup.h>
66745 #include <linux/fs.h>
66746 #include <linux/file.h>
66747+#include <linux/grsecurity.h>
66748 #include <net/genetlink.h>
66749 #include <linux/atomic.h>
66750
66751+extern int gr_is_taskstats_denied(int pid);
66752+
66753 /*
66754 * Maximum length of a cpumask that can be specified in
66755 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66756@@ -556,6 +559,9 @@ err:
66757
66758 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66759 {
66760+ if (gr_is_taskstats_denied(current->pid))
66761+ return -EACCES;
66762+
66763 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66764 return cmd_attr_register_cpumask(info);
66765 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66766diff --git a/kernel/time.c b/kernel/time.c
66767index 73e416d..cfc6f69 100644
66768--- a/kernel/time.c
66769+++ b/kernel/time.c
66770@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66771 return error;
66772
66773 if (tz) {
66774+ /* we log in do_settimeofday called below, so don't log twice
66775+ */
66776+ if (!tv)
66777+ gr_log_timechange();
66778+
66779 /* SMP safe, global irq locking makes it work. */
66780 sys_tz = *tz;
66781 update_vsyscall_tz();
66782diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66783index 8a46f5d..bbe6f9c 100644
66784--- a/kernel/time/alarmtimer.c
66785+++ b/kernel/time/alarmtimer.c
66786@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66787 struct platform_device *pdev;
66788 int error = 0;
66789 int i;
66790- struct k_clock alarm_clock = {
66791+ static struct k_clock alarm_clock = {
66792 .clock_getres = alarm_clock_getres,
66793 .clock_get = alarm_clock_get,
66794 .timer_create = alarm_timer_create,
66795diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66796index fd4a7b1..fae5c2a 100644
66797--- a/kernel/time/tick-broadcast.c
66798+++ b/kernel/time/tick-broadcast.c
66799@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66800 * then clear the broadcast bit.
66801 */
66802 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66803- int cpu = smp_processor_id();
66804+ cpu = smp_processor_id();
66805
66806 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66807 tick_broadcast_clear_oneshot(cpu);
66808diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66809index 2378413..be455fd 100644
66810--- a/kernel/time/timekeeping.c
66811+++ b/kernel/time/timekeeping.c
66812@@ -14,6 +14,7 @@
66813 #include <linux/init.h>
66814 #include <linux/mm.h>
66815 #include <linux/sched.h>
66816+#include <linux/grsecurity.h>
66817 #include <linux/syscore_ops.h>
66818 #include <linux/clocksource.h>
66819 #include <linux/jiffies.h>
66820@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66821 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66822 return -EINVAL;
66823
66824+ gr_log_timechange();
66825+
66826 write_seqlock_irqsave(&xtime_lock, flags);
66827
66828 timekeeping_forward_now();
66829diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66830index 3258455..f35227d 100644
66831--- a/kernel/time/timer_list.c
66832+++ b/kernel/time/timer_list.c
66833@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66834
66835 static void print_name_offset(struct seq_file *m, void *sym)
66836 {
66837+#ifdef CONFIG_GRKERNSEC_HIDESYM
66838+ SEQ_printf(m, "<%p>", NULL);
66839+#else
66840 char symname[KSYM_NAME_LEN];
66841
66842 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66843 SEQ_printf(m, "<%pK>", sym);
66844 else
66845 SEQ_printf(m, "%s", symname);
66846+#endif
66847 }
66848
66849 static void
66850@@ -112,7 +116,11 @@ next_one:
66851 static void
66852 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66853 {
66854+#ifdef CONFIG_GRKERNSEC_HIDESYM
66855+ SEQ_printf(m, " .base: %p\n", NULL);
66856+#else
66857 SEQ_printf(m, " .base: %pK\n", base);
66858+#endif
66859 SEQ_printf(m, " .index: %d\n",
66860 base->index);
66861 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66862@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66863 {
66864 struct proc_dir_entry *pe;
66865
66866+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66867+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66868+#else
66869 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66870+#endif
66871 if (!pe)
66872 return -ENOMEM;
66873 return 0;
66874diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66875index 0b537f2..9e71eca 100644
66876--- a/kernel/time/timer_stats.c
66877+++ b/kernel/time/timer_stats.c
66878@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66879 static unsigned long nr_entries;
66880 static struct entry entries[MAX_ENTRIES];
66881
66882-static atomic_t overflow_count;
66883+static atomic_unchecked_t overflow_count;
66884
66885 /*
66886 * The entries are in a hash-table, for fast lookup:
66887@@ -140,7 +140,7 @@ static void reset_entries(void)
66888 nr_entries = 0;
66889 memset(entries, 0, sizeof(entries));
66890 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66891- atomic_set(&overflow_count, 0);
66892+ atomic_set_unchecked(&overflow_count, 0);
66893 }
66894
66895 static struct entry *alloc_entry(void)
66896@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66897 if (likely(entry))
66898 entry->count++;
66899 else
66900- atomic_inc(&overflow_count);
66901+ atomic_inc_unchecked(&overflow_count);
66902
66903 out_unlock:
66904 raw_spin_unlock_irqrestore(lock, flags);
66905@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66906
66907 static void print_name_offset(struct seq_file *m, unsigned long addr)
66908 {
66909+#ifdef CONFIG_GRKERNSEC_HIDESYM
66910+ seq_printf(m, "<%p>", NULL);
66911+#else
66912 char symname[KSYM_NAME_LEN];
66913
66914 if (lookup_symbol_name(addr, symname) < 0)
66915 seq_printf(m, "<%p>", (void *)addr);
66916 else
66917 seq_printf(m, "%s", symname);
66918+#endif
66919 }
66920
66921 static int tstats_show(struct seq_file *m, void *v)
66922@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66923
66924 seq_puts(m, "Timer Stats Version: v0.2\n");
66925 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66926- if (atomic_read(&overflow_count))
66927+ if (atomic_read_unchecked(&overflow_count))
66928 seq_printf(m, "Overflow: %d entries\n",
66929- atomic_read(&overflow_count));
66930+ atomic_read_unchecked(&overflow_count));
66931
66932 for (i = 0; i < nr_entries; i++) {
66933 entry = entries + i;
66934@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66935 {
66936 struct proc_dir_entry *pe;
66937
66938+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66939+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66940+#else
66941 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66942+#endif
66943 if (!pe)
66944 return -ENOMEM;
66945 return 0;
66946diff --git a/kernel/timer.c b/kernel/timer.c
66947index 9c3c62b..441690e 100644
66948--- a/kernel/timer.c
66949+++ b/kernel/timer.c
66950@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66951 /*
66952 * This function runs timers and the timer-tq in bottom half context.
66953 */
66954-static void run_timer_softirq(struct softirq_action *h)
66955+static void run_timer_softirq(void)
66956 {
66957 struct tvec_base *base = __this_cpu_read(tvec_bases);
66958
66959diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66960index 16fc34a..efd8bb8 100644
66961--- a/kernel/trace/blktrace.c
66962+++ b/kernel/trace/blktrace.c
66963@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66964 struct blk_trace *bt = filp->private_data;
66965 char buf[16];
66966
66967- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66968+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66969
66970 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66971 }
66972@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66973 return 1;
66974
66975 bt = buf->chan->private_data;
66976- atomic_inc(&bt->dropped);
66977+ atomic_inc_unchecked(&bt->dropped);
66978 return 0;
66979 }
66980
66981@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66982
66983 bt->dir = dir;
66984 bt->dev = dev;
66985- atomic_set(&bt->dropped, 0);
66986+ atomic_set_unchecked(&bt->dropped, 0);
66987
66988 ret = -EIO;
66989 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66990diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66991index 25b4f4d..6f4772d 100644
66992--- a/kernel/trace/ftrace.c
66993+++ b/kernel/trace/ftrace.c
66994@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66995 if (unlikely(ftrace_disabled))
66996 return 0;
66997
66998+ ret = ftrace_arch_code_modify_prepare();
66999+ FTRACE_WARN_ON(ret);
67000+ if (ret)
67001+ return 0;
67002+
67003 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67004+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67005 if (ret) {
67006 ftrace_bug(ret, ip);
67007- return 0;
67008 }
67009- return 1;
67010+ return ret ? 0 : 1;
67011 }
67012
67013 /*
67014@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67015
67016 int
67017 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67018- void *data)
67019+ void *data)
67020 {
67021 struct ftrace_func_probe *entry;
67022 struct ftrace_page *pg;
67023diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67024index f2bd275..adaf3a2 100644
67025--- a/kernel/trace/trace.c
67026+++ b/kernel/trace/trace.c
67027@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67028 };
67029 #endif
67030
67031-static struct dentry *d_tracer;
67032-
67033 struct dentry *tracing_init_dentry(void)
67034 {
67035+ static struct dentry *d_tracer;
67036 static int once;
67037
67038 if (d_tracer)
67039@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67040 return d_tracer;
67041 }
67042
67043-static struct dentry *d_percpu;
67044-
67045 struct dentry *tracing_dentry_percpu(void)
67046 {
67047+ static struct dentry *d_percpu;
67048 static int once;
67049 struct dentry *d_tracer;
67050
67051diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67052index c212a7f..7b02394 100644
67053--- a/kernel/trace/trace_events.c
67054+++ b/kernel/trace/trace_events.c
67055@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67056 struct ftrace_module_file_ops {
67057 struct list_head list;
67058 struct module *mod;
67059- struct file_operations id;
67060- struct file_operations enable;
67061- struct file_operations format;
67062- struct file_operations filter;
67063 };
67064
67065 static struct ftrace_module_file_ops *
67066@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67067
67068 file_ops->mod = mod;
67069
67070- file_ops->id = ftrace_event_id_fops;
67071- file_ops->id.owner = mod;
67072-
67073- file_ops->enable = ftrace_enable_fops;
67074- file_ops->enable.owner = mod;
67075-
67076- file_ops->filter = ftrace_event_filter_fops;
67077- file_ops->filter.owner = mod;
67078-
67079- file_ops->format = ftrace_event_format_fops;
67080- file_ops->format.owner = mod;
67081+ pax_open_kernel();
67082+ *(void **)&mod->trace_id.owner = mod;
67083+ *(void **)&mod->trace_enable.owner = mod;
67084+ *(void **)&mod->trace_filter.owner = mod;
67085+ *(void **)&mod->trace_format.owner = mod;
67086+ pax_close_kernel();
67087
67088 list_add(&file_ops->list, &ftrace_module_file_list);
67089
67090@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67091
67092 for_each_event(call, start, end) {
67093 __trace_add_event_call(*call, mod,
67094- &file_ops->id, &file_ops->enable,
67095- &file_ops->filter, &file_ops->format);
67096+ &mod->trace_id, &mod->trace_enable,
67097+ &mod->trace_filter, &mod->trace_format);
67098 }
67099 }
67100
67101diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67102index 00d527c..7c5b1a3 100644
67103--- a/kernel/trace/trace_kprobe.c
67104+++ b/kernel/trace/trace_kprobe.c
67105@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67106 long ret;
67107 int maxlen = get_rloc_len(*(u32 *)dest);
67108 u8 *dst = get_rloc_data(dest);
67109- u8 *src = addr;
67110+ const u8 __user *src = (const u8 __force_user *)addr;
67111 mm_segment_t old_fs = get_fs();
67112 if (!maxlen)
67113 return;
67114@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67115 pagefault_disable();
67116 do
67117 ret = __copy_from_user_inatomic(dst++, src++, 1);
67118- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67119+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67120 dst[-1] = '\0';
67121 pagefault_enable();
67122 set_fs(old_fs);
67123@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67124 ((u8 *)get_rloc_data(dest))[0] = '\0';
67125 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67126 } else
67127- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67128+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67129 get_rloc_offs(*(u32 *)dest));
67130 }
67131 /* Return the length of string -- including null terminal byte */
67132@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67133 set_fs(KERNEL_DS);
67134 pagefault_disable();
67135 do {
67136- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67137+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67138 len++;
67139 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67140 pagefault_enable();
67141diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67142index fd3c8aa..5f324a6 100644
67143--- a/kernel/trace/trace_mmiotrace.c
67144+++ b/kernel/trace/trace_mmiotrace.c
67145@@ -24,7 +24,7 @@ struct header_iter {
67146 static struct trace_array *mmio_trace_array;
67147 static bool overrun_detected;
67148 static unsigned long prev_overruns;
67149-static atomic_t dropped_count;
67150+static atomic_unchecked_t dropped_count;
67151
67152 static void mmio_reset_data(struct trace_array *tr)
67153 {
67154@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67155
67156 static unsigned long count_overruns(struct trace_iterator *iter)
67157 {
67158- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67159+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67160 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67161
67162 if (over > prev_overruns)
67163@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67164 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67165 sizeof(*entry), 0, pc);
67166 if (!event) {
67167- atomic_inc(&dropped_count);
67168+ atomic_inc_unchecked(&dropped_count);
67169 return;
67170 }
67171 entry = ring_buffer_event_data(event);
67172@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67173 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67174 sizeof(*entry), 0, pc);
67175 if (!event) {
67176- atomic_inc(&dropped_count);
67177+ atomic_inc_unchecked(&dropped_count);
67178 return;
67179 }
67180 entry = ring_buffer_event_data(event);
67181diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67182index 5199930..26c73a0 100644
67183--- a/kernel/trace/trace_output.c
67184+++ b/kernel/trace/trace_output.c
67185@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67186
67187 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67188 if (!IS_ERR(p)) {
67189- p = mangle_path(s->buffer + s->len, p, "\n");
67190+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67191 if (p) {
67192 s->len = p - s->buffer;
67193 return 1;
67194diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67195index 77575b3..6e623d1 100644
67196--- a/kernel/trace/trace_stack.c
67197+++ b/kernel/trace/trace_stack.c
67198@@ -50,7 +50,7 @@ static inline void check_stack(void)
67199 return;
67200
67201 /* we do not handle interrupt stacks yet */
67202- if (!object_is_on_stack(&this_size))
67203+ if (!object_starts_on_stack(&this_size))
67204 return;
67205
67206 local_irq_save(flags);
67207diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67208index 209b379..7f76423 100644
67209--- a/kernel/trace/trace_workqueue.c
67210+++ b/kernel/trace/trace_workqueue.c
67211@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67212 int cpu;
67213 pid_t pid;
67214 /* Can be inserted from interrupt or user context, need to be atomic */
67215- atomic_t inserted;
67216+ atomic_unchecked_t inserted;
67217 /*
67218 * Don't need to be atomic, works are serialized in a single workqueue thread
67219 * on a single CPU.
67220@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67221 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67222 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67223 if (node->pid == wq_thread->pid) {
67224- atomic_inc(&node->inserted);
67225+ atomic_inc_unchecked(&node->inserted);
67226 goto found;
67227 }
67228 }
67229@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67230 tsk = get_pid_task(pid, PIDTYPE_PID);
67231 if (tsk) {
67232 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67233- atomic_read(&cws->inserted), cws->executed,
67234+ atomic_read_unchecked(&cws->inserted), cws->executed,
67235 tsk->comm);
67236 put_task_struct(tsk);
67237 }
67238diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67239index 82928f5..92da771 100644
67240--- a/lib/Kconfig.debug
67241+++ b/lib/Kconfig.debug
67242@@ -1103,6 +1103,7 @@ config LATENCYTOP
67243 depends on DEBUG_KERNEL
67244 depends on STACKTRACE_SUPPORT
67245 depends on PROC_FS
67246+ depends on !GRKERNSEC_HIDESYM
67247 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67248 select KALLSYMS
67249 select KALLSYMS_ALL
67250diff --git a/lib/bitmap.c b/lib/bitmap.c
67251index 0d4a127..33a06c7 100644
67252--- a/lib/bitmap.c
67253+++ b/lib/bitmap.c
67254@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67255 {
67256 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67257 u32 chunk;
67258- const char __user __force *ubuf = (const char __user __force *)buf;
67259+ const char __user *ubuf = (const char __force_user *)buf;
67260
67261 bitmap_zero(maskp, nmaskbits);
67262
67263@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67264 {
67265 if (!access_ok(VERIFY_READ, ubuf, ulen))
67266 return -EFAULT;
67267- return __bitmap_parse((const char __force *)ubuf,
67268+ return __bitmap_parse((const char __force_kernel *)ubuf,
67269 ulen, 1, maskp, nmaskbits);
67270
67271 }
67272@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67273 {
67274 unsigned a, b;
67275 int c, old_c, totaldigits;
67276- const char __user __force *ubuf = (const char __user __force *)buf;
67277+ const char __user *ubuf = (const char __force_user *)buf;
67278 int exp_digit, in_range;
67279
67280 totaldigits = c = 0;
67281@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67282 {
67283 if (!access_ok(VERIFY_READ, ubuf, ulen))
67284 return -EFAULT;
67285- return __bitmap_parselist((const char __force *)ubuf,
67286+ return __bitmap_parselist((const char __force_kernel *)ubuf,
67287 ulen, 1, maskp, nmaskbits);
67288 }
67289 EXPORT_SYMBOL(bitmap_parselist_user);
67290diff --git a/lib/bug.c b/lib/bug.c
67291index 1955209..cbbb2ad 100644
67292--- a/lib/bug.c
67293+++ b/lib/bug.c
67294@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67295 return BUG_TRAP_TYPE_NONE;
67296
67297 bug = find_bug(bugaddr);
67298+ if (!bug)
67299+ return BUG_TRAP_TYPE_NONE;
67300
67301 file = NULL;
67302 line = 0;
67303diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67304index a78b7c6..2c73084 100644
67305--- a/lib/debugobjects.c
67306+++ b/lib/debugobjects.c
67307@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67308 if (limit > 4)
67309 return;
67310
67311- is_on_stack = object_is_on_stack(addr);
67312+ is_on_stack = object_starts_on_stack(addr);
67313 if (is_on_stack == onstack)
67314 return;
67315
67316diff --git a/lib/devres.c b/lib/devres.c
67317index 7c0e953..f642b5c 100644
67318--- a/lib/devres.c
67319+++ b/lib/devres.c
67320@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67321 void devm_iounmap(struct device *dev, void __iomem *addr)
67322 {
67323 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67324- (void *)addr));
67325+ (void __force *)addr));
67326 iounmap(addr);
67327 }
67328 EXPORT_SYMBOL(devm_iounmap);
67329@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67330 {
67331 ioport_unmap(addr);
67332 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67333- devm_ioport_map_match, (void *)addr));
67334+ devm_ioport_map_match, (void __force *)addr));
67335 }
67336 EXPORT_SYMBOL(devm_ioport_unmap);
67337
67338diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67339index fea790a..ebb0e82 100644
67340--- a/lib/dma-debug.c
67341+++ b/lib/dma-debug.c
67342@@ -925,7 +925,7 @@ out:
67343
67344 static void check_for_stack(struct device *dev, void *addr)
67345 {
67346- if (object_is_on_stack(addr))
67347+ if (object_starts_on_stack(addr))
67348 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67349 "stack [addr=%p]\n", addr);
67350 }
67351diff --git a/lib/extable.c b/lib/extable.c
67352index 4cac81e..63e9b8f 100644
67353--- a/lib/extable.c
67354+++ b/lib/extable.c
67355@@ -13,6 +13,7 @@
67356 #include <linux/init.h>
67357 #include <linux/sort.h>
67358 #include <asm/uaccess.h>
67359+#include <asm/pgtable.h>
67360
67361 #ifndef ARCH_HAS_SORT_EXTABLE
67362 /*
67363@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67364 void sort_extable(struct exception_table_entry *start,
67365 struct exception_table_entry *finish)
67366 {
67367+ pax_open_kernel();
67368 sort(start, finish - start, sizeof(struct exception_table_entry),
67369 cmp_ex, NULL);
67370+ pax_close_kernel();
67371 }
67372
67373 #ifdef CONFIG_MODULES
67374diff --git a/lib/inflate.c b/lib/inflate.c
67375index 013a761..c28f3fc 100644
67376--- a/lib/inflate.c
67377+++ b/lib/inflate.c
67378@@ -269,7 +269,7 @@ static void free(void *where)
67379 malloc_ptr = free_mem_ptr;
67380 }
67381 #else
67382-#define malloc(a) kmalloc(a, GFP_KERNEL)
67383+#define malloc(a) kmalloc((a), GFP_KERNEL)
67384 #define free(a) kfree(a)
67385 #endif
67386
67387diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67388index bd2bea9..6b3c95e 100644
67389--- a/lib/is_single_threaded.c
67390+++ b/lib/is_single_threaded.c
67391@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67392 struct task_struct *p, *t;
67393 bool ret;
67394
67395+ if (!mm)
67396+ return true;
67397+
67398 if (atomic_read(&task->signal->live) != 1)
67399 return false;
67400
67401diff --git a/lib/kref.c b/lib/kref.c
67402index 3efb882..8492f4c 100644
67403--- a/lib/kref.c
67404+++ b/lib/kref.c
67405@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67406 */
67407 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67408 {
67409- WARN_ON(release == NULL);
67410+ BUG_ON(release == NULL);
67411 WARN_ON(release == (void (*)(struct kref *))kfree);
67412
67413 if (atomic_dec_and_test(&kref->refcount)) {
67414diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67415index d9df745..e73c2fe 100644
67416--- a/lib/radix-tree.c
67417+++ b/lib/radix-tree.c
67418@@ -80,7 +80,7 @@ struct radix_tree_preload {
67419 int nr;
67420 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67421 };
67422-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67423+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67424
67425 static inline void *ptr_to_indirect(void *ptr)
67426 {
67427diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67428index 993599e..84dc70e 100644
67429--- a/lib/vsprintf.c
67430+++ b/lib/vsprintf.c
67431@@ -16,6 +16,9 @@
67432 * - scnprintf and vscnprintf
67433 */
67434
67435+#ifdef CONFIG_GRKERNSEC_HIDESYM
67436+#define __INCLUDED_BY_HIDESYM 1
67437+#endif
67438 #include <stdarg.h>
67439 #include <linux/module.h>
67440 #include <linux/types.h>
67441@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67442 char sym[KSYM_SYMBOL_LEN];
67443 if (ext == 'B')
67444 sprint_backtrace(sym, value);
67445- else if (ext != 'f' && ext != 's')
67446+ else if (ext != 'f' && ext != 's' && ext != 'a')
67447 sprint_symbol(sym, value);
67448 else
67449 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67450@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67451 return string(buf, end, uuid, spec);
67452 }
67453
67454+#ifdef CONFIG_GRKERNSEC_HIDESYM
67455+int kptr_restrict __read_mostly = 2;
67456+#else
67457 int kptr_restrict __read_mostly;
67458+#endif
67459
67460 /*
67461 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67462@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67463 * - 'S' For symbolic direct pointers with offset
67464 * - 's' For symbolic direct pointers without offset
67465 * - 'B' For backtraced symbolic direct pointers with offset
67466+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67467+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67468 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67469 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67470 * - 'M' For a 6-byte MAC address, it prints the address in the
67471@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67472 {
67473 if (!ptr && *fmt != 'K') {
67474 /*
67475- * Print (null) with the same width as a pointer so it makes
67476+ * Print (nil) with the same width as a pointer so it makes
67477 * tabular output look nice.
67478 */
67479 if (spec.field_width == -1)
67480 spec.field_width = 2 * sizeof(void *);
67481- return string(buf, end, "(null)", spec);
67482+ return string(buf, end, "(nil)", spec);
67483 }
67484
67485 switch (*fmt) {
67486@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67487 /* Fallthrough */
67488 case 'S':
67489 case 's':
67490+#ifdef CONFIG_GRKERNSEC_HIDESYM
67491+ break;
67492+#else
67493+ return symbol_string(buf, end, ptr, spec, *fmt);
67494+#endif
67495+ case 'A':
67496+ case 'a':
67497 case 'B':
67498 return symbol_string(buf, end, ptr, spec, *fmt);
67499 case 'R':
67500@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67501 typeof(type) value; \
67502 if (sizeof(type) == 8) { \
67503 args = PTR_ALIGN(args, sizeof(u32)); \
67504- *(u32 *)&value = *(u32 *)args; \
67505- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67506+ *(u32 *)&value = *(const u32 *)args; \
67507+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67508 } else { \
67509 args = PTR_ALIGN(args, sizeof(type)); \
67510- value = *(typeof(type) *)args; \
67511+ value = *(const typeof(type) *)args; \
67512 } \
67513 args += sizeof(type); \
67514 value; \
67515@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67516 case FORMAT_TYPE_STR: {
67517 const char *str_arg = args;
67518 args += strlen(str_arg) + 1;
67519- str = string(str, end, (char *)str_arg, spec);
67520+ str = string(str, end, str_arg, spec);
67521 break;
67522 }
67523
67524diff --git a/localversion-grsec b/localversion-grsec
67525new file mode 100644
67526index 0000000..7cd6065
67527--- /dev/null
67528+++ b/localversion-grsec
67529@@ -0,0 +1 @@
67530+-grsec
67531diff --git a/mm/Kconfig b/mm/Kconfig
67532index 011b110..b492af2 100644
67533--- a/mm/Kconfig
67534+++ b/mm/Kconfig
67535@@ -241,10 +241,10 @@ config KSM
67536 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67537
67538 config DEFAULT_MMAP_MIN_ADDR
67539- int "Low address space to protect from user allocation"
67540+ int "Low address space to protect from user allocation"
67541 depends on MMU
67542- default 4096
67543- help
67544+ default 65536
67545+ help
67546 This is the portion of low virtual memory which should be protected
67547 from userspace allocation. Keeping a user from writing to low pages
67548 can help reduce the impact of kernel NULL pointer bugs.
67549diff --git a/mm/filemap.c b/mm/filemap.c
67550index 03c5b0e..a01e793 100644
67551--- a/mm/filemap.c
67552+++ b/mm/filemap.c
67553@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67554 struct address_space *mapping = file->f_mapping;
67555
67556 if (!mapping->a_ops->readpage)
67557- return -ENOEXEC;
67558+ return -ENODEV;
67559 file_accessed(file);
67560 vma->vm_ops = &generic_file_vm_ops;
67561 vma->vm_flags |= VM_CAN_NONLINEAR;
67562@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67563 *pos = i_size_read(inode);
67564
67565 if (limit != RLIM_INFINITY) {
67566+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67567 if (*pos >= limit) {
67568 send_sig(SIGXFSZ, current, 0);
67569 return -EFBIG;
67570diff --git a/mm/fremap.c b/mm/fremap.c
67571index 9ed4fd4..c42648d 100644
67572--- a/mm/fremap.c
67573+++ b/mm/fremap.c
67574@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67575 retry:
67576 vma = find_vma(mm, start);
67577
67578+#ifdef CONFIG_PAX_SEGMEXEC
67579+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67580+ goto out;
67581+#endif
67582+
67583 /*
67584 * Make sure the vma is shared, that it supports prefaulting,
67585 * and that the remapped range is valid and fully within
67586diff --git a/mm/highmem.c b/mm/highmem.c
67587index 57d82c6..e9e0552 100644
67588--- a/mm/highmem.c
67589+++ b/mm/highmem.c
67590@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67591 * So no dangers, even with speculative execution.
67592 */
67593 page = pte_page(pkmap_page_table[i]);
67594+ pax_open_kernel();
67595 pte_clear(&init_mm, (unsigned long)page_address(page),
67596 &pkmap_page_table[i]);
67597-
67598+ pax_close_kernel();
67599 set_page_address(page, NULL);
67600 need_flush = 1;
67601 }
67602@@ -186,9 +187,11 @@ start:
67603 }
67604 }
67605 vaddr = PKMAP_ADDR(last_pkmap_nr);
67606+
67607+ pax_open_kernel();
67608 set_pte_at(&init_mm, vaddr,
67609 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67610-
67611+ pax_close_kernel();
67612 pkmap_count[last_pkmap_nr] = 1;
67613 set_page_address(page, (void *)vaddr);
67614
67615diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67616index 33141f5..e56bef9 100644
67617--- a/mm/huge_memory.c
67618+++ b/mm/huge_memory.c
67619@@ -703,7 +703,7 @@ out:
67620 * run pte_offset_map on the pmd, if an huge pmd could
67621 * materialize from under us from a different thread.
67622 */
67623- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67624+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67625 return VM_FAULT_OOM;
67626 /* if an huge pmd materialized from under us just retry later */
67627 if (unlikely(pmd_trans_huge(*pmd)))
67628diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67629index 2316840..b418671 100644
67630--- a/mm/hugetlb.c
67631+++ b/mm/hugetlb.c
67632@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67633 return 1;
67634 }
67635
67636+#ifdef CONFIG_PAX_SEGMEXEC
67637+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67638+{
67639+ struct mm_struct *mm = vma->vm_mm;
67640+ struct vm_area_struct *vma_m;
67641+ unsigned long address_m;
67642+ pte_t *ptep_m;
67643+
67644+ vma_m = pax_find_mirror_vma(vma);
67645+ if (!vma_m)
67646+ return;
67647+
67648+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67649+ address_m = address + SEGMEXEC_TASK_SIZE;
67650+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67651+ get_page(page_m);
67652+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67653+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67654+}
67655+#endif
67656+
67657 /*
67658 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67659 */
67660@@ -2450,6 +2471,11 @@ retry_avoidcopy:
67661 make_huge_pte(vma, new_page, 1));
67662 page_remove_rmap(old_page);
67663 hugepage_add_new_anon_rmap(new_page, vma, address);
67664+
67665+#ifdef CONFIG_PAX_SEGMEXEC
67666+ pax_mirror_huge_pte(vma, address, new_page);
67667+#endif
67668+
67669 /* Make the old page be freed below */
67670 new_page = old_page;
67671 mmu_notifier_invalidate_range_end(mm,
67672@@ -2601,6 +2627,10 @@ retry:
67673 && (vma->vm_flags & VM_SHARED)));
67674 set_huge_pte_at(mm, address, ptep, new_pte);
67675
67676+#ifdef CONFIG_PAX_SEGMEXEC
67677+ pax_mirror_huge_pte(vma, address, page);
67678+#endif
67679+
67680 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67681 /* Optimization, do the COW without a second fault */
67682 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67683@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67684 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67685 struct hstate *h = hstate_vma(vma);
67686
67687+#ifdef CONFIG_PAX_SEGMEXEC
67688+ struct vm_area_struct *vma_m;
67689+#endif
67690+
67691 ptep = huge_pte_offset(mm, address);
67692 if (ptep) {
67693 entry = huge_ptep_get(ptep);
67694@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67695 VM_FAULT_SET_HINDEX(h - hstates);
67696 }
67697
67698+#ifdef CONFIG_PAX_SEGMEXEC
67699+ vma_m = pax_find_mirror_vma(vma);
67700+ if (vma_m) {
67701+ unsigned long address_m;
67702+
67703+ if (vma->vm_start > vma_m->vm_start) {
67704+ address_m = address;
67705+ address -= SEGMEXEC_TASK_SIZE;
67706+ vma = vma_m;
67707+ h = hstate_vma(vma);
67708+ } else
67709+ address_m = address + SEGMEXEC_TASK_SIZE;
67710+
67711+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67712+ return VM_FAULT_OOM;
67713+ address_m &= HPAGE_MASK;
67714+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67715+ }
67716+#endif
67717+
67718 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67719 if (!ptep)
67720 return VM_FAULT_OOM;
67721diff --git a/mm/internal.h b/mm/internal.h
67722index 2189af4..f2ca332 100644
67723--- a/mm/internal.h
67724+++ b/mm/internal.h
67725@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67726 * in mm/page_alloc.c
67727 */
67728 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67729+extern void free_compound_page(struct page *page);
67730 extern void prep_compound_page(struct page *page, unsigned long order);
67731 #ifdef CONFIG_MEMORY_FAILURE
67732 extern bool is_free_buddy_page(struct page *page);
67733diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67734index f3b2a00..61da94d 100644
67735--- a/mm/kmemleak.c
67736+++ b/mm/kmemleak.c
67737@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67738
67739 for (i = 0; i < object->trace_len; i++) {
67740 void *ptr = (void *)object->trace[i];
67741- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67742+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67743 }
67744 }
67745
67746diff --git a/mm/maccess.c b/mm/maccess.c
67747index d53adf9..03a24bf 100644
67748--- a/mm/maccess.c
67749+++ b/mm/maccess.c
67750@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67751 set_fs(KERNEL_DS);
67752 pagefault_disable();
67753 ret = __copy_from_user_inatomic(dst,
67754- (__force const void __user *)src, size);
67755+ (const void __force_user *)src, size);
67756 pagefault_enable();
67757 set_fs(old_fs);
67758
67759@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67760
67761 set_fs(KERNEL_DS);
67762 pagefault_disable();
67763- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67764+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67765 pagefault_enable();
67766 set_fs(old_fs);
67767
67768diff --git a/mm/madvise.c b/mm/madvise.c
67769index 74bf193..feb6fd3 100644
67770--- a/mm/madvise.c
67771+++ b/mm/madvise.c
67772@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67773 pgoff_t pgoff;
67774 unsigned long new_flags = vma->vm_flags;
67775
67776+#ifdef CONFIG_PAX_SEGMEXEC
67777+ struct vm_area_struct *vma_m;
67778+#endif
67779+
67780 switch (behavior) {
67781 case MADV_NORMAL:
67782 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67783@@ -110,6 +114,13 @@ success:
67784 /*
67785 * vm_flags is protected by the mmap_sem held in write mode.
67786 */
67787+
67788+#ifdef CONFIG_PAX_SEGMEXEC
67789+ vma_m = pax_find_mirror_vma(vma);
67790+ if (vma_m)
67791+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67792+#endif
67793+
67794 vma->vm_flags = new_flags;
67795
67796 out:
67797@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67798 struct vm_area_struct ** prev,
67799 unsigned long start, unsigned long end)
67800 {
67801+
67802+#ifdef CONFIG_PAX_SEGMEXEC
67803+ struct vm_area_struct *vma_m;
67804+#endif
67805+
67806 *prev = vma;
67807 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67808 return -EINVAL;
67809@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67810 zap_page_range(vma, start, end - start, &details);
67811 } else
67812 zap_page_range(vma, start, end - start, NULL);
67813+
67814+#ifdef CONFIG_PAX_SEGMEXEC
67815+ vma_m = pax_find_mirror_vma(vma);
67816+ if (vma_m) {
67817+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67818+ struct zap_details details = {
67819+ .nonlinear_vma = vma_m,
67820+ .last_index = ULONG_MAX,
67821+ };
67822+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67823+ } else
67824+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67825+ }
67826+#endif
67827+
67828 return 0;
67829 }
67830
67831@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67832 if (end < start)
67833 goto out;
67834
67835+#ifdef CONFIG_PAX_SEGMEXEC
67836+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67837+ if (end > SEGMEXEC_TASK_SIZE)
67838+ goto out;
67839+ } else
67840+#endif
67841+
67842+ if (end > TASK_SIZE)
67843+ goto out;
67844+
67845 error = 0;
67846 if (end == start)
67847 goto out;
67848diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67849index 06d3479..0778eef 100644
67850--- a/mm/memory-failure.c
67851+++ b/mm/memory-failure.c
67852@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67853
67854 int sysctl_memory_failure_recovery __read_mostly = 1;
67855
67856-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67857+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67858
67859 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67860
67861@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67862 si.si_signo = SIGBUS;
67863 si.si_errno = 0;
67864 si.si_code = BUS_MCEERR_AO;
67865- si.si_addr = (void *)addr;
67866+ si.si_addr = (void __user *)addr;
67867 #ifdef __ARCH_SI_TRAPNO
67868 si.si_trapno = trapno;
67869 #endif
67870@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67871 }
67872
67873 nr_pages = 1 << compound_trans_order(hpage);
67874- atomic_long_add(nr_pages, &mce_bad_pages);
67875+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67876
67877 /*
67878 * We need/can do nothing about count=0 pages.
67879@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67880 if (!PageHWPoison(hpage)
67881 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67882 || (p != hpage && TestSetPageHWPoison(hpage))) {
67883- atomic_long_sub(nr_pages, &mce_bad_pages);
67884+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67885 return 0;
67886 }
67887 set_page_hwpoison_huge_page(hpage);
67888@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67889 }
67890 if (hwpoison_filter(p)) {
67891 if (TestClearPageHWPoison(p))
67892- atomic_long_sub(nr_pages, &mce_bad_pages);
67893+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67894 unlock_page(hpage);
67895 put_page(hpage);
67896 return 0;
67897@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67898 return 0;
67899 }
67900 if (TestClearPageHWPoison(p))
67901- atomic_long_sub(nr_pages, &mce_bad_pages);
67902+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67903 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67904 return 0;
67905 }
67906@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67907 */
67908 if (TestClearPageHWPoison(page)) {
67909 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67910- atomic_long_sub(nr_pages, &mce_bad_pages);
67911+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67912 freeit = 1;
67913 if (PageHuge(page))
67914 clear_page_hwpoison_huge_page(page);
67915@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67916 }
67917 done:
67918 if (!PageHWPoison(hpage))
67919- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67920+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67921 set_page_hwpoison_huge_page(hpage);
67922 dequeue_hwpoisoned_huge_page(hpage);
67923 /* keep elevated page count for bad page */
67924@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67925 return ret;
67926
67927 done:
67928- atomic_long_add(1, &mce_bad_pages);
67929+ atomic_long_add_unchecked(1, &mce_bad_pages);
67930 SetPageHWPoison(page);
67931 /* keep elevated page count for bad page */
67932 return ret;
67933diff --git a/mm/memory.c b/mm/memory.c
67934index 829d437..3d3926a 100644
67935--- a/mm/memory.c
67936+++ b/mm/memory.c
67937@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67938 return;
67939
67940 pmd = pmd_offset(pud, start);
67941+
67942+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67943 pud_clear(pud);
67944 pmd_free_tlb(tlb, pmd, start);
67945+#endif
67946+
67947 }
67948
67949 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67950@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67951 if (end - 1 > ceiling - 1)
67952 return;
67953
67954+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67955 pud = pud_offset(pgd, start);
67956 pgd_clear(pgd);
67957 pud_free_tlb(tlb, pud, start);
67958+#endif
67959+
67960 }
67961
67962 /*
67963@@ -1566,12 +1573,6 @@ no_page_table:
67964 return page;
67965 }
67966
67967-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67968-{
67969- return stack_guard_page_start(vma, addr) ||
67970- stack_guard_page_end(vma, addr+PAGE_SIZE);
67971-}
67972-
67973 /**
67974 * __get_user_pages() - pin user pages in memory
67975 * @tsk: task_struct of target task
67976@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67977 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67978 i = 0;
67979
67980- do {
67981+ while (nr_pages) {
67982 struct vm_area_struct *vma;
67983
67984- vma = find_extend_vma(mm, start);
67985+ vma = find_vma(mm, start);
67986 if (!vma && in_gate_area(mm, start)) {
67987 unsigned long pg = start & PAGE_MASK;
67988 pgd_t *pgd;
67989@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67990 goto next_page;
67991 }
67992
67993- if (!vma ||
67994+ if (!vma || start < vma->vm_start ||
67995 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67996 !(vm_flags & vma->vm_flags))
67997 return i ? : -EFAULT;
67998@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67999 int ret;
68000 unsigned int fault_flags = 0;
68001
68002- /* For mlock, just skip the stack guard page. */
68003- if (foll_flags & FOLL_MLOCK) {
68004- if (stack_guard_page(vma, start))
68005- goto next_page;
68006- }
68007 if (foll_flags & FOLL_WRITE)
68008 fault_flags |= FAULT_FLAG_WRITE;
68009 if (nonblocking)
68010@@ -1800,7 +1796,7 @@ next_page:
68011 start += PAGE_SIZE;
68012 nr_pages--;
68013 } while (nr_pages && start < vma->vm_end);
68014- } while (nr_pages);
68015+ }
68016 return i;
68017 }
68018 EXPORT_SYMBOL(__get_user_pages);
68019@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68020 page_add_file_rmap(page);
68021 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68022
68023+#ifdef CONFIG_PAX_SEGMEXEC
68024+ pax_mirror_file_pte(vma, addr, page, ptl);
68025+#endif
68026+
68027 retval = 0;
68028 pte_unmap_unlock(pte, ptl);
68029 return retval;
68030@@ -2041,10 +2041,22 @@ out:
68031 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68032 struct page *page)
68033 {
68034+
68035+#ifdef CONFIG_PAX_SEGMEXEC
68036+ struct vm_area_struct *vma_m;
68037+#endif
68038+
68039 if (addr < vma->vm_start || addr >= vma->vm_end)
68040 return -EFAULT;
68041 if (!page_count(page))
68042 return -EINVAL;
68043+
68044+#ifdef CONFIG_PAX_SEGMEXEC
68045+ vma_m = pax_find_mirror_vma(vma);
68046+ if (vma_m)
68047+ vma_m->vm_flags |= VM_INSERTPAGE;
68048+#endif
68049+
68050 vma->vm_flags |= VM_INSERTPAGE;
68051 return insert_page(vma, addr, page, vma->vm_page_prot);
68052 }
68053@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68054 unsigned long pfn)
68055 {
68056 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68057+ BUG_ON(vma->vm_mirror);
68058
68059 if (addr < vma->vm_start || addr >= vma->vm_end)
68060 return -EFAULT;
68061@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68062 copy_user_highpage(dst, src, va, vma);
68063 }
68064
68065+#ifdef CONFIG_PAX_SEGMEXEC
68066+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68067+{
68068+ struct mm_struct *mm = vma->vm_mm;
68069+ spinlock_t *ptl;
68070+ pte_t *pte, entry;
68071+
68072+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68073+ entry = *pte;
68074+ if (!pte_present(entry)) {
68075+ if (!pte_none(entry)) {
68076+ BUG_ON(pte_file(entry));
68077+ free_swap_and_cache(pte_to_swp_entry(entry));
68078+ pte_clear_not_present_full(mm, address, pte, 0);
68079+ }
68080+ } else {
68081+ struct page *page;
68082+
68083+ flush_cache_page(vma, address, pte_pfn(entry));
68084+ entry = ptep_clear_flush(vma, address, pte);
68085+ BUG_ON(pte_dirty(entry));
68086+ page = vm_normal_page(vma, address, entry);
68087+ if (page) {
68088+ update_hiwater_rss(mm);
68089+ if (PageAnon(page))
68090+ dec_mm_counter_fast(mm, MM_ANONPAGES);
68091+ else
68092+ dec_mm_counter_fast(mm, MM_FILEPAGES);
68093+ page_remove_rmap(page);
68094+ page_cache_release(page);
68095+ }
68096+ }
68097+ pte_unmap_unlock(pte, ptl);
68098+}
68099+
68100+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68101+ *
68102+ * the ptl of the lower mapped page is held on entry and is not released on exit
68103+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68104+ */
68105+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68106+{
68107+ struct mm_struct *mm = vma->vm_mm;
68108+ unsigned long address_m;
68109+ spinlock_t *ptl_m;
68110+ struct vm_area_struct *vma_m;
68111+ pmd_t *pmd_m;
68112+ pte_t *pte_m, entry_m;
68113+
68114+ BUG_ON(!page_m || !PageAnon(page_m));
68115+
68116+ vma_m = pax_find_mirror_vma(vma);
68117+ if (!vma_m)
68118+ return;
68119+
68120+ BUG_ON(!PageLocked(page_m));
68121+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68122+ address_m = address + SEGMEXEC_TASK_SIZE;
68123+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68124+ pte_m = pte_offset_map(pmd_m, address_m);
68125+ ptl_m = pte_lockptr(mm, pmd_m);
68126+ if (ptl != ptl_m) {
68127+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68128+ if (!pte_none(*pte_m))
68129+ goto out;
68130+ }
68131+
68132+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68133+ page_cache_get(page_m);
68134+ page_add_anon_rmap(page_m, vma_m, address_m);
68135+ inc_mm_counter_fast(mm, MM_ANONPAGES);
68136+ set_pte_at(mm, address_m, pte_m, entry_m);
68137+ update_mmu_cache(vma_m, address_m, entry_m);
68138+out:
68139+ if (ptl != ptl_m)
68140+ spin_unlock(ptl_m);
68141+ pte_unmap(pte_m);
68142+ unlock_page(page_m);
68143+}
68144+
68145+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68146+{
68147+ struct mm_struct *mm = vma->vm_mm;
68148+ unsigned long address_m;
68149+ spinlock_t *ptl_m;
68150+ struct vm_area_struct *vma_m;
68151+ pmd_t *pmd_m;
68152+ pte_t *pte_m, entry_m;
68153+
68154+ BUG_ON(!page_m || PageAnon(page_m));
68155+
68156+ vma_m = pax_find_mirror_vma(vma);
68157+ if (!vma_m)
68158+ return;
68159+
68160+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68161+ address_m = address + SEGMEXEC_TASK_SIZE;
68162+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68163+ pte_m = pte_offset_map(pmd_m, address_m);
68164+ ptl_m = pte_lockptr(mm, pmd_m);
68165+ if (ptl != ptl_m) {
68166+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68167+ if (!pte_none(*pte_m))
68168+ goto out;
68169+ }
68170+
68171+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68172+ page_cache_get(page_m);
68173+ page_add_file_rmap(page_m);
68174+ inc_mm_counter_fast(mm, MM_FILEPAGES);
68175+ set_pte_at(mm, address_m, pte_m, entry_m);
68176+ update_mmu_cache(vma_m, address_m, entry_m);
68177+out:
68178+ if (ptl != ptl_m)
68179+ spin_unlock(ptl_m);
68180+ pte_unmap(pte_m);
68181+}
68182+
68183+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68184+{
68185+ struct mm_struct *mm = vma->vm_mm;
68186+ unsigned long address_m;
68187+ spinlock_t *ptl_m;
68188+ struct vm_area_struct *vma_m;
68189+ pmd_t *pmd_m;
68190+ pte_t *pte_m, entry_m;
68191+
68192+ vma_m = pax_find_mirror_vma(vma);
68193+ if (!vma_m)
68194+ return;
68195+
68196+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68197+ address_m = address + SEGMEXEC_TASK_SIZE;
68198+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68199+ pte_m = pte_offset_map(pmd_m, address_m);
68200+ ptl_m = pte_lockptr(mm, pmd_m);
68201+ if (ptl != ptl_m) {
68202+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68203+ if (!pte_none(*pte_m))
68204+ goto out;
68205+ }
68206+
68207+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68208+ set_pte_at(mm, address_m, pte_m, entry_m);
68209+out:
68210+ if (ptl != ptl_m)
68211+ spin_unlock(ptl_m);
68212+ pte_unmap(pte_m);
68213+}
68214+
68215+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68216+{
68217+ struct page *page_m;
68218+ pte_t entry;
68219+
68220+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68221+ goto out;
68222+
68223+ entry = *pte;
68224+ page_m = vm_normal_page(vma, address, entry);
68225+ if (!page_m)
68226+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68227+ else if (PageAnon(page_m)) {
68228+ if (pax_find_mirror_vma(vma)) {
68229+ pte_unmap_unlock(pte, ptl);
68230+ lock_page(page_m);
68231+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68232+ if (pte_same(entry, *pte))
68233+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68234+ else
68235+ unlock_page(page_m);
68236+ }
68237+ } else
68238+ pax_mirror_file_pte(vma, address, page_m, ptl);
68239+
68240+out:
68241+ pte_unmap_unlock(pte, ptl);
68242+}
68243+#endif
68244+
68245 /*
68246 * This routine handles present pages, when users try to write
68247 * to a shared page. It is done by copying the page to a new address
68248@@ -2656,6 +2849,12 @@ gotten:
68249 */
68250 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68251 if (likely(pte_same(*page_table, orig_pte))) {
68252+
68253+#ifdef CONFIG_PAX_SEGMEXEC
68254+ if (pax_find_mirror_vma(vma))
68255+ BUG_ON(!trylock_page(new_page));
68256+#endif
68257+
68258 if (old_page) {
68259 if (!PageAnon(old_page)) {
68260 dec_mm_counter_fast(mm, MM_FILEPAGES);
68261@@ -2707,6 +2906,10 @@ gotten:
68262 page_remove_rmap(old_page);
68263 }
68264
68265+#ifdef CONFIG_PAX_SEGMEXEC
68266+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68267+#endif
68268+
68269 /* Free the old page.. */
68270 new_page = old_page;
68271 ret |= VM_FAULT_WRITE;
68272@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68273 swap_free(entry);
68274 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68275 try_to_free_swap(page);
68276+
68277+#ifdef CONFIG_PAX_SEGMEXEC
68278+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68279+#endif
68280+
68281 unlock_page(page);
68282 if (swapcache) {
68283 /*
68284@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68285
68286 /* No need to invalidate - it was non-present before */
68287 update_mmu_cache(vma, address, page_table);
68288+
68289+#ifdef CONFIG_PAX_SEGMEXEC
68290+ pax_mirror_anon_pte(vma, address, page, ptl);
68291+#endif
68292+
68293 unlock:
68294 pte_unmap_unlock(page_table, ptl);
68295 out:
68296@@ -3028,40 +3241,6 @@ out_release:
68297 }
68298
68299 /*
68300- * This is like a special single-page "expand_{down|up}wards()",
68301- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68302- * doesn't hit another vma.
68303- */
68304-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68305-{
68306- address &= PAGE_MASK;
68307- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68308- struct vm_area_struct *prev = vma->vm_prev;
68309-
68310- /*
68311- * Is there a mapping abutting this one below?
68312- *
68313- * That's only ok if it's the same stack mapping
68314- * that has gotten split..
68315- */
68316- if (prev && prev->vm_end == address)
68317- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68318-
68319- expand_downwards(vma, address - PAGE_SIZE);
68320- }
68321- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68322- struct vm_area_struct *next = vma->vm_next;
68323-
68324- /* As VM_GROWSDOWN but s/below/above/ */
68325- if (next && next->vm_start == address + PAGE_SIZE)
68326- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68327-
68328- expand_upwards(vma, address + PAGE_SIZE);
68329- }
68330- return 0;
68331-}
68332-
68333-/*
68334 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68335 * but allow concurrent faults), and pte mapped but not yet locked.
68336 * We return with mmap_sem still held, but pte unmapped and unlocked.
68337@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68338 unsigned long address, pte_t *page_table, pmd_t *pmd,
68339 unsigned int flags)
68340 {
68341- struct page *page;
68342+ struct page *page = NULL;
68343 spinlock_t *ptl;
68344 pte_t entry;
68345
68346- pte_unmap(page_table);
68347-
68348- /* Check if we need to add a guard page to the stack */
68349- if (check_stack_guard_page(vma, address) < 0)
68350- return VM_FAULT_SIGBUS;
68351-
68352- /* Use the zero-page for reads */
68353 if (!(flags & FAULT_FLAG_WRITE)) {
68354 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68355 vma->vm_page_prot));
68356- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68357+ ptl = pte_lockptr(mm, pmd);
68358+ spin_lock(ptl);
68359 if (!pte_none(*page_table))
68360 goto unlock;
68361 goto setpte;
68362 }
68363
68364 /* Allocate our own private page. */
68365+ pte_unmap(page_table);
68366+
68367 if (unlikely(anon_vma_prepare(vma)))
68368 goto oom;
68369 page = alloc_zeroed_user_highpage_movable(vma, address);
68370@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68371 if (!pte_none(*page_table))
68372 goto release;
68373
68374+#ifdef CONFIG_PAX_SEGMEXEC
68375+ if (pax_find_mirror_vma(vma))
68376+ BUG_ON(!trylock_page(page));
68377+#endif
68378+
68379 inc_mm_counter_fast(mm, MM_ANONPAGES);
68380 page_add_new_anon_rmap(page, vma, address);
68381 setpte:
68382@@ -3116,6 +3296,12 @@ setpte:
68383
68384 /* No need to invalidate - it was non-present before */
68385 update_mmu_cache(vma, address, page_table);
68386+
68387+#ifdef CONFIG_PAX_SEGMEXEC
68388+ if (page)
68389+ pax_mirror_anon_pte(vma, address, page, ptl);
68390+#endif
68391+
68392 unlock:
68393 pte_unmap_unlock(page_table, ptl);
68394 return 0;
68395@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68396 */
68397 /* Only go through if we didn't race with anybody else... */
68398 if (likely(pte_same(*page_table, orig_pte))) {
68399+
68400+#ifdef CONFIG_PAX_SEGMEXEC
68401+ if (anon && pax_find_mirror_vma(vma))
68402+ BUG_ON(!trylock_page(page));
68403+#endif
68404+
68405 flush_icache_page(vma, page);
68406 entry = mk_pte(page, vma->vm_page_prot);
68407 if (flags & FAULT_FLAG_WRITE)
68408@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68409
68410 /* no need to invalidate: a not-present page won't be cached */
68411 update_mmu_cache(vma, address, page_table);
68412+
68413+#ifdef CONFIG_PAX_SEGMEXEC
68414+ if (anon)
68415+ pax_mirror_anon_pte(vma, address, page, ptl);
68416+ else
68417+ pax_mirror_file_pte(vma, address, page, ptl);
68418+#endif
68419+
68420 } else {
68421 if (cow_page)
68422 mem_cgroup_uncharge_page(cow_page);
68423@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68424 if (flags & FAULT_FLAG_WRITE)
68425 flush_tlb_fix_spurious_fault(vma, address);
68426 }
68427+
68428+#ifdef CONFIG_PAX_SEGMEXEC
68429+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68430+ return 0;
68431+#endif
68432+
68433 unlock:
68434 pte_unmap_unlock(pte, ptl);
68435 return 0;
68436@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68437 pmd_t *pmd;
68438 pte_t *pte;
68439
68440+#ifdef CONFIG_PAX_SEGMEXEC
68441+ struct vm_area_struct *vma_m;
68442+#endif
68443+
68444 __set_current_state(TASK_RUNNING);
68445
68446 count_vm_event(PGFAULT);
68447@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68448 if (unlikely(is_vm_hugetlb_page(vma)))
68449 return hugetlb_fault(mm, vma, address, flags);
68450
68451+#ifdef CONFIG_PAX_SEGMEXEC
68452+ vma_m = pax_find_mirror_vma(vma);
68453+ if (vma_m) {
68454+ unsigned long address_m;
68455+ pgd_t *pgd_m;
68456+ pud_t *pud_m;
68457+ pmd_t *pmd_m;
68458+
68459+ if (vma->vm_start > vma_m->vm_start) {
68460+ address_m = address;
68461+ address -= SEGMEXEC_TASK_SIZE;
68462+ vma = vma_m;
68463+ } else
68464+ address_m = address + SEGMEXEC_TASK_SIZE;
68465+
68466+ pgd_m = pgd_offset(mm, address_m);
68467+ pud_m = pud_alloc(mm, pgd_m, address_m);
68468+ if (!pud_m)
68469+ return VM_FAULT_OOM;
68470+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68471+ if (!pmd_m)
68472+ return VM_FAULT_OOM;
68473+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68474+ return VM_FAULT_OOM;
68475+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68476+ }
68477+#endif
68478+
68479 pgd = pgd_offset(mm, address);
68480 pud = pud_alloc(mm, pgd, address);
68481 if (!pud)
68482@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68483 * run pte_offset_map on the pmd, if an huge pmd could
68484 * materialize from under us from a different thread.
68485 */
68486- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68487+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68488 return VM_FAULT_OOM;
68489 /* if an huge pmd materialized from under us just retry later */
68490 if (unlikely(pmd_trans_huge(*pmd)))
68491@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68492 gate_vma.vm_start = FIXADDR_USER_START;
68493 gate_vma.vm_end = FIXADDR_USER_END;
68494 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68495- gate_vma.vm_page_prot = __P101;
68496+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68497 /*
68498 * Make sure the vDSO gets into every core dump.
68499 * Dumping its contents makes post-mortem fully interpretable later
68500diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68501index c3fdbcb..2e8ef90 100644
68502--- a/mm/mempolicy.c
68503+++ b/mm/mempolicy.c
68504@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68505 unsigned long vmstart;
68506 unsigned long vmend;
68507
68508+#ifdef CONFIG_PAX_SEGMEXEC
68509+ struct vm_area_struct *vma_m;
68510+#endif
68511+
68512 vma = find_vma_prev(mm, start, &prev);
68513 if (!vma || vma->vm_start > start)
68514 return -EFAULT;
68515@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68516 err = policy_vma(vma, new_pol);
68517 if (err)
68518 goto out;
68519+
68520+#ifdef CONFIG_PAX_SEGMEXEC
68521+ vma_m = pax_find_mirror_vma(vma);
68522+ if (vma_m) {
68523+ err = policy_vma(vma_m, new_pol);
68524+ if (err)
68525+ goto out;
68526+ }
68527+#endif
68528+
68529 }
68530
68531 out:
68532@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68533
68534 if (end < start)
68535 return -EINVAL;
68536+
68537+#ifdef CONFIG_PAX_SEGMEXEC
68538+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68539+ if (end > SEGMEXEC_TASK_SIZE)
68540+ return -EINVAL;
68541+ } else
68542+#endif
68543+
68544+ if (end > TASK_SIZE)
68545+ return -EINVAL;
68546+
68547 if (end == start)
68548 return 0;
68549
68550@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68551 if (!mm)
68552 goto out;
68553
68554+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68555+ if (mm != current->mm &&
68556+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68557+ err = -EPERM;
68558+ goto out;
68559+ }
68560+#endif
68561+
68562 /*
68563 * Check if this process has the right to modify the specified
68564 * process. The right exists if the process has administrative
68565@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68566 rcu_read_lock();
68567 tcred = __task_cred(task);
68568 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68569- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68570- !capable(CAP_SYS_NICE)) {
68571+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68572 rcu_read_unlock();
68573 err = -EPERM;
68574 goto out;
68575diff --git a/mm/migrate.c b/mm/migrate.c
68576index 177aca4..ab3a744 100644
68577--- a/mm/migrate.c
68578+++ b/mm/migrate.c
68579@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68580 if (!mm)
68581 return -EINVAL;
68582
68583+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68584+ if (mm != current->mm &&
68585+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68586+ err = -EPERM;
68587+ goto out;
68588+ }
68589+#endif
68590+
68591 /*
68592 * Check if this process has the right to modify the specified
68593 * process. The right exists if the process has administrative
68594@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68595 rcu_read_lock();
68596 tcred = __task_cred(task);
68597 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68598- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68599- !capable(CAP_SYS_NICE)) {
68600+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68601 rcu_read_unlock();
68602 err = -EPERM;
68603 goto out;
68604diff --git a/mm/mlock.c b/mm/mlock.c
68605index 4f4f53b..9511904 100644
68606--- a/mm/mlock.c
68607+++ b/mm/mlock.c
68608@@ -13,6 +13,7 @@
68609 #include <linux/pagemap.h>
68610 #include <linux/mempolicy.h>
68611 #include <linux/syscalls.h>
68612+#include <linux/security.h>
68613 #include <linux/sched.h>
68614 #include <linux/export.h>
68615 #include <linux/rmap.h>
68616@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68617 return -EINVAL;
68618 if (end == start)
68619 return 0;
68620+ if (end > TASK_SIZE)
68621+ return -EINVAL;
68622+
68623 vma = find_vma_prev(current->mm, start, &prev);
68624 if (!vma || vma->vm_start > start)
68625 return -ENOMEM;
68626@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68627 for (nstart = start ; ; ) {
68628 vm_flags_t newflags;
68629
68630+#ifdef CONFIG_PAX_SEGMEXEC
68631+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68632+ break;
68633+#endif
68634+
68635 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68636
68637 newflags = vma->vm_flags | VM_LOCKED;
68638@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68639 lock_limit >>= PAGE_SHIFT;
68640
68641 /* check against resource limits */
68642+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68643 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68644 error = do_mlock(start, len, 1);
68645 up_write(&current->mm->mmap_sem);
68646@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68647 static int do_mlockall(int flags)
68648 {
68649 struct vm_area_struct * vma, * prev = NULL;
68650- unsigned int def_flags = 0;
68651
68652 if (flags & MCL_FUTURE)
68653- def_flags = VM_LOCKED;
68654- current->mm->def_flags = def_flags;
68655+ current->mm->def_flags |= VM_LOCKED;
68656+ else
68657+ current->mm->def_flags &= ~VM_LOCKED;
68658 if (flags == MCL_FUTURE)
68659 goto out;
68660
68661 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68662 vm_flags_t newflags;
68663
68664+#ifdef CONFIG_PAX_SEGMEXEC
68665+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68666+ break;
68667+#endif
68668+
68669+ BUG_ON(vma->vm_end > TASK_SIZE);
68670 newflags = vma->vm_flags | VM_LOCKED;
68671 if (!(flags & MCL_CURRENT))
68672 newflags &= ~VM_LOCKED;
68673@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68674 lock_limit >>= PAGE_SHIFT;
68675
68676 ret = -ENOMEM;
68677+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68678 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68679 capable(CAP_IPC_LOCK))
68680 ret = do_mlockall(flags);
68681diff --git a/mm/mmap.c b/mm/mmap.c
68682index eae90af..44552cf 100644
68683--- a/mm/mmap.c
68684+++ b/mm/mmap.c
68685@@ -46,6 +46,16 @@
68686 #define arch_rebalance_pgtables(addr, len) (addr)
68687 #endif
68688
68689+static inline void verify_mm_writelocked(struct mm_struct *mm)
68690+{
68691+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68692+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68693+ up_read(&mm->mmap_sem);
68694+ BUG();
68695+ }
68696+#endif
68697+}
68698+
68699 static void unmap_region(struct mm_struct *mm,
68700 struct vm_area_struct *vma, struct vm_area_struct *prev,
68701 unsigned long start, unsigned long end);
68702@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68703 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68704 *
68705 */
68706-pgprot_t protection_map[16] = {
68707+pgprot_t protection_map[16] __read_only = {
68708 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68709 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68710 };
68711
68712-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68713+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68714 {
68715- return __pgprot(pgprot_val(protection_map[vm_flags &
68716+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68717 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68718 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68719+
68720+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68721+ if (!(__supported_pte_mask & _PAGE_NX) &&
68722+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68723+ (vm_flags & (VM_READ | VM_WRITE)))
68724+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68725+#endif
68726+
68727+ return prot;
68728 }
68729 EXPORT_SYMBOL(vm_get_page_prot);
68730
68731 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68732 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68733 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68734+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68735 /*
68736 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68737 * other variables. It can be updated by several CPUs frequently.
68738@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68739 struct vm_area_struct *next = vma->vm_next;
68740
68741 might_sleep();
68742+ BUG_ON(vma->vm_mirror);
68743 if (vma->vm_ops && vma->vm_ops->close)
68744 vma->vm_ops->close(vma);
68745 if (vma->vm_file) {
68746@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68747 * not page aligned -Ram Gupta
68748 */
68749 rlim = rlimit(RLIMIT_DATA);
68750+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68751 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68752 (mm->end_data - mm->start_data) > rlim)
68753 goto out;
68754@@ -689,6 +711,12 @@ static int
68755 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68756 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68757 {
68758+
68759+#ifdef CONFIG_PAX_SEGMEXEC
68760+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68761+ return 0;
68762+#endif
68763+
68764 if (is_mergeable_vma(vma, file, vm_flags) &&
68765 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68766 if (vma->vm_pgoff == vm_pgoff)
68767@@ -708,6 +736,12 @@ static int
68768 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68769 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68770 {
68771+
68772+#ifdef CONFIG_PAX_SEGMEXEC
68773+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68774+ return 0;
68775+#endif
68776+
68777 if (is_mergeable_vma(vma, file, vm_flags) &&
68778 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68779 pgoff_t vm_pglen;
68780@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68781 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68782 struct vm_area_struct *prev, unsigned long addr,
68783 unsigned long end, unsigned long vm_flags,
68784- struct anon_vma *anon_vma, struct file *file,
68785+ struct anon_vma *anon_vma, struct file *file,
68786 pgoff_t pgoff, struct mempolicy *policy)
68787 {
68788 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68789 struct vm_area_struct *area, *next;
68790 int err;
68791
68792+#ifdef CONFIG_PAX_SEGMEXEC
68793+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68794+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68795+
68796+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68797+#endif
68798+
68799 /*
68800 * We later require that vma->vm_flags == vm_flags,
68801 * so this tests vma->vm_flags & VM_SPECIAL, too.
68802@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68803 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68804 next = next->vm_next;
68805
68806+#ifdef CONFIG_PAX_SEGMEXEC
68807+ if (prev)
68808+ prev_m = pax_find_mirror_vma(prev);
68809+ if (area)
68810+ area_m = pax_find_mirror_vma(area);
68811+ if (next)
68812+ next_m = pax_find_mirror_vma(next);
68813+#endif
68814+
68815 /*
68816 * Can it merge with the predecessor?
68817 */
68818@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68819 /* cases 1, 6 */
68820 err = vma_adjust(prev, prev->vm_start,
68821 next->vm_end, prev->vm_pgoff, NULL);
68822- } else /* cases 2, 5, 7 */
68823+
68824+#ifdef CONFIG_PAX_SEGMEXEC
68825+ if (!err && prev_m)
68826+ err = vma_adjust(prev_m, prev_m->vm_start,
68827+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68828+#endif
68829+
68830+ } else { /* cases 2, 5, 7 */
68831 err = vma_adjust(prev, prev->vm_start,
68832 end, prev->vm_pgoff, NULL);
68833+
68834+#ifdef CONFIG_PAX_SEGMEXEC
68835+ if (!err && prev_m)
68836+ err = vma_adjust(prev_m, prev_m->vm_start,
68837+ end_m, prev_m->vm_pgoff, NULL);
68838+#endif
68839+
68840+ }
68841 if (err)
68842 return NULL;
68843 khugepaged_enter_vma_merge(prev);
68844@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68845 mpol_equal(policy, vma_policy(next)) &&
68846 can_vma_merge_before(next, vm_flags,
68847 anon_vma, file, pgoff+pglen)) {
68848- if (prev && addr < prev->vm_end) /* case 4 */
68849+ if (prev && addr < prev->vm_end) { /* case 4 */
68850 err = vma_adjust(prev, prev->vm_start,
68851 addr, prev->vm_pgoff, NULL);
68852- else /* cases 3, 8 */
68853+
68854+#ifdef CONFIG_PAX_SEGMEXEC
68855+ if (!err && prev_m)
68856+ err = vma_adjust(prev_m, prev_m->vm_start,
68857+ addr_m, prev_m->vm_pgoff, NULL);
68858+#endif
68859+
68860+ } else { /* cases 3, 8 */
68861 err = vma_adjust(area, addr, next->vm_end,
68862 next->vm_pgoff - pglen, NULL);
68863+
68864+#ifdef CONFIG_PAX_SEGMEXEC
68865+ if (!err && area_m)
68866+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68867+ next_m->vm_pgoff - pglen, NULL);
68868+#endif
68869+
68870+ }
68871 if (err)
68872 return NULL;
68873 khugepaged_enter_vma_merge(area);
68874@@ -921,14 +1001,11 @@ none:
68875 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68876 struct file *file, long pages)
68877 {
68878- const unsigned long stack_flags
68879- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68880-
68881 if (file) {
68882 mm->shared_vm += pages;
68883 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68884 mm->exec_vm += pages;
68885- } else if (flags & stack_flags)
68886+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68887 mm->stack_vm += pages;
68888 if (flags & (VM_RESERVED|VM_IO))
68889 mm->reserved_vm += pages;
68890@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68891 * (the exception is when the underlying filesystem is noexec
68892 * mounted, in which case we dont add PROT_EXEC.)
68893 */
68894- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68895+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68896 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68897 prot |= PROT_EXEC;
68898
68899@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68900 /* Obtain the address to map to. we verify (or select) it and ensure
68901 * that it represents a valid section of the address space.
68902 */
68903- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68904+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68905 if (addr & ~PAGE_MASK)
68906 return addr;
68907
68908@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68909 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68910 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68911
68912+#ifdef CONFIG_PAX_MPROTECT
68913+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68914+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68915+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68916+ gr_log_rwxmmap(file);
68917+
68918+#ifdef CONFIG_PAX_EMUPLT
68919+ vm_flags &= ~VM_EXEC;
68920+#else
68921+ return -EPERM;
68922+#endif
68923+
68924+ }
68925+
68926+ if (!(vm_flags & VM_EXEC))
68927+ vm_flags &= ~VM_MAYEXEC;
68928+#else
68929+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68930+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68931+#endif
68932+ else
68933+ vm_flags &= ~VM_MAYWRITE;
68934+ }
68935+#endif
68936+
68937+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68938+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68939+ vm_flags &= ~VM_PAGEEXEC;
68940+#endif
68941+
68942 if (flags & MAP_LOCKED)
68943 if (!can_do_mlock())
68944 return -EPERM;
68945@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68946 locked += mm->locked_vm;
68947 lock_limit = rlimit(RLIMIT_MEMLOCK);
68948 lock_limit >>= PAGE_SHIFT;
68949+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68950 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68951 return -EAGAIN;
68952 }
68953@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68954 if (error)
68955 return error;
68956
68957+ if (!gr_acl_handle_mmap(file, prot))
68958+ return -EACCES;
68959+
68960 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68961 }
68962 EXPORT_SYMBOL(do_mmap_pgoff);
68963@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68964 vm_flags_t vm_flags = vma->vm_flags;
68965
68966 /* If it was private or non-writable, the write bit is already clear */
68967- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68968+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68969 return 0;
68970
68971 /* The backer wishes to know when pages are first written to? */
68972@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68973 unsigned long charged = 0;
68974 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68975
68976+#ifdef CONFIG_PAX_SEGMEXEC
68977+ struct vm_area_struct *vma_m = NULL;
68978+#endif
68979+
68980+ /*
68981+ * mm->mmap_sem is required to protect against another thread
68982+ * changing the mappings in case we sleep.
68983+ */
68984+ verify_mm_writelocked(mm);
68985+
68986 /* Clear old maps */
68987 error = -ENOMEM;
68988-munmap_back:
68989 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68990 if (vma && vma->vm_start < addr + len) {
68991 if (do_munmap(mm, addr, len))
68992 return -ENOMEM;
68993- goto munmap_back;
68994+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68995+ BUG_ON(vma && vma->vm_start < addr + len);
68996 }
68997
68998 /* Check against address space limit. */
68999@@ -1258,6 +1379,16 @@ munmap_back:
69000 goto unacct_error;
69001 }
69002
69003+#ifdef CONFIG_PAX_SEGMEXEC
69004+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69005+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69006+ if (!vma_m) {
69007+ error = -ENOMEM;
69008+ goto free_vma;
69009+ }
69010+ }
69011+#endif
69012+
69013 vma->vm_mm = mm;
69014 vma->vm_start = addr;
69015 vma->vm_end = addr + len;
69016@@ -1281,6 +1412,19 @@ munmap_back:
69017 error = file->f_op->mmap(file, vma);
69018 if (error)
69019 goto unmap_and_free_vma;
69020+
69021+#ifdef CONFIG_PAX_SEGMEXEC
69022+ if (vma_m && (vm_flags & VM_EXECUTABLE))
69023+ added_exe_file_vma(mm);
69024+#endif
69025+
69026+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69027+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69028+ vma->vm_flags |= VM_PAGEEXEC;
69029+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69030+ }
69031+#endif
69032+
69033 if (vm_flags & VM_EXECUTABLE)
69034 added_exe_file_vma(mm);
69035
69036@@ -1316,6 +1460,11 @@ munmap_back:
69037 vma_link(mm, vma, prev, rb_link, rb_parent);
69038 file = vma->vm_file;
69039
69040+#ifdef CONFIG_PAX_SEGMEXEC
69041+ if (vma_m)
69042+ BUG_ON(pax_mirror_vma(vma_m, vma));
69043+#endif
69044+
69045 /* Once vma denies write, undo our temporary denial count */
69046 if (correct_wcount)
69047 atomic_inc(&inode->i_writecount);
69048@@ -1324,6 +1473,7 @@ out:
69049
69050 mm->total_vm += len >> PAGE_SHIFT;
69051 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69052+ track_exec_limit(mm, addr, addr + len, vm_flags);
69053 if (vm_flags & VM_LOCKED) {
69054 if (!mlock_vma_pages_range(vma, addr, addr + len))
69055 mm->locked_vm += (len >> PAGE_SHIFT);
69056@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69057 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69058 charged = 0;
69059 free_vma:
69060+
69061+#ifdef CONFIG_PAX_SEGMEXEC
69062+ if (vma_m)
69063+ kmem_cache_free(vm_area_cachep, vma_m);
69064+#endif
69065+
69066 kmem_cache_free(vm_area_cachep, vma);
69067 unacct_error:
69068 if (charged)
69069@@ -1348,6 +1504,44 @@ unacct_error:
69070 return error;
69071 }
69072
69073+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69074+{
69075+ if (!vma) {
69076+#ifdef CONFIG_STACK_GROWSUP
69077+ if (addr > sysctl_heap_stack_gap)
69078+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69079+ else
69080+ vma = find_vma(current->mm, 0);
69081+ if (vma && (vma->vm_flags & VM_GROWSUP))
69082+ return false;
69083+#endif
69084+ return true;
69085+ }
69086+
69087+ if (addr + len > vma->vm_start)
69088+ return false;
69089+
69090+ if (vma->vm_flags & VM_GROWSDOWN)
69091+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69092+#ifdef CONFIG_STACK_GROWSUP
69093+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69094+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69095+#endif
69096+
69097+ return true;
69098+}
69099+
69100+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69101+{
69102+ if (vma->vm_start < len)
69103+ return -ENOMEM;
69104+ if (!(vma->vm_flags & VM_GROWSDOWN))
69105+ return vma->vm_start - len;
69106+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69107+ return vma->vm_start - len - sysctl_heap_stack_gap;
69108+ return -ENOMEM;
69109+}
69110+
69111 /* Get an address range which is currently unmapped.
69112 * For shmat() with addr=0.
69113 *
69114@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69115 if (flags & MAP_FIXED)
69116 return addr;
69117
69118+#ifdef CONFIG_PAX_RANDMMAP
69119+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69120+#endif
69121+
69122 if (addr) {
69123 addr = PAGE_ALIGN(addr);
69124- vma = find_vma(mm, addr);
69125- if (TASK_SIZE - len >= addr &&
69126- (!vma || addr + len <= vma->vm_start))
69127- return addr;
69128+ if (TASK_SIZE - len >= addr) {
69129+ vma = find_vma(mm, addr);
69130+ if (check_heap_stack_gap(vma, addr, len))
69131+ return addr;
69132+ }
69133 }
69134 if (len > mm->cached_hole_size) {
69135- start_addr = addr = mm->free_area_cache;
69136+ start_addr = addr = mm->free_area_cache;
69137 } else {
69138- start_addr = addr = TASK_UNMAPPED_BASE;
69139- mm->cached_hole_size = 0;
69140+ start_addr = addr = mm->mmap_base;
69141+ mm->cached_hole_size = 0;
69142 }
69143
69144 full_search:
69145@@ -1396,34 +1595,40 @@ full_search:
69146 * Start a new search - just in case we missed
69147 * some holes.
69148 */
69149- if (start_addr != TASK_UNMAPPED_BASE) {
69150- addr = TASK_UNMAPPED_BASE;
69151- start_addr = addr;
69152+ if (start_addr != mm->mmap_base) {
69153+ start_addr = addr = mm->mmap_base;
69154 mm->cached_hole_size = 0;
69155 goto full_search;
69156 }
69157 return -ENOMEM;
69158 }
69159- if (!vma || addr + len <= vma->vm_start) {
69160- /*
69161- * Remember the place where we stopped the search:
69162- */
69163- mm->free_area_cache = addr + len;
69164- return addr;
69165- }
69166+ if (check_heap_stack_gap(vma, addr, len))
69167+ break;
69168 if (addr + mm->cached_hole_size < vma->vm_start)
69169 mm->cached_hole_size = vma->vm_start - addr;
69170 addr = vma->vm_end;
69171 }
69172+
69173+ /*
69174+ * Remember the place where we stopped the search:
69175+ */
69176+ mm->free_area_cache = addr + len;
69177+ return addr;
69178 }
69179 #endif
69180
69181 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69182 {
69183+
69184+#ifdef CONFIG_PAX_SEGMEXEC
69185+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69186+ return;
69187+#endif
69188+
69189 /*
69190 * Is this a new hole at the lowest possible address?
69191 */
69192- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69193+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69194 mm->free_area_cache = addr;
69195 mm->cached_hole_size = ~0UL;
69196 }
69197@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69198 {
69199 struct vm_area_struct *vma;
69200 struct mm_struct *mm = current->mm;
69201- unsigned long addr = addr0;
69202+ unsigned long base = mm->mmap_base, addr = addr0;
69203
69204 /* requested length too big for entire address space */
69205 if (len > TASK_SIZE)
69206@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69207 if (flags & MAP_FIXED)
69208 return addr;
69209
69210+#ifdef CONFIG_PAX_RANDMMAP
69211+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69212+#endif
69213+
69214 /* requesting a specific address */
69215 if (addr) {
69216 addr = PAGE_ALIGN(addr);
69217- vma = find_vma(mm, addr);
69218- if (TASK_SIZE - len >= addr &&
69219- (!vma || addr + len <= vma->vm_start))
69220- return addr;
69221+ if (TASK_SIZE - len >= addr) {
69222+ vma = find_vma(mm, addr);
69223+ if (check_heap_stack_gap(vma, addr, len))
69224+ return addr;
69225+ }
69226 }
69227
69228 /* check if free_area_cache is useful for us */
69229@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69230 /* make sure it can fit in the remaining address space */
69231 if (addr > len) {
69232 vma = find_vma(mm, addr-len);
69233- if (!vma || addr <= vma->vm_start)
69234+ if (check_heap_stack_gap(vma, addr - len, len))
69235 /* remember the address as a hint for next time */
69236 return (mm->free_area_cache = addr-len);
69237 }
69238@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69239 * return with success:
69240 */
69241 vma = find_vma(mm, addr);
69242- if (!vma || addr+len <= vma->vm_start)
69243+ if (check_heap_stack_gap(vma, addr, len))
69244 /* remember the address as a hint for next time */
69245 return (mm->free_area_cache = addr);
69246
69247@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69248 mm->cached_hole_size = vma->vm_start - addr;
69249
69250 /* try just below the current vma->vm_start */
69251- addr = vma->vm_start-len;
69252- } while (len < vma->vm_start);
69253+ addr = skip_heap_stack_gap(vma, len);
69254+ } while (!IS_ERR_VALUE(addr));
69255
69256 bottomup:
69257 /*
69258@@ -1507,13 +1717,21 @@ bottomup:
69259 * can happen with large stack limits and large mmap()
69260 * allocations.
69261 */
69262+ mm->mmap_base = TASK_UNMAPPED_BASE;
69263+
69264+#ifdef CONFIG_PAX_RANDMMAP
69265+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69266+ mm->mmap_base += mm->delta_mmap;
69267+#endif
69268+
69269+ mm->free_area_cache = mm->mmap_base;
69270 mm->cached_hole_size = ~0UL;
69271- mm->free_area_cache = TASK_UNMAPPED_BASE;
69272 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69273 /*
69274 * Restore the topdown base:
69275 */
69276- mm->free_area_cache = mm->mmap_base;
69277+ mm->mmap_base = base;
69278+ mm->free_area_cache = base;
69279 mm->cached_hole_size = ~0UL;
69280
69281 return addr;
69282@@ -1522,6 +1740,12 @@ bottomup:
69283
69284 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69285 {
69286+
69287+#ifdef CONFIG_PAX_SEGMEXEC
69288+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69289+ return;
69290+#endif
69291+
69292 /*
69293 * Is this a new hole at the highest possible address?
69294 */
69295@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69296 mm->free_area_cache = addr;
69297
69298 /* dont allow allocations above current base */
69299- if (mm->free_area_cache > mm->mmap_base)
69300+ if (mm->free_area_cache > mm->mmap_base) {
69301 mm->free_area_cache = mm->mmap_base;
69302+ mm->cached_hole_size = ~0UL;
69303+ }
69304 }
69305
69306 unsigned long
69307@@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69308
69309 EXPORT_SYMBOL(find_vma);
69310
69311-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69312+/*
69313+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69314+ * Note: pprev is set to NULL when return value is NULL.
69315+ */
69316 struct vm_area_struct *
69317 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69318 struct vm_area_struct **pprev)
69319 {
69320- struct vm_area_struct *vma = NULL, *prev = NULL;
69321- struct rb_node *rb_node;
69322- if (!mm)
69323- goto out;
69324+ struct vm_area_struct *vma;
69325
69326- /* Guard against addr being lower than the first VMA */
69327- vma = mm->mmap;
69328+ vma = find_vma(mm, addr);
69329+ *pprev = vma ? vma->vm_prev : NULL;
69330+ return vma;
69331+}
69332
69333- /* Go through the RB tree quickly. */
69334- rb_node = mm->mm_rb.rb_node;
69335+#ifdef CONFIG_PAX_SEGMEXEC
69336+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69337+{
69338+ struct vm_area_struct *vma_m;
69339
69340- while (rb_node) {
69341- struct vm_area_struct *vma_tmp;
69342- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69343-
69344- if (addr < vma_tmp->vm_end) {
69345- rb_node = rb_node->rb_left;
69346- } else {
69347- prev = vma_tmp;
69348- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69349- break;
69350- rb_node = rb_node->rb_right;
69351- }
69352+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69353+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69354+ BUG_ON(vma->vm_mirror);
69355+ return NULL;
69356 }
69357-
69358-out:
69359- *pprev = prev;
69360- return prev ? prev->vm_next : vma;
69361+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69362+ vma_m = vma->vm_mirror;
69363+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69364+ BUG_ON(vma->vm_file != vma_m->vm_file);
69365+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69366+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69367+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69368+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69369+ return vma_m;
69370 }
69371+#endif
69372
69373 /*
69374 * Verify that the stack growth is acceptable and
69375@@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69376 return -ENOMEM;
69377
69378 /* Stack limit test */
69379+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69380 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69381 return -ENOMEM;
69382
69383@@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69384 locked = mm->locked_vm + grow;
69385 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69386 limit >>= PAGE_SHIFT;
69387+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69388 if (locked > limit && !capable(CAP_IPC_LOCK))
69389 return -ENOMEM;
69390 }
69391@@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69392 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69393 * vma is the last one with address > vma->vm_end. Have to extend vma.
69394 */
69395+#ifndef CONFIG_IA64
69396+static
69397+#endif
69398 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69399 {
69400 int error;
69401+ bool locknext;
69402
69403 if (!(vma->vm_flags & VM_GROWSUP))
69404 return -EFAULT;
69405
69406+ /* Also guard against wrapping around to address 0. */
69407+ if (address < PAGE_ALIGN(address+1))
69408+ address = PAGE_ALIGN(address+1);
69409+ else
69410+ return -ENOMEM;
69411+
69412 /*
69413 * We must make sure the anon_vma is allocated
69414 * so that the anon_vma locking is not a noop.
69415 */
69416 if (unlikely(anon_vma_prepare(vma)))
69417 return -ENOMEM;
69418+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69419+ if (locknext && anon_vma_prepare(vma->vm_next))
69420+ return -ENOMEM;
69421 vma_lock_anon_vma(vma);
69422+ if (locknext)
69423+ vma_lock_anon_vma(vma->vm_next);
69424
69425 /*
69426 * vma->vm_start/vm_end cannot change under us because the caller
69427 * is required to hold the mmap_sem in read mode. We need the
69428- * anon_vma lock to serialize against concurrent expand_stacks.
69429- * Also guard against wrapping around to address 0.
69430+ * anon_vma locks to serialize against concurrent expand_stacks
69431+ * and expand_upwards.
69432 */
69433- if (address < PAGE_ALIGN(address+4))
69434- address = PAGE_ALIGN(address+4);
69435- else {
69436- vma_unlock_anon_vma(vma);
69437- return -ENOMEM;
69438- }
69439 error = 0;
69440
69441 /* Somebody else might have raced and expanded it already */
69442- if (address > vma->vm_end) {
69443+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69444+ error = -ENOMEM;
69445+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69446 unsigned long size, grow;
69447
69448 size = address - vma->vm_start;
69449@@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69450 }
69451 }
69452 }
69453+ if (locknext)
69454+ vma_unlock_anon_vma(vma->vm_next);
69455 vma_unlock_anon_vma(vma);
69456 khugepaged_enter_vma_merge(vma);
69457 return error;
69458@@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69459 unsigned long address)
69460 {
69461 int error;
69462+ bool lockprev = false;
69463+ struct vm_area_struct *prev;
69464
69465 /*
69466 * We must make sure the anon_vma is allocated
69467@@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69468 if (error)
69469 return error;
69470
69471+ prev = vma->vm_prev;
69472+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69473+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69474+#endif
69475+ if (lockprev && anon_vma_prepare(prev))
69476+ return -ENOMEM;
69477+ if (lockprev)
69478+ vma_lock_anon_vma(prev);
69479+
69480 vma_lock_anon_vma(vma);
69481
69482 /*
69483@@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69484 */
69485
69486 /* Somebody else might have raced and expanded it already */
69487- if (address < vma->vm_start) {
69488+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69489+ error = -ENOMEM;
69490+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69491 unsigned long size, grow;
69492
69493+#ifdef CONFIG_PAX_SEGMEXEC
69494+ struct vm_area_struct *vma_m;
69495+
69496+ vma_m = pax_find_mirror_vma(vma);
69497+#endif
69498+
69499 size = vma->vm_end - address;
69500 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69501
69502@@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69503 if (!error) {
69504 vma->vm_start = address;
69505 vma->vm_pgoff -= grow;
69506+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69507+
69508+#ifdef CONFIG_PAX_SEGMEXEC
69509+ if (vma_m) {
69510+ vma_m->vm_start -= grow << PAGE_SHIFT;
69511+ vma_m->vm_pgoff -= grow;
69512+ }
69513+#endif
69514+
69515 perf_event_mmap(vma);
69516 }
69517 }
69518 }
69519 vma_unlock_anon_vma(vma);
69520+ if (lockprev)
69521+ vma_unlock_anon_vma(prev);
69522 khugepaged_enter_vma_merge(vma);
69523 return error;
69524 }
69525@@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69526 do {
69527 long nrpages = vma_pages(vma);
69528
69529+#ifdef CONFIG_PAX_SEGMEXEC
69530+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69531+ vma = remove_vma(vma);
69532+ continue;
69533+ }
69534+#endif
69535+
69536 mm->total_vm -= nrpages;
69537 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69538 vma = remove_vma(vma);
69539@@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69540 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69541 vma->vm_prev = NULL;
69542 do {
69543+
69544+#ifdef CONFIG_PAX_SEGMEXEC
69545+ if (vma->vm_mirror) {
69546+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69547+ vma->vm_mirror->vm_mirror = NULL;
69548+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69549+ vma->vm_mirror = NULL;
69550+ }
69551+#endif
69552+
69553 rb_erase(&vma->vm_rb, &mm->mm_rb);
69554 mm->map_count--;
69555 tail_vma = vma;
69556@@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69557 struct vm_area_struct *new;
69558 int err = -ENOMEM;
69559
69560+#ifdef CONFIG_PAX_SEGMEXEC
69561+ struct vm_area_struct *vma_m, *new_m = NULL;
69562+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69563+#endif
69564+
69565 if (is_vm_hugetlb_page(vma) && (addr &
69566 ~(huge_page_mask(hstate_vma(vma)))))
69567 return -EINVAL;
69568
69569+#ifdef CONFIG_PAX_SEGMEXEC
69570+ vma_m = pax_find_mirror_vma(vma);
69571+#endif
69572+
69573 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69574 if (!new)
69575 goto out_err;
69576
69577+#ifdef CONFIG_PAX_SEGMEXEC
69578+ if (vma_m) {
69579+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69580+ if (!new_m) {
69581+ kmem_cache_free(vm_area_cachep, new);
69582+ goto out_err;
69583+ }
69584+ }
69585+#endif
69586+
69587 /* most fields are the same, copy all, and then fixup */
69588 *new = *vma;
69589
69590@@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69591 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69592 }
69593
69594+#ifdef CONFIG_PAX_SEGMEXEC
69595+ if (vma_m) {
69596+ *new_m = *vma_m;
69597+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69598+ new_m->vm_mirror = new;
69599+ new->vm_mirror = new_m;
69600+
69601+ if (new_below)
69602+ new_m->vm_end = addr_m;
69603+ else {
69604+ new_m->vm_start = addr_m;
69605+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69606+ }
69607+ }
69608+#endif
69609+
69610 pol = mpol_dup(vma_policy(vma));
69611 if (IS_ERR(pol)) {
69612 err = PTR_ERR(pol);
69613@@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69614 else
69615 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69616
69617+#ifdef CONFIG_PAX_SEGMEXEC
69618+ if (!err && vma_m) {
69619+ if (anon_vma_clone(new_m, vma_m))
69620+ goto out_free_mpol;
69621+
69622+ mpol_get(pol);
69623+ vma_set_policy(new_m, pol);
69624+
69625+ if (new_m->vm_file) {
69626+ get_file(new_m->vm_file);
69627+ if (vma_m->vm_flags & VM_EXECUTABLE)
69628+ added_exe_file_vma(mm);
69629+ }
69630+
69631+ if (new_m->vm_ops && new_m->vm_ops->open)
69632+ new_m->vm_ops->open(new_m);
69633+
69634+ if (new_below)
69635+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69636+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69637+ else
69638+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69639+
69640+ if (err) {
69641+ if (new_m->vm_ops && new_m->vm_ops->close)
69642+ new_m->vm_ops->close(new_m);
69643+ if (new_m->vm_file) {
69644+ if (vma_m->vm_flags & VM_EXECUTABLE)
69645+ removed_exe_file_vma(mm);
69646+ fput(new_m->vm_file);
69647+ }
69648+ mpol_put(pol);
69649+ }
69650+ }
69651+#endif
69652+
69653 /* Success. */
69654 if (!err)
69655 return 0;
69656@@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69657 removed_exe_file_vma(mm);
69658 fput(new->vm_file);
69659 }
69660- unlink_anon_vmas(new);
69661 out_free_mpol:
69662 mpol_put(pol);
69663 out_free_vma:
69664+
69665+#ifdef CONFIG_PAX_SEGMEXEC
69666+ if (new_m) {
69667+ unlink_anon_vmas(new_m);
69668+ kmem_cache_free(vm_area_cachep, new_m);
69669+ }
69670+#endif
69671+
69672+ unlink_anon_vmas(new);
69673 kmem_cache_free(vm_area_cachep, new);
69674 out_err:
69675 return err;
69676@@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69677 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69678 unsigned long addr, int new_below)
69679 {
69680+
69681+#ifdef CONFIG_PAX_SEGMEXEC
69682+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69683+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69684+ if (mm->map_count >= sysctl_max_map_count-1)
69685+ return -ENOMEM;
69686+ } else
69687+#endif
69688+
69689 if (mm->map_count >= sysctl_max_map_count)
69690 return -ENOMEM;
69691
69692@@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69693 * work. This now handles partial unmappings.
69694 * Jeremy Fitzhardinge <jeremy@goop.org>
69695 */
69696+#ifdef CONFIG_PAX_SEGMEXEC
69697 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69698 {
69699+ int ret = __do_munmap(mm, start, len);
69700+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69701+ return ret;
69702+
69703+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69704+}
69705+
69706+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69707+#else
69708+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69709+#endif
69710+{
69711 unsigned long end;
69712 struct vm_area_struct *vma, *prev, *last;
69713
69714+ /*
69715+ * mm->mmap_sem is required to protect against another thread
69716+ * changing the mappings in case we sleep.
69717+ */
69718+ verify_mm_writelocked(mm);
69719+
69720 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69721 return -EINVAL;
69722
69723@@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69724 /* Fix up all other VM information */
69725 remove_vma_list(mm, vma);
69726
69727+ track_exec_limit(mm, start, end, 0UL);
69728+
69729 return 0;
69730 }
69731
69732@@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69733
69734 profile_munmap(addr);
69735
69736+#ifdef CONFIG_PAX_SEGMEXEC
69737+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69738+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69739+ return -EINVAL;
69740+#endif
69741+
69742 down_write(&mm->mmap_sem);
69743 ret = do_munmap(mm, addr, len);
69744 up_write(&mm->mmap_sem);
69745 return ret;
69746 }
69747
69748-static inline void verify_mm_writelocked(struct mm_struct *mm)
69749-{
69750-#ifdef CONFIG_DEBUG_VM
69751- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69752- WARN_ON(1);
69753- up_read(&mm->mmap_sem);
69754- }
69755-#endif
69756-}
69757-
69758 /*
69759 * this is really a simplified "do_mmap". it only handles
69760 * anonymous maps. eventually we may be able to do some
69761@@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69762 struct rb_node ** rb_link, * rb_parent;
69763 pgoff_t pgoff = addr >> PAGE_SHIFT;
69764 int error;
69765+ unsigned long charged;
69766
69767 len = PAGE_ALIGN(len);
69768 if (!len)
69769@@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69770
69771 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69772
69773+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69774+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69775+ flags &= ~VM_EXEC;
69776+
69777+#ifdef CONFIG_PAX_MPROTECT
69778+ if (mm->pax_flags & MF_PAX_MPROTECT)
69779+ flags &= ~VM_MAYEXEC;
69780+#endif
69781+
69782+ }
69783+#endif
69784+
69785 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69786 if (error & ~PAGE_MASK)
69787 return error;
69788
69789+ charged = len >> PAGE_SHIFT;
69790+
69791 /*
69792 * mlock MCL_FUTURE?
69793 */
69794 if (mm->def_flags & VM_LOCKED) {
69795 unsigned long locked, lock_limit;
69796- locked = len >> PAGE_SHIFT;
69797+ locked = charged;
69798 locked += mm->locked_vm;
69799 lock_limit = rlimit(RLIMIT_MEMLOCK);
69800 lock_limit >>= PAGE_SHIFT;
69801@@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69802 /*
69803 * Clear old maps. this also does some error checking for us
69804 */
69805- munmap_back:
69806 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69807 if (vma && vma->vm_start < addr + len) {
69808 if (do_munmap(mm, addr, len))
69809 return -ENOMEM;
69810- goto munmap_back;
69811+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69812+ BUG_ON(vma && vma->vm_start < addr + len);
69813 }
69814
69815 /* Check against address space limits *after* clearing old maps... */
69816- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69817+ if (!may_expand_vm(mm, charged))
69818 return -ENOMEM;
69819
69820 if (mm->map_count > sysctl_max_map_count)
69821 return -ENOMEM;
69822
69823- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69824+ if (security_vm_enough_memory(charged))
69825 return -ENOMEM;
69826
69827 /* Can we just expand an old private anonymous mapping? */
69828@@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69829 */
69830 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69831 if (!vma) {
69832- vm_unacct_memory(len >> PAGE_SHIFT);
69833+ vm_unacct_memory(charged);
69834 return -ENOMEM;
69835 }
69836
69837@@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69838 vma_link(mm, vma, prev, rb_link, rb_parent);
69839 out:
69840 perf_event_mmap(vma);
69841- mm->total_vm += len >> PAGE_SHIFT;
69842+ mm->total_vm += charged;
69843 if (flags & VM_LOCKED) {
69844 if (!mlock_vma_pages_range(vma, addr, addr + len))
69845- mm->locked_vm += (len >> PAGE_SHIFT);
69846+ mm->locked_vm += charged;
69847 }
69848+ track_exec_limit(mm, addr, addr + len, flags);
69849 return addr;
69850 }
69851
69852@@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
69853 * Walk the list again, actually closing and freeing it,
69854 * with preemption enabled, without holding any MM locks.
69855 */
69856- while (vma)
69857+ while (vma) {
69858+ vma->vm_mirror = NULL;
69859 vma = remove_vma(vma);
69860+ }
69861
69862 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69863 }
69864@@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69865 struct vm_area_struct * __vma, * prev;
69866 struct rb_node ** rb_link, * rb_parent;
69867
69868+#ifdef CONFIG_PAX_SEGMEXEC
69869+ struct vm_area_struct *vma_m = NULL;
69870+#endif
69871+
69872+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69873+ return -EPERM;
69874+
69875 /*
69876 * The vm_pgoff of a purely anonymous vma should be irrelevant
69877 * until its first write fault, when page's anon_vma and index
69878@@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69879 if ((vma->vm_flags & VM_ACCOUNT) &&
69880 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69881 return -ENOMEM;
69882+
69883+#ifdef CONFIG_PAX_SEGMEXEC
69884+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69885+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69886+ if (!vma_m)
69887+ return -ENOMEM;
69888+ }
69889+#endif
69890+
69891 vma_link(mm, vma, prev, rb_link, rb_parent);
69892+
69893+#ifdef CONFIG_PAX_SEGMEXEC
69894+ if (vma_m)
69895+ BUG_ON(pax_mirror_vma(vma_m, vma));
69896+#endif
69897+
69898 return 0;
69899 }
69900
69901@@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69902 struct rb_node **rb_link, *rb_parent;
69903 struct mempolicy *pol;
69904
69905+ BUG_ON(vma->vm_mirror);
69906+
69907 /*
69908 * If anonymous vma has not yet been faulted, update new pgoff
69909 * to match new location, to increase its chance of merging.
69910@@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69911 return NULL;
69912 }
69913
69914+#ifdef CONFIG_PAX_SEGMEXEC
69915+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69916+{
69917+ struct vm_area_struct *prev_m;
69918+ struct rb_node **rb_link_m, *rb_parent_m;
69919+ struct mempolicy *pol_m;
69920+
69921+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69922+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69923+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69924+ *vma_m = *vma;
69925+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69926+ if (anon_vma_clone(vma_m, vma))
69927+ return -ENOMEM;
69928+ pol_m = vma_policy(vma_m);
69929+ mpol_get(pol_m);
69930+ vma_set_policy(vma_m, pol_m);
69931+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69932+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69933+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69934+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69935+ if (vma_m->vm_file)
69936+ get_file(vma_m->vm_file);
69937+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69938+ vma_m->vm_ops->open(vma_m);
69939+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69940+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69941+ vma_m->vm_mirror = vma;
69942+ vma->vm_mirror = vma_m;
69943+ return 0;
69944+}
69945+#endif
69946+
69947 /*
69948 * Return true if the calling process may expand its vm space by the passed
69949 * number of pages
69950@@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69951 unsigned long lim;
69952
69953 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69954-
69955+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69956 if (cur + npages > lim)
69957 return 0;
69958 return 1;
69959@@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
69960 vma->vm_start = addr;
69961 vma->vm_end = addr + len;
69962
69963+#ifdef CONFIG_PAX_MPROTECT
69964+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69965+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69966+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69967+ return -EPERM;
69968+ if (!(vm_flags & VM_EXEC))
69969+ vm_flags &= ~VM_MAYEXEC;
69970+#else
69971+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69972+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69973+#endif
69974+ else
69975+ vm_flags &= ~VM_MAYWRITE;
69976+ }
69977+#endif
69978+
69979 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69980 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69981
69982diff --git a/mm/mprotect.c b/mm/mprotect.c
69983index 5a688a2..27e031c 100644
69984--- a/mm/mprotect.c
69985+++ b/mm/mprotect.c
69986@@ -23,10 +23,16 @@
69987 #include <linux/mmu_notifier.h>
69988 #include <linux/migrate.h>
69989 #include <linux/perf_event.h>
69990+
69991+#ifdef CONFIG_PAX_MPROTECT
69992+#include <linux/elf.h>
69993+#endif
69994+
69995 #include <asm/uaccess.h>
69996 #include <asm/pgtable.h>
69997 #include <asm/cacheflush.h>
69998 #include <asm/tlbflush.h>
69999+#include <asm/mmu_context.h>
70000
70001 #ifndef pgprot_modify
70002 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70003@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70004 flush_tlb_range(vma, start, end);
70005 }
70006
70007+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70008+/* called while holding the mmap semaphor for writing except stack expansion */
70009+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70010+{
70011+ unsigned long oldlimit, newlimit = 0UL;
70012+
70013+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70014+ return;
70015+
70016+ spin_lock(&mm->page_table_lock);
70017+ oldlimit = mm->context.user_cs_limit;
70018+ if ((prot & VM_EXEC) && oldlimit < end)
70019+ /* USER_CS limit moved up */
70020+ newlimit = end;
70021+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70022+ /* USER_CS limit moved down */
70023+ newlimit = start;
70024+
70025+ if (newlimit) {
70026+ mm->context.user_cs_limit = newlimit;
70027+
70028+#ifdef CONFIG_SMP
70029+ wmb();
70030+ cpus_clear(mm->context.cpu_user_cs_mask);
70031+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70032+#endif
70033+
70034+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70035+ }
70036+ spin_unlock(&mm->page_table_lock);
70037+ if (newlimit == end) {
70038+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
70039+
70040+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
70041+ if (is_vm_hugetlb_page(vma))
70042+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70043+ else
70044+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70045+ }
70046+}
70047+#endif
70048+
70049 int
70050 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70051 unsigned long start, unsigned long end, unsigned long newflags)
70052@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70053 int error;
70054 int dirty_accountable = 0;
70055
70056+#ifdef CONFIG_PAX_SEGMEXEC
70057+ struct vm_area_struct *vma_m = NULL;
70058+ unsigned long start_m, end_m;
70059+
70060+ start_m = start + SEGMEXEC_TASK_SIZE;
70061+ end_m = end + SEGMEXEC_TASK_SIZE;
70062+#endif
70063+
70064 if (newflags == oldflags) {
70065 *pprev = vma;
70066 return 0;
70067 }
70068
70069+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70070+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70071+
70072+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70073+ return -ENOMEM;
70074+
70075+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70076+ return -ENOMEM;
70077+ }
70078+
70079 /*
70080 * If we make a private mapping writable we increase our commit;
70081 * but (without finer accounting) cannot reduce our commit if we
70082@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70083 }
70084 }
70085
70086+#ifdef CONFIG_PAX_SEGMEXEC
70087+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70088+ if (start != vma->vm_start) {
70089+ error = split_vma(mm, vma, start, 1);
70090+ if (error)
70091+ goto fail;
70092+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70093+ *pprev = (*pprev)->vm_next;
70094+ }
70095+
70096+ if (end != vma->vm_end) {
70097+ error = split_vma(mm, vma, end, 0);
70098+ if (error)
70099+ goto fail;
70100+ }
70101+
70102+ if (pax_find_mirror_vma(vma)) {
70103+ error = __do_munmap(mm, start_m, end_m - start_m);
70104+ if (error)
70105+ goto fail;
70106+ } else {
70107+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70108+ if (!vma_m) {
70109+ error = -ENOMEM;
70110+ goto fail;
70111+ }
70112+ vma->vm_flags = newflags;
70113+ error = pax_mirror_vma(vma_m, vma);
70114+ if (error) {
70115+ vma->vm_flags = oldflags;
70116+ goto fail;
70117+ }
70118+ }
70119+ }
70120+#endif
70121+
70122 /*
70123 * First try to merge with previous and/or next vma.
70124 */
70125@@ -204,9 +306,21 @@ success:
70126 * vm_flags and vm_page_prot are protected by the mmap_sem
70127 * held in write mode.
70128 */
70129+
70130+#ifdef CONFIG_PAX_SEGMEXEC
70131+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70132+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70133+#endif
70134+
70135 vma->vm_flags = newflags;
70136+
70137+#ifdef CONFIG_PAX_MPROTECT
70138+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70139+ mm->binfmt->handle_mprotect(vma, newflags);
70140+#endif
70141+
70142 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70143- vm_get_page_prot(newflags));
70144+ vm_get_page_prot(vma->vm_flags));
70145
70146 if (vma_wants_writenotify(vma)) {
70147 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70148@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70149 end = start + len;
70150 if (end <= start)
70151 return -ENOMEM;
70152+
70153+#ifdef CONFIG_PAX_SEGMEXEC
70154+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70155+ if (end > SEGMEXEC_TASK_SIZE)
70156+ return -EINVAL;
70157+ } else
70158+#endif
70159+
70160+ if (end > TASK_SIZE)
70161+ return -EINVAL;
70162+
70163 if (!arch_validate_prot(prot))
70164 return -EINVAL;
70165
70166@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70167 /*
70168 * Does the application expect PROT_READ to imply PROT_EXEC:
70169 */
70170- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70171+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70172 prot |= PROT_EXEC;
70173
70174 vm_flags = calc_vm_prot_bits(prot);
70175@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70176 if (start > vma->vm_start)
70177 prev = vma;
70178
70179+#ifdef CONFIG_PAX_MPROTECT
70180+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70181+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70182+#endif
70183+
70184 for (nstart = start ; ; ) {
70185 unsigned long newflags;
70186
70187@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70188
70189 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70190 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70191+ if (prot & (PROT_WRITE | PROT_EXEC))
70192+ gr_log_rwxmprotect(vma->vm_file);
70193+
70194+ error = -EACCES;
70195+ goto out;
70196+ }
70197+
70198+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70199 error = -EACCES;
70200 goto out;
70201 }
70202@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70203 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70204 if (error)
70205 goto out;
70206+
70207+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70208+
70209 nstart = tmp;
70210
70211 if (nstart < prev->vm_end)
70212diff --git a/mm/mremap.c b/mm/mremap.c
70213index d6959cb..18a402a 100644
70214--- a/mm/mremap.c
70215+++ b/mm/mremap.c
70216@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70217 continue;
70218 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70219 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70220+
70221+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70222+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70223+ pte = pte_exprotect(pte);
70224+#endif
70225+
70226 set_pte_at(mm, new_addr, new_pte, pte);
70227 }
70228
70229@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70230 if (is_vm_hugetlb_page(vma))
70231 goto Einval;
70232
70233+#ifdef CONFIG_PAX_SEGMEXEC
70234+ if (pax_find_mirror_vma(vma))
70235+ goto Einval;
70236+#endif
70237+
70238 /* We can't remap across vm area boundaries */
70239 if (old_len > vma->vm_end - addr)
70240 goto Efault;
70241@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70242 unsigned long ret = -EINVAL;
70243 unsigned long charged = 0;
70244 unsigned long map_flags;
70245+ unsigned long pax_task_size = TASK_SIZE;
70246
70247 if (new_addr & ~PAGE_MASK)
70248 goto out;
70249
70250- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70251+#ifdef CONFIG_PAX_SEGMEXEC
70252+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70253+ pax_task_size = SEGMEXEC_TASK_SIZE;
70254+#endif
70255+
70256+ pax_task_size -= PAGE_SIZE;
70257+
70258+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70259 goto out;
70260
70261 /* Check if the location we're moving into overlaps the
70262 * old location at all, and fail if it does.
70263 */
70264- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70265- goto out;
70266-
70267- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70268+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70269 goto out;
70270
70271 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70272@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70273 struct vm_area_struct *vma;
70274 unsigned long ret = -EINVAL;
70275 unsigned long charged = 0;
70276+ unsigned long pax_task_size = TASK_SIZE;
70277
70278 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70279 goto out;
70280@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70281 if (!new_len)
70282 goto out;
70283
70284+#ifdef CONFIG_PAX_SEGMEXEC
70285+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70286+ pax_task_size = SEGMEXEC_TASK_SIZE;
70287+#endif
70288+
70289+ pax_task_size -= PAGE_SIZE;
70290+
70291+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70292+ old_len > pax_task_size || addr > pax_task_size-old_len)
70293+ goto out;
70294+
70295 if (flags & MREMAP_FIXED) {
70296 if (flags & MREMAP_MAYMOVE)
70297 ret = mremap_to(addr, old_len, new_addr, new_len);
70298@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70299 addr + new_len);
70300 }
70301 ret = addr;
70302+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70303 goto out;
70304 }
70305 }
70306@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70307 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70308 if (ret)
70309 goto out;
70310+
70311+ map_flags = vma->vm_flags;
70312 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70313+ if (!(ret & ~PAGE_MASK)) {
70314+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70315+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70316+ }
70317 }
70318 out:
70319 if (ret & ~PAGE_MASK)
70320diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70321index 7fa41b4..6087460 100644
70322--- a/mm/nobootmem.c
70323+++ b/mm/nobootmem.c
70324@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70325 unsigned long __init free_all_memory_core_early(int nodeid)
70326 {
70327 int i;
70328- u64 start, end;
70329+ u64 start, end, startrange, endrange;
70330 unsigned long count = 0;
70331- struct range *range = NULL;
70332+ struct range *range = NULL, rangerange = { 0, 0 };
70333 int nr_range;
70334
70335 nr_range = get_free_all_memory_range(&range, nodeid);
70336+ startrange = __pa(range) >> PAGE_SHIFT;
70337+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70338
70339 for (i = 0; i < nr_range; i++) {
70340 start = range[i].start;
70341 end = range[i].end;
70342+ if (start <= endrange && startrange < end) {
70343+ BUG_ON(rangerange.start | rangerange.end);
70344+ rangerange = range[i];
70345+ continue;
70346+ }
70347 count += end - start;
70348 __free_pages_memory(start, end);
70349 }
70350+ start = rangerange.start;
70351+ end = rangerange.end;
70352+ count += end - start;
70353+ __free_pages_memory(start, end);
70354
70355 return count;
70356 }
70357diff --git a/mm/nommu.c b/mm/nommu.c
70358index b982290..7d73f53 100644
70359--- a/mm/nommu.c
70360+++ b/mm/nommu.c
70361@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70362 int sysctl_overcommit_ratio = 50; /* default is 50% */
70363 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70364 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70365-int heap_stack_gap = 0;
70366
70367 atomic_long_t mmap_pages_allocated;
70368
70369@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70370 EXPORT_SYMBOL(find_vma);
70371
70372 /*
70373- * find a VMA
70374- * - we don't extend stack VMAs under NOMMU conditions
70375- */
70376-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70377-{
70378- return find_vma(mm, addr);
70379-}
70380-
70381-/*
70382 * expand a stack to a given address
70383 * - not supported under NOMMU conditions
70384 */
70385@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70386
70387 /* most fields are the same, copy all, and then fixup */
70388 *new = *vma;
70389+ INIT_LIST_HEAD(&new->anon_vma_chain);
70390 *region = *vma->vm_region;
70391 new->vm_region = region;
70392
70393diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70394index 485be89..c059ad3 100644
70395--- a/mm/page_alloc.c
70396+++ b/mm/page_alloc.c
70397@@ -341,7 +341,7 @@ out:
70398 * This usage means that zero-order pages may not be compound.
70399 */
70400
70401-static void free_compound_page(struct page *page)
70402+void free_compound_page(struct page *page)
70403 {
70404 __free_pages_ok(page, compound_order(page));
70405 }
70406@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70407 int i;
70408 int bad = 0;
70409
70410+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70411+ unsigned long index = 1UL << order;
70412+#endif
70413+
70414 trace_mm_page_free_direct(page, order);
70415 kmemcheck_free_shadow(page, order);
70416
70417@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70418 debug_check_no_obj_freed(page_address(page),
70419 PAGE_SIZE << order);
70420 }
70421+
70422+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70423+ for (; index; --index)
70424+ sanitize_highpage(page + index - 1);
70425+#endif
70426+
70427 arch_free_page(page, order);
70428 kernel_map_pages(page, 1 << order, 0);
70429
70430@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70431 arch_alloc_page(page, order);
70432 kernel_map_pages(page, 1 << order, 1);
70433
70434+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70435 if (gfp_flags & __GFP_ZERO)
70436 prep_zero_page(page, order, gfp_flags);
70437+#endif
70438
70439 if (order && (gfp_flags & __GFP_COMP))
70440 prep_compound_page(page, order);
70441@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70442 unsigned long pfn;
70443
70444 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70445+#ifdef CONFIG_X86_32
70446+ /* boot failures in VMware 8 on 32bit vanilla since
70447+ this change */
70448+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70449+#else
70450 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70451+#endif
70452 return 1;
70453 }
70454 return 0;
70455diff --git a/mm/percpu.c b/mm/percpu.c
70456index 716eb4a..8d10419 100644
70457--- a/mm/percpu.c
70458+++ b/mm/percpu.c
70459@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70460 static unsigned int pcpu_high_unit_cpu __read_mostly;
70461
70462 /* the address of the first chunk which starts with the kernel static area */
70463-void *pcpu_base_addr __read_mostly;
70464+void *pcpu_base_addr __read_only;
70465 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70466
70467 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70468diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70469index e920aa3..137702a 100644
70470--- a/mm/process_vm_access.c
70471+++ b/mm/process_vm_access.c
70472@@ -13,6 +13,7 @@
70473 #include <linux/uio.h>
70474 #include <linux/sched.h>
70475 #include <linux/highmem.h>
70476+#include <linux/security.h>
70477 #include <linux/ptrace.h>
70478 #include <linux/slab.h>
70479 #include <linux/syscalls.h>
70480@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70481 size_t iov_l_curr_offset = 0;
70482 ssize_t iov_len;
70483
70484+ return -ENOSYS; // PaX: until properly audited
70485+
70486 /*
70487 * Work out how many pages of struct pages we're going to need
70488 * when eventually calling get_user_pages
70489 */
70490 for (i = 0; i < riovcnt; i++) {
70491 iov_len = rvec[i].iov_len;
70492- if (iov_len > 0) {
70493- nr_pages_iov = ((unsigned long)rvec[i].iov_base
70494- + iov_len)
70495- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70496- / PAGE_SIZE + 1;
70497- nr_pages = max(nr_pages, nr_pages_iov);
70498- }
70499+ if (iov_len <= 0)
70500+ continue;
70501+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70502+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70503+ nr_pages = max(nr_pages, nr_pages_iov);
70504 }
70505
70506 if (nr_pages == 0)
70507@@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70508 goto free_proc_pages;
70509 }
70510
70511- task_lock(task);
70512- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70513- task_unlock(task);
70514+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70515 rc = -EPERM;
70516 goto put_task_struct;
70517 }
70518- mm = task->mm;
70519
70520- if (!mm || (task->flags & PF_KTHREAD)) {
70521- task_unlock(task);
70522- rc = -EINVAL;
70523+ mm = mm_access(task, PTRACE_MODE_ATTACH);
70524+ if (!mm || IS_ERR(mm)) {
70525+ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70526+ /*
70527+ * Explicitly map EACCES to EPERM as EPERM is a more a
70528+ * appropriate error code for process_vw_readv/writev
70529+ */
70530+ if (rc == -EACCES)
70531+ rc = -EPERM;
70532 goto put_task_struct;
70533 }
70534
70535- atomic_inc(&mm->mm_users);
70536- task_unlock(task);
70537-
70538 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70539 rc = process_vm_rw_single_vec(
70540 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70541diff --git a/mm/rmap.c b/mm/rmap.c
70542index a4fd368..e0ffec7 100644
70543--- a/mm/rmap.c
70544+++ b/mm/rmap.c
70545@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70546 struct anon_vma *anon_vma = vma->anon_vma;
70547 struct anon_vma_chain *avc;
70548
70549+#ifdef CONFIG_PAX_SEGMEXEC
70550+ struct anon_vma_chain *avc_m = NULL;
70551+#endif
70552+
70553 might_sleep();
70554 if (unlikely(!anon_vma)) {
70555 struct mm_struct *mm = vma->vm_mm;
70556@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70557 if (!avc)
70558 goto out_enomem;
70559
70560+#ifdef CONFIG_PAX_SEGMEXEC
70561+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70562+ if (!avc_m)
70563+ goto out_enomem_free_avc;
70564+#endif
70565+
70566 anon_vma = find_mergeable_anon_vma(vma);
70567 allocated = NULL;
70568 if (!anon_vma) {
70569@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70570 /* page_table_lock to protect against threads */
70571 spin_lock(&mm->page_table_lock);
70572 if (likely(!vma->anon_vma)) {
70573+
70574+#ifdef CONFIG_PAX_SEGMEXEC
70575+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70576+
70577+ if (vma_m) {
70578+ BUG_ON(vma_m->anon_vma);
70579+ vma_m->anon_vma = anon_vma;
70580+ avc_m->anon_vma = anon_vma;
70581+ avc_m->vma = vma;
70582+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70583+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70584+ avc_m = NULL;
70585+ }
70586+#endif
70587+
70588 vma->anon_vma = anon_vma;
70589 avc->anon_vma = anon_vma;
70590 avc->vma = vma;
70591@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70592
70593 if (unlikely(allocated))
70594 put_anon_vma(allocated);
70595+
70596+#ifdef CONFIG_PAX_SEGMEXEC
70597+ if (unlikely(avc_m))
70598+ anon_vma_chain_free(avc_m);
70599+#endif
70600+
70601 if (unlikely(avc))
70602 anon_vma_chain_free(avc);
70603 }
70604 return 0;
70605
70606 out_enomem_free_avc:
70607+
70608+#ifdef CONFIG_PAX_SEGMEXEC
70609+ if (avc_m)
70610+ anon_vma_chain_free(avc_m);
70611+#endif
70612+
70613 anon_vma_chain_free(avc);
70614 out_enomem:
70615 return -ENOMEM;
70616@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70617 * Attach the anon_vmas from src to dst.
70618 * Returns 0 on success, -ENOMEM on failure.
70619 */
70620-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70621+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70622 {
70623 struct anon_vma_chain *avc, *pavc;
70624 struct anon_vma *root = NULL;
70625@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70626 * the corresponding VMA in the parent process is attached to.
70627 * Returns 0 on success, non-zero on failure.
70628 */
70629-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70630+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70631 {
70632 struct anon_vma_chain *avc;
70633 struct anon_vma *anon_vma;
70634diff --git a/mm/shmem.c b/mm/shmem.c
70635index 6c253f7..367e20a 100644
70636--- a/mm/shmem.c
70637+++ b/mm/shmem.c
70638@@ -31,7 +31,7 @@
70639 #include <linux/export.h>
70640 #include <linux/swap.h>
70641
70642-static struct vfsmount *shm_mnt;
70643+struct vfsmount *shm_mnt;
70644
70645 #ifdef CONFIG_SHMEM
70646 /*
70647@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70648 #define BOGO_DIRENT_SIZE 20
70649
70650 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70651-#define SHORT_SYMLINK_LEN 128
70652+#define SHORT_SYMLINK_LEN 64
70653
70654 struct shmem_xattr {
70655 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70656@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70657 int err = -ENOMEM;
70658
70659 /* Round up to L1_CACHE_BYTES to resist false sharing */
70660- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70661- L1_CACHE_BYTES), GFP_KERNEL);
70662+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70663 if (!sbinfo)
70664 return -ENOMEM;
70665
70666diff --git a/mm/slab.c b/mm/slab.c
70667index 83311c9a..fcf8f86 100644
70668--- a/mm/slab.c
70669+++ b/mm/slab.c
70670@@ -151,7 +151,7 @@
70671
70672 /* Legal flag mask for kmem_cache_create(). */
70673 #if DEBUG
70674-# define CREATE_MASK (SLAB_RED_ZONE | \
70675+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70676 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70677 SLAB_CACHE_DMA | \
70678 SLAB_STORE_USER | \
70679@@ -159,7 +159,7 @@
70680 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70681 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70682 #else
70683-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70684+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70685 SLAB_CACHE_DMA | \
70686 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70687 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70688@@ -288,7 +288,7 @@ struct kmem_list3 {
70689 * Need this for bootstrapping a per node allocator.
70690 */
70691 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70692-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70693+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70694 #define CACHE_CACHE 0
70695 #define SIZE_AC MAX_NUMNODES
70696 #define SIZE_L3 (2 * MAX_NUMNODES)
70697@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70698 if ((x)->max_freeable < i) \
70699 (x)->max_freeable = i; \
70700 } while (0)
70701-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70702-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70703-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70704-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70705+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70706+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70707+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70708+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70709 #else
70710 #define STATS_INC_ACTIVE(x) do { } while (0)
70711 #define STATS_DEC_ACTIVE(x) do { } while (0)
70712@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70713 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70714 */
70715 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70716- const struct slab *slab, void *obj)
70717+ const struct slab *slab, const void *obj)
70718 {
70719 u32 offset = (obj - slab->s_mem);
70720 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70721@@ -564,7 +564,7 @@ struct cache_names {
70722 static struct cache_names __initdata cache_names[] = {
70723 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70724 #include <linux/kmalloc_sizes.h>
70725- {NULL,}
70726+ {NULL}
70727 #undef CACHE
70728 };
70729
70730@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70731 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70732 sizes[INDEX_AC].cs_size,
70733 ARCH_KMALLOC_MINALIGN,
70734- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70735+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70736 NULL);
70737
70738 if (INDEX_AC != INDEX_L3) {
70739@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70740 kmem_cache_create(names[INDEX_L3].name,
70741 sizes[INDEX_L3].cs_size,
70742 ARCH_KMALLOC_MINALIGN,
70743- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70744+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70745 NULL);
70746 }
70747
70748@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70749 sizes->cs_cachep = kmem_cache_create(names->name,
70750 sizes->cs_size,
70751 ARCH_KMALLOC_MINALIGN,
70752- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70753+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70754 NULL);
70755 }
70756 #ifdef CONFIG_ZONE_DMA
70757@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70758 }
70759 /* cpu stats */
70760 {
70761- unsigned long allochit = atomic_read(&cachep->allochit);
70762- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70763- unsigned long freehit = atomic_read(&cachep->freehit);
70764- unsigned long freemiss = atomic_read(&cachep->freemiss);
70765+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70766+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70767+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70768+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70769
70770 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70771 allochit, allocmiss, freehit, freemiss);
70772@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70773 {
70774 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70775 #ifdef CONFIG_DEBUG_SLAB_LEAK
70776- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70777+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70778 #endif
70779 return 0;
70780 }
70781 module_init(slab_proc_init);
70782 #endif
70783
70784+void check_object_size(const void *ptr, unsigned long n, bool to)
70785+{
70786+
70787+#ifdef CONFIG_PAX_USERCOPY
70788+ struct page *page;
70789+ struct kmem_cache *cachep = NULL;
70790+ struct slab *slabp;
70791+ unsigned int objnr;
70792+ unsigned long offset;
70793+ const char *type;
70794+
70795+ if (!n)
70796+ return;
70797+
70798+ type = "<null>";
70799+ if (ZERO_OR_NULL_PTR(ptr))
70800+ goto report;
70801+
70802+ if (!virt_addr_valid(ptr))
70803+ return;
70804+
70805+ page = virt_to_head_page(ptr);
70806+
70807+ type = "<process stack>";
70808+ if (!PageSlab(page)) {
70809+ if (object_is_on_stack(ptr, n) == -1)
70810+ goto report;
70811+ return;
70812+ }
70813+
70814+ cachep = page_get_cache(page);
70815+ type = cachep->name;
70816+ if (!(cachep->flags & SLAB_USERCOPY))
70817+ goto report;
70818+
70819+ slabp = page_get_slab(page);
70820+ objnr = obj_to_index(cachep, slabp, ptr);
70821+ BUG_ON(objnr >= cachep->num);
70822+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70823+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70824+ return;
70825+
70826+report:
70827+ pax_report_usercopy(ptr, n, to, type);
70828+#endif
70829+
70830+}
70831+EXPORT_SYMBOL(check_object_size);
70832+
70833 /**
70834 * ksize - get the actual amount of memory allocated for a given object
70835 * @objp: Pointer to the object
70836diff --git a/mm/slob.c b/mm/slob.c
70837index 8105be4..e045f96 100644
70838--- a/mm/slob.c
70839+++ b/mm/slob.c
70840@@ -29,7 +29,7 @@
70841 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70842 * alloc_pages() directly, allocating compound pages so the page order
70843 * does not have to be separately tracked, and also stores the exact
70844- * allocation size in page->private so that it can be used to accurately
70845+ * allocation size in slob_page->size so that it can be used to accurately
70846 * provide ksize(). These objects are detected in kfree() because slob_page()
70847 * is false for them.
70848 *
70849@@ -58,6 +58,7 @@
70850 */
70851
70852 #include <linux/kernel.h>
70853+#include <linux/sched.h>
70854 #include <linux/slab.h>
70855 #include <linux/mm.h>
70856 #include <linux/swap.h> /* struct reclaim_state */
70857@@ -102,7 +103,8 @@ struct slob_page {
70858 unsigned long flags; /* mandatory */
70859 atomic_t _count; /* mandatory */
70860 slobidx_t units; /* free units left in page */
70861- unsigned long pad[2];
70862+ unsigned long pad[1];
70863+ unsigned long size; /* size when >=PAGE_SIZE */
70864 slob_t *free; /* first free slob_t in page */
70865 struct list_head list; /* linked list of free pages */
70866 };
70867@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70868 */
70869 static inline int is_slob_page(struct slob_page *sp)
70870 {
70871- return PageSlab((struct page *)sp);
70872+ return PageSlab((struct page *)sp) && !sp->size;
70873 }
70874
70875 static inline void set_slob_page(struct slob_page *sp)
70876@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70877
70878 static inline struct slob_page *slob_page(const void *addr)
70879 {
70880- return (struct slob_page *)virt_to_page(addr);
70881+ return (struct slob_page *)virt_to_head_page(addr);
70882 }
70883
70884 /*
70885@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70886 /*
70887 * Return the size of a slob block.
70888 */
70889-static slobidx_t slob_units(slob_t *s)
70890+static slobidx_t slob_units(const slob_t *s)
70891 {
70892 if (s->units > 0)
70893 return s->units;
70894@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70895 /*
70896 * Return the next free slob block pointer after this one.
70897 */
70898-static slob_t *slob_next(slob_t *s)
70899+static slob_t *slob_next(const slob_t *s)
70900 {
70901 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70902 slobidx_t next;
70903@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70904 /*
70905 * Returns true if s is the last free block in its page.
70906 */
70907-static int slob_last(slob_t *s)
70908+static int slob_last(const slob_t *s)
70909 {
70910 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70911 }
70912@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70913 if (!page)
70914 return NULL;
70915
70916+ set_slob_page(page);
70917 return page_address(page);
70918 }
70919
70920@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70921 if (!b)
70922 return NULL;
70923 sp = slob_page(b);
70924- set_slob_page(sp);
70925
70926 spin_lock_irqsave(&slob_lock, flags);
70927 sp->units = SLOB_UNITS(PAGE_SIZE);
70928 sp->free = b;
70929+ sp->size = 0;
70930 INIT_LIST_HEAD(&sp->list);
70931 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70932 set_slob_page_free(sp, slob_list);
70933@@ -476,10 +479,9 @@ out:
70934 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70935 */
70936
70937-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70938+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70939 {
70940- unsigned int *m;
70941- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70942+ slob_t *m;
70943 void *ret;
70944
70945 gfp &= gfp_allowed_mask;
70946@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70947
70948 if (!m)
70949 return NULL;
70950- *m = size;
70951+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70952+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70953+ m[0].units = size;
70954+ m[1].units = align;
70955 ret = (void *)m + align;
70956
70957 trace_kmalloc_node(_RET_IP_, ret,
70958@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70959 gfp |= __GFP_COMP;
70960 ret = slob_new_pages(gfp, order, node);
70961 if (ret) {
70962- struct page *page;
70963- page = virt_to_page(ret);
70964- page->private = size;
70965+ struct slob_page *sp;
70966+ sp = slob_page(ret);
70967+ sp->size = size;
70968 }
70969
70970 trace_kmalloc_node(_RET_IP_, ret,
70971 size, PAGE_SIZE << order, gfp, node);
70972 }
70973
70974- kmemleak_alloc(ret, size, 1, gfp);
70975+ return ret;
70976+}
70977+
70978+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70979+{
70980+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70981+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70982+
70983+ if (!ZERO_OR_NULL_PTR(ret))
70984+ kmemleak_alloc(ret, size, 1, gfp);
70985 return ret;
70986 }
70987 EXPORT_SYMBOL(__kmalloc_node);
70988@@ -533,13 +547,92 @@ void kfree(const void *block)
70989 sp = slob_page(block);
70990 if (is_slob_page(sp)) {
70991 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70992- unsigned int *m = (unsigned int *)(block - align);
70993- slob_free(m, *m + align);
70994- } else
70995+ slob_t *m = (slob_t *)(block - align);
70996+ slob_free(m, m[0].units + align);
70997+ } else {
70998+ clear_slob_page(sp);
70999+ free_slob_page(sp);
71000+ sp->size = 0;
71001 put_page(&sp->page);
71002+ }
71003 }
71004 EXPORT_SYMBOL(kfree);
71005
71006+void check_object_size(const void *ptr, unsigned long n, bool to)
71007+{
71008+
71009+#ifdef CONFIG_PAX_USERCOPY
71010+ struct slob_page *sp;
71011+ const slob_t *free;
71012+ const void *base;
71013+ unsigned long flags;
71014+ const char *type;
71015+
71016+ if (!n)
71017+ return;
71018+
71019+ type = "<null>";
71020+ if (ZERO_OR_NULL_PTR(ptr))
71021+ goto report;
71022+
71023+ if (!virt_addr_valid(ptr))
71024+ return;
71025+
71026+ type = "<process stack>";
71027+ sp = slob_page(ptr);
71028+ if (!PageSlab((struct page *)sp)) {
71029+ if (object_is_on_stack(ptr, n) == -1)
71030+ goto report;
71031+ return;
71032+ }
71033+
71034+ type = "<slob>";
71035+ if (sp->size) {
71036+ base = page_address(&sp->page);
71037+ if (base <= ptr && n <= sp->size - (ptr - base))
71038+ return;
71039+ goto report;
71040+ }
71041+
71042+ /* some tricky double walking to find the chunk */
71043+ spin_lock_irqsave(&slob_lock, flags);
71044+ base = (void *)((unsigned long)ptr & PAGE_MASK);
71045+ free = sp->free;
71046+
71047+ while (!slob_last(free) && (void *)free <= ptr) {
71048+ base = free + slob_units(free);
71049+ free = slob_next(free);
71050+ }
71051+
71052+ while (base < (void *)free) {
71053+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71054+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
71055+ int offset;
71056+
71057+ if (ptr < base + align)
71058+ break;
71059+
71060+ offset = ptr - base - align;
71061+ if (offset >= m) {
71062+ base += size;
71063+ continue;
71064+ }
71065+
71066+ if (n > m - offset)
71067+ break;
71068+
71069+ spin_unlock_irqrestore(&slob_lock, flags);
71070+ return;
71071+ }
71072+
71073+ spin_unlock_irqrestore(&slob_lock, flags);
71074+report:
71075+ pax_report_usercopy(ptr, n, to, type);
71076+#endif
71077+
71078+}
71079+EXPORT_SYMBOL(check_object_size);
71080+
71081 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71082 size_t ksize(const void *block)
71083 {
71084@@ -552,10 +645,10 @@ size_t ksize(const void *block)
71085 sp = slob_page(block);
71086 if (is_slob_page(sp)) {
71087 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71088- unsigned int *m = (unsigned int *)(block - align);
71089- return SLOB_UNITS(*m) * SLOB_UNIT;
71090+ slob_t *m = (slob_t *)(block - align);
71091+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71092 } else
71093- return sp->page.private;
71094+ return sp->size;
71095 }
71096 EXPORT_SYMBOL(ksize);
71097
71098@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71099 {
71100 struct kmem_cache *c;
71101
71102+#ifdef CONFIG_PAX_USERCOPY
71103+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
71104+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71105+#else
71106 c = slob_alloc(sizeof(struct kmem_cache),
71107 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71108+#endif
71109
71110 if (c) {
71111 c->name = name;
71112@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71113
71114 lockdep_trace_alloc(flags);
71115
71116+#ifdef CONFIG_PAX_USERCOPY
71117+ b = __kmalloc_node_align(c->size, flags, node, c->align);
71118+#else
71119 if (c->size < PAGE_SIZE) {
71120 b = slob_alloc(c->size, flags, c->align, node);
71121 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71122 SLOB_UNITS(c->size) * SLOB_UNIT,
71123 flags, node);
71124 } else {
71125+ struct slob_page *sp;
71126+
71127 b = slob_new_pages(flags, get_order(c->size), node);
71128+ sp = slob_page(b);
71129+ sp->size = c->size;
71130 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71131 PAGE_SIZE << get_order(c->size),
71132 flags, node);
71133 }
71134+#endif
71135
71136 if (c->ctor)
71137 c->ctor(b);
71138@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71139
71140 static void __kmem_cache_free(void *b, int size)
71141 {
71142- if (size < PAGE_SIZE)
71143+ struct slob_page *sp = slob_page(b);
71144+
71145+ if (is_slob_page(sp))
71146 slob_free(b, size);
71147- else
71148+ else {
71149+ clear_slob_page(sp);
71150+ free_slob_page(sp);
71151+ sp->size = 0;
71152 slob_free_pages(b, get_order(size));
71153+ }
71154 }
71155
71156 static void kmem_rcu_free(struct rcu_head *head)
71157@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71158
71159 void kmem_cache_free(struct kmem_cache *c, void *b)
71160 {
71161+ int size = c->size;
71162+
71163+#ifdef CONFIG_PAX_USERCOPY
71164+ if (size + c->align < PAGE_SIZE) {
71165+ size += c->align;
71166+ b -= c->align;
71167+ }
71168+#endif
71169+
71170 kmemleak_free_recursive(b, c->flags);
71171 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71172 struct slob_rcu *slob_rcu;
71173- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71174- slob_rcu->size = c->size;
71175+ slob_rcu = b + (size - sizeof(struct slob_rcu));
71176+ slob_rcu->size = size;
71177 call_rcu(&slob_rcu->head, kmem_rcu_free);
71178 } else {
71179- __kmem_cache_free(b, c->size);
71180+ __kmem_cache_free(b, size);
71181 }
71182
71183+#ifdef CONFIG_PAX_USERCOPY
71184+ trace_kfree(_RET_IP_, b);
71185+#else
71186 trace_kmem_cache_free(_RET_IP_, b);
71187+#endif
71188+
71189 }
71190 EXPORT_SYMBOL(kmem_cache_free);
71191
71192diff --git a/mm/slub.c b/mm/slub.c
71193index 1a919f0..1739c9b 100644
71194--- a/mm/slub.c
71195+++ b/mm/slub.c
71196@@ -208,7 +208,7 @@ struct track {
71197
71198 enum track_item { TRACK_ALLOC, TRACK_FREE };
71199
71200-#ifdef CONFIG_SYSFS
71201+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71202 static int sysfs_slab_add(struct kmem_cache *);
71203 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71204 static void sysfs_slab_remove(struct kmem_cache *);
71205@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71206 if (!t->addr)
71207 return;
71208
71209- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71210+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71211 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71212 #ifdef CONFIG_STACKTRACE
71213 {
71214@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71215
71216 page = virt_to_head_page(x);
71217
71218+ BUG_ON(!PageSlab(page));
71219+
71220 slab_free(s, page, x, _RET_IP_);
71221
71222 trace_kmem_cache_free(_RET_IP_, x);
71223@@ -2592,7 +2594,7 @@ static int slub_min_objects;
71224 * Merge control. If this is set then no merging of slab caches will occur.
71225 * (Could be removed. This was introduced to pacify the merge skeptics.)
71226 */
71227-static int slub_nomerge;
71228+static int slub_nomerge = 1;
71229
71230 /*
71231 * Calculate the order of allocation given an slab object size.
71232@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71233 else
71234 s->cpu_partial = 30;
71235
71236- s->refcount = 1;
71237+ atomic_set(&s->refcount, 1);
71238 #ifdef CONFIG_NUMA
71239 s->remote_node_defrag_ratio = 1000;
71240 #endif
71241@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71242 void kmem_cache_destroy(struct kmem_cache *s)
71243 {
71244 down_write(&slub_lock);
71245- s->refcount--;
71246- if (!s->refcount) {
71247+ if (atomic_dec_and_test(&s->refcount)) {
71248 list_del(&s->list);
71249 up_write(&slub_lock);
71250 if (kmem_cache_close(s)) {
71251@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71252 EXPORT_SYMBOL(__kmalloc_node);
71253 #endif
71254
71255+void check_object_size(const void *ptr, unsigned long n, bool to)
71256+{
71257+
71258+#ifdef CONFIG_PAX_USERCOPY
71259+ struct page *page;
71260+ struct kmem_cache *s = NULL;
71261+ unsigned long offset;
71262+ const char *type;
71263+
71264+ if (!n)
71265+ return;
71266+
71267+ type = "<null>";
71268+ if (ZERO_OR_NULL_PTR(ptr))
71269+ goto report;
71270+
71271+ if (!virt_addr_valid(ptr))
71272+ return;
71273+
71274+ page = virt_to_head_page(ptr);
71275+
71276+ type = "<process stack>";
71277+ if (!PageSlab(page)) {
71278+ if (object_is_on_stack(ptr, n) == -1)
71279+ goto report;
71280+ return;
71281+ }
71282+
71283+ s = page->slab;
71284+ type = s->name;
71285+ if (!(s->flags & SLAB_USERCOPY))
71286+ goto report;
71287+
71288+ offset = (ptr - page_address(page)) % s->size;
71289+ if (offset <= s->objsize && n <= s->objsize - offset)
71290+ return;
71291+
71292+report:
71293+ pax_report_usercopy(ptr, n, to, type);
71294+#endif
71295+
71296+}
71297+EXPORT_SYMBOL(check_object_size);
71298+
71299 size_t ksize(const void *object)
71300 {
71301 struct page *page;
71302@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71303 int node;
71304
71305 list_add(&s->list, &slab_caches);
71306- s->refcount = -1;
71307+ atomic_set(&s->refcount, -1);
71308
71309 for_each_node_state(node, N_NORMAL_MEMORY) {
71310 struct kmem_cache_node *n = get_node(s, node);
71311@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71312
71313 /* Caches that are not of the two-to-the-power-of size */
71314 if (KMALLOC_MIN_SIZE <= 32) {
71315- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71316+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71317 caches++;
71318 }
71319
71320 if (KMALLOC_MIN_SIZE <= 64) {
71321- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71322+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71323 caches++;
71324 }
71325
71326 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71327- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71328+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71329 caches++;
71330 }
71331
71332@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71333 /*
71334 * We may have set a slab to be unmergeable during bootstrap.
71335 */
71336- if (s->refcount < 0)
71337+ if (atomic_read(&s->refcount) < 0)
71338 return 1;
71339
71340 return 0;
71341@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71342 down_write(&slub_lock);
71343 s = find_mergeable(size, align, flags, name, ctor);
71344 if (s) {
71345- s->refcount++;
71346+ atomic_inc(&s->refcount);
71347 /*
71348 * Adjust the object sizes so that we clear
71349 * the complete object on kzalloc.
71350@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71351 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71352
71353 if (sysfs_slab_alias(s, name)) {
71354- s->refcount--;
71355+ atomic_dec(&s->refcount);
71356 goto err;
71357 }
71358 up_write(&slub_lock);
71359@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71360 }
71361 #endif
71362
71363-#ifdef CONFIG_SYSFS
71364+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71365 static int count_inuse(struct page *page)
71366 {
71367 return page->inuse;
71368@@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71369 validate_slab_cache(kmalloc_caches[9]);
71370 }
71371 #else
71372-#ifdef CONFIG_SYSFS
71373+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71374 static void resiliency_test(void) {};
71375 #endif
71376 #endif
71377
71378-#ifdef CONFIG_SYSFS
71379+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71380 enum slab_stat_type {
71381 SL_ALL, /* All slabs */
71382 SL_PARTIAL, /* Only partially allocated slabs */
71383@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71384
71385 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71386 {
71387- return sprintf(buf, "%d\n", s->refcount - 1);
71388+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71389 }
71390 SLAB_ATTR_RO(aliases);
71391
71392@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71393 return name;
71394 }
71395
71396+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71397 static int sysfs_slab_add(struct kmem_cache *s)
71398 {
71399 int err;
71400@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71401 kobject_del(&s->kobj);
71402 kobject_put(&s->kobj);
71403 }
71404+#endif
71405
71406 /*
71407 * Need to buffer aliases during bootup until sysfs becomes
71408@@ -5298,6 +5345,7 @@ struct saved_alias {
71409
71410 static struct saved_alias *alias_list;
71411
71412+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71413 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71414 {
71415 struct saved_alias *al;
71416@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71417 alias_list = al;
71418 return 0;
71419 }
71420+#endif
71421
71422 static int __init slab_sysfs_init(void)
71423 {
71424diff --git a/mm/swap.c b/mm/swap.c
71425index 55b266d..a532537 100644
71426--- a/mm/swap.c
71427+++ b/mm/swap.c
71428@@ -31,6 +31,7 @@
71429 #include <linux/backing-dev.h>
71430 #include <linux/memcontrol.h>
71431 #include <linux/gfp.h>
71432+#include <linux/hugetlb.h>
71433
71434 #include "internal.h"
71435
71436@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71437
71438 __page_cache_release(page);
71439 dtor = get_compound_page_dtor(page);
71440+ if (!PageHuge(page))
71441+ BUG_ON(dtor != free_compound_page);
71442 (*dtor)(page);
71443 }
71444
71445diff --git a/mm/swapfile.c b/mm/swapfile.c
71446index b1cd120..aaae885 100644
71447--- a/mm/swapfile.c
71448+++ b/mm/swapfile.c
71449@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71450
71451 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71452 /* Activity counter to indicate that a swapon or swapoff has occurred */
71453-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71454+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71455
71456 static inline unsigned char swap_count(unsigned char ent)
71457 {
71458@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71459 }
71460 filp_close(swap_file, NULL);
71461 err = 0;
71462- atomic_inc(&proc_poll_event);
71463+ atomic_inc_unchecked(&proc_poll_event);
71464 wake_up_interruptible(&proc_poll_wait);
71465
71466 out_dput:
71467@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71468
71469 poll_wait(file, &proc_poll_wait, wait);
71470
71471- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71472- seq->poll_event = atomic_read(&proc_poll_event);
71473+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71474+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71475 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71476 }
71477
71478@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71479 return ret;
71480
71481 seq = file->private_data;
71482- seq->poll_event = atomic_read(&proc_poll_event);
71483+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71484 return 0;
71485 }
71486
71487@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71488 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71489
71490 mutex_unlock(&swapon_mutex);
71491- atomic_inc(&proc_poll_event);
71492+ atomic_inc_unchecked(&proc_poll_event);
71493 wake_up_interruptible(&proc_poll_wait);
71494
71495 if (S_ISREG(inode->i_mode))
71496diff --git a/mm/util.c b/mm/util.c
71497index 136ac4f..5117eef 100644
71498--- a/mm/util.c
71499+++ b/mm/util.c
71500@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71501 * allocated buffer. Use this if you don't want to free the buffer immediately
71502 * like, for example, with RCU.
71503 */
71504+#undef __krealloc
71505 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71506 {
71507 void *ret;
71508@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71509 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71510 * %NULL pointer, the object pointed to is freed.
71511 */
71512+#undef krealloc
71513 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71514 {
71515 void *ret;
71516@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71517 void arch_pick_mmap_layout(struct mm_struct *mm)
71518 {
71519 mm->mmap_base = TASK_UNMAPPED_BASE;
71520+
71521+#ifdef CONFIG_PAX_RANDMMAP
71522+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71523+ mm->mmap_base += mm->delta_mmap;
71524+#endif
71525+
71526 mm->get_unmapped_area = arch_get_unmapped_area;
71527 mm->unmap_area = arch_unmap_area;
71528 }
71529diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71530index 27be2f0..0aef2c2 100644
71531--- a/mm/vmalloc.c
71532+++ b/mm/vmalloc.c
71533@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71534
71535 pte = pte_offset_kernel(pmd, addr);
71536 do {
71537- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71538- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71539+
71540+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71541+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71542+ BUG_ON(!pte_exec(*pte));
71543+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71544+ continue;
71545+ }
71546+#endif
71547+
71548+ {
71549+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71550+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71551+ }
71552 } while (pte++, addr += PAGE_SIZE, addr != end);
71553 }
71554
71555@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71556 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71557 {
71558 pte_t *pte;
71559+ int ret = -ENOMEM;
71560
71561 /*
71562 * nr is a running index into the array which helps higher level
71563@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71564 pte = pte_alloc_kernel(pmd, addr);
71565 if (!pte)
71566 return -ENOMEM;
71567+
71568+ pax_open_kernel();
71569 do {
71570 struct page *page = pages[*nr];
71571
71572- if (WARN_ON(!pte_none(*pte)))
71573- return -EBUSY;
71574- if (WARN_ON(!page))
71575- return -ENOMEM;
71576+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71577+ if (pgprot_val(prot) & _PAGE_NX)
71578+#endif
71579+
71580+ if (WARN_ON(!pte_none(*pte))) {
71581+ ret = -EBUSY;
71582+ goto out;
71583+ }
71584+ if (WARN_ON(!page)) {
71585+ ret = -ENOMEM;
71586+ goto out;
71587+ }
71588 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71589 (*nr)++;
71590 } while (pte++, addr += PAGE_SIZE, addr != end);
71591- return 0;
71592+ ret = 0;
71593+out:
71594+ pax_close_kernel();
71595+ return ret;
71596 }
71597
71598 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71599@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71600 * and fall back on vmalloc() if that fails. Others
71601 * just put it in the vmalloc space.
71602 */
71603-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71604+#ifdef CONFIG_MODULES
71605+#ifdef MODULES_VADDR
71606 unsigned long addr = (unsigned long)x;
71607 if (addr >= MODULES_VADDR && addr < MODULES_END)
71608 return 1;
71609 #endif
71610+
71611+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71612+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71613+ return 1;
71614+#endif
71615+
71616+#endif
71617+
71618 return is_vmalloc_addr(x);
71619 }
71620
71621@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71622
71623 if (!pgd_none(*pgd)) {
71624 pud_t *pud = pud_offset(pgd, addr);
71625+#ifdef CONFIG_X86
71626+ if (!pud_large(*pud))
71627+#endif
71628 if (!pud_none(*pud)) {
71629 pmd_t *pmd = pmd_offset(pud, addr);
71630+#ifdef CONFIG_X86
71631+ if (!pmd_large(*pmd))
71632+#endif
71633 if (!pmd_none(*pmd)) {
71634 pte_t *ptep, pte;
71635
71636@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71637 struct vm_struct *area;
71638
71639 BUG_ON(in_interrupt());
71640+
71641+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71642+ if (flags & VM_KERNEXEC) {
71643+ if (start != VMALLOC_START || end != VMALLOC_END)
71644+ return NULL;
71645+ start = (unsigned long)MODULES_EXEC_VADDR;
71646+ end = (unsigned long)MODULES_EXEC_END;
71647+ }
71648+#endif
71649+
71650 if (flags & VM_IOREMAP) {
71651 int bit = fls(size);
71652
71653@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71654 if (count > totalram_pages)
71655 return NULL;
71656
71657+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71658+ if (!(pgprot_val(prot) & _PAGE_NX))
71659+ flags |= VM_KERNEXEC;
71660+#endif
71661+
71662 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71663 __builtin_return_address(0));
71664 if (!area)
71665@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71666 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71667 goto fail;
71668
71669+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71670+ if (!(pgprot_val(prot) & _PAGE_NX))
71671+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71672+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71673+ else
71674+#endif
71675+
71676 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71677 start, end, node, gfp_mask, caller);
71678 if (!area)
71679@@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71680 gfp_mask, prot, node, caller);
71681 }
71682
71683+#undef __vmalloc
71684 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71685 {
71686 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71687@@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71688 * For tight control over page level allocator and protection flags
71689 * use __vmalloc() instead.
71690 */
71691+#undef vmalloc
71692 void *vmalloc(unsigned long size)
71693 {
71694 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71695@@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71696 * For tight control over page level allocator and protection flags
71697 * use __vmalloc() instead.
71698 */
71699+#undef vzalloc
71700 void *vzalloc(unsigned long size)
71701 {
71702 return __vmalloc_node_flags(size, -1,
71703@@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71704 * The resulting memory area is zeroed so it can be mapped to userspace
71705 * without leaking data.
71706 */
71707+#undef vmalloc_user
71708 void *vmalloc_user(unsigned long size)
71709 {
71710 struct vm_struct *area;
71711@@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71712 * For tight control over page level allocator and protection flags
71713 * use __vmalloc() instead.
71714 */
71715+#undef vmalloc_node
71716 void *vmalloc_node(unsigned long size, int node)
71717 {
71718 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71719@@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71720 * For tight control over page level allocator and protection flags
71721 * use __vmalloc_node() instead.
71722 */
71723+#undef vzalloc_node
71724 void *vzalloc_node(unsigned long size, int node)
71725 {
71726 return __vmalloc_node_flags(size, node,
71727@@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71728 * For tight control over page level allocator and protection flags
71729 * use __vmalloc() instead.
71730 */
71731-
71732+#undef vmalloc_exec
71733 void *vmalloc_exec(unsigned long size)
71734 {
71735- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71736+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71737 -1, __builtin_return_address(0));
71738 }
71739
71740@@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71741 * Allocate enough 32bit PA addressable pages to cover @size from the
71742 * page level allocator and map them into contiguous kernel virtual space.
71743 */
71744+#undef vmalloc_32
71745 void *vmalloc_32(unsigned long size)
71746 {
71747 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71748@@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71749 * The resulting memory area is 32bit addressable and zeroed so it can be
71750 * mapped to userspace without leaking data.
71751 */
71752+#undef vmalloc_32_user
71753 void *vmalloc_32_user(unsigned long size)
71754 {
71755 struct vm_struct *area;
71756@@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71757 unsigned long uaddr = vma->vm_start;
71758 unsigned long usize = vma->vm_end - vma->vm_start;
71759
71760+ BUG_ON(vma->vm_mirror);
71761+
71762 if ((PAGE_SIZE-1) & (unsigned long)addr)
71763 return -EINVAL;
71764
71765diff --git a/mm/vmstat.c b/mm/vmstat.c
71766index 8fd603b..cf0d930 100644
71767--- a/mm/vmstat.c
71768+++ b/mm/vmstat.c
71769@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71770 *
71771 * vm_stat contains the global counters
71772 */
71773-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71774+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71775 EXPORT_SYMBOL(vm_stat);
71776
71777 #ifdef CONFIG_SMP
71778@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71779 v = p->vm_stat_diff[i];
71780 p->vm_stat_diff[i] = 0;
71781 local_irq_restore(flags);
71782- atomic_long_add(v, &zone->vm_stat[i]);
71783+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71784 global_diff[i] += v;
71785 #ifdef CONFIG_NUMA
71786 /* 3 seconds idle till flush */
71787@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71788
71789 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71790 if (global_diff[i])
71791- atomic_long_add(global_diff[i], &vm_stat[i]);
71792+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71793 }
71794
71795 #endif
71796@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71797 start_cpu_timer(cpu);
71798 #endif
71799 #ifdef CONFIG_PROC_FS
71800- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71801- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71802- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71803- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71804+ {
71805+ mode_t gr_mode = S_IRUGO;
71806+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71807+ gr_mode = S_IRUSR;
71808+#endif
71809+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71810+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71811+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71812+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71813+#else
71814+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71815+#endif
71816+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71817+ }
71818 #endif
71819 return 0;
71820 }
71821diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71822index 5471628..cef8398 100644
71823--- a/net/8021q/vlan.c
71824+++ b/net/8021q/vlan.c
71825@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71826 err = -EPERM;
71827 if (!capable(CAP_NET_ADMIN))
71828 break;
71829- if ((args.u.name_type >= 0) &&
71830- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71831+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71832 struct vlan_net *vn;
71833
71834 vn = net_generic(net, vlan_net_id);
71835diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71836index fdfdb57..38d368c 100644
71837--- a/net/9p/trans_fd.c
71838+++ b/net/9p/trans_fd.c
71839@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71840 oldfs = get_fs();
71841 set_fs(get_ds());
71842 /* The cast to a user pointer is valid due to the set_fs() */
71843- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71844+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71845 set_fs(oldfs);
71846
71847 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71848diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71849index f41f026..fe76ea8 100644
71850--- a/net/atm/atm_misc.c
71851+++ b/net/atm/atm_misc.c
71852@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71853 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71854 return 1;
71855 atm_return(vcc, truesize);
71856- atomic_inc(&vcc->stats->rx_drop);
71857+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71858 return 0;
71859 }
71860 EXPORT_SYMBOL(atm_charge);
71861@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71862 }
71863 }
71864 atm_return(vcc, guess);
71865- atomic_inc(&vcc->stats->rx_drop);
71866+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71867 return NULL;
71868 }
71869 EXPORT_SYMBOL(atm_alloc_charge);
71870@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71871
71872 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71873 {
71874-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71875+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71876 __SONET_ITEMS
71877 #undef __HANDLE_ITEM
71878 }
71879@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71880
71881 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71882 {
71883-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71884+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71885 __SONET_ITEMS
71886 #undef __HANDLE_ITEM
71887 }
71888diff --git a/net/atm/lec.h b/net/atm/lec.h
71889index dfc0719..47c5322 100644
71890--- a/net/atm/lec.h
71891+++ b/net/atm/lec.h
71892@@ -48,7 +48,7 @@ struct lane2_ops {
71893 const u8 *tlvs, u32 sizeoftlvs);
71894 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71895 const u8 *tlvs, u32 sizeoftlvs);
71896-};
71897+} __no_const;
71898
71899 /*
71900 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71901diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71902index 0919a88..a23d54e 100644
71903--- a/net/atm/mpc.h
71904+++ b/net/atm/mpc.h
71905@@ -33,7 +33,7 @@ struct mpoa_client {
71906 struct mpc_parameters parameters; /* parameters for this client */
71907
71908 const struct net_device_ops *old_ops;
71909- struct net_device_ops new_ops;
71910+ net_device_ops_no_const new_ops;
71911 };
71912
71913
71914diff --git a/net/atm/proc.c b/net/atm/proc.c
71915index 0d020de..011c7bb 100644
71916--- a/net/atm/proc.c
71917+++ b/net/atm/proc.c
71918@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71919 const struct k_atm_aal_stats *stats)
71920 {
71921 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71922- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71923- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71924- atomic_read(&stats->rx_drop));
71925+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71926+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71927+ atomic_read_unchecked(&stats->rx_drop));
71928 }
71929
71930 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71931diff --git a/net/atm/resources.c b/net/atm/resources.c
71932index 23f45ce..c748f1a 100644
71933--- a/net/atm/resources.c
71934+++ b/net/atm/resources.c
71935@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71936 static void copy_aal_stats(struct k_atm_aal_stats *from,
71937 struct atm_aal_stats *to)
71938 {
71939-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71940+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71941 __AAL_STAT_ITEMS
71942 #undef __HANDLE_ITEM
71943 }
71944@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71945 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71946 struct atm_aal_stats *to)
71947 {
71948-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71949+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71950 __AAL_STAT_ITEMS
71951 #undef __HANDLE_ITEM
71952 }
71953diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71954index 3512e25..2b33401 100644
71955--- a/net/batman-adv/bat_iv_ogm.c
71956+++ b/net/batman-adv/bat_iv_ogm.c
71957@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71958
71959 /* change sequence number to network order */
71960 batman_ogm_packet->seqno =
71961- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71962+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71963
71964 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71965 batman_ogm_packet->tt_crc = htons((uint16_t)
71966@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71967 else
71968 batman_ogm_packet->gw_flags = NO_FLAGS;
71969
71970- atomic_inc(&hard_iface->seqno);
71971+ atomic_inc_unchecked(&hard_iface->seqno);
71972
71973 slide_own_bcast_window(hard_iface);
71974 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71975@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71976 return;
71977
71978 /* could be changed by schedule_own_packet() */
71979- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71980+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71981
71982 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71983
71984diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71985index 7704df4..beb4e16 100644
71986--- a/net/batman-adv/hard-interface.c
71987+++ b/net/batman-adv/hard-interface.c
71988@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71989 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71990 dev_add_pack(&hard_iface->batman_adv_ptype);
71991
71992- atomic_set(&hard_iface->seqno, 1);
71993- atomic_set(&hard_iface->frag_seqno, 1);
71994+ atomic_set_unchecked(&hard_iface->seqno, 1);
71995+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71996 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71997 hard_iface->net_dev->name);
71998
71999diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72000index f9cc957..efd9dae 100644
72001--- a/net/batman-adv/soft-interface.c
72002+++ b/net/batman-adv/soft-interface.c
72003@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72004
72005 /* set broadcast sequence number */
72006 bcast_packet->seqno =
72007- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72008+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72009
72010 add_bcast_packet_to_list(bat_priv, skb, 1);
72011
72012@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72013 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72014
72015 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72016- atomic_set(&bat_priv->bcast_seqno, 1);
72017+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72018 atomic_set(&bat_priv->ttvn, 0);
72019 atomic_set(&bat_priv->tt_local_changes, 0);
72020 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72021diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72022index ab8d0fe..ceba3fd 100644
72023--- a/net/batman-adv/types.h
72024+++ b/net/batman-adv/types.h
72025@@ -38,8 +38,8 @@ struct hard_iface {
72026 int16_t if_num;
72027 char if_status;
72028 struct net_device *net_dev;
72029- atomic_t seqno;
72030- atomic_t frag_seqno;
72031+ atomic_unchecked_t seqno;
72032+ atomic_unchecked_t frag_seqno;
72033 unsigned char *packet_buff;
72034 int packet_len;
72035 struct kobject *hardif_obj;
72036@@ -154,7 +154,7 @@ struct bat_priv {
72037 atomic_t orig_interval; /* uint */
72038 atomic_t hop_penalty; /* uint */
72039 atomic_t log_level; /* uint */
72040- atomic_t bcast_seqno;
72041+ atomic_unchecked_t bcast_seqno;
72042 atomic_t bcast_queue_left;
72043 atomic_t batman_queue_left;
72044 atomic_t ttvn; /* translation table version number */
72045diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72046index 07d1c1d..7e9bea9 100644
72047--- a/net/batman-adv/unicast.c
72048+++ b/net/batman-adv/unicast.c
72049@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72050 frag1->flags = UNI_FRAG_HEAD | large_tail;
72051 frag2->flags = large_tail;
72052
72053- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72054+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72055 frag1->seqno = htons(seqno - 1);
72056 frag2->seqno = htons(seqno);
72057
72058diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72059index c1c597e..05ebb40 100644
72060--- a/net/bluetooth/hci_conn.c
72061+++ b/net/bluetooth/hci_conn.c
72062@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72063 memset(&cp, 0, sizeof(cp));
72064
72065 cp.handle = cpu_to_le16(conn->handle);
72066- memcpy(cp.ltk, ltk, sizeof(ltk));
72067+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72068
72069 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72070 }
72071diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72072index 17b5b1c..826d872 100644
72073--- a/net/bluetooth/l2cap_core.c
72074+++ b/net/bluetooth/l2cap_core.c
72075@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72076 break;
72077
72078 case L2CAP_CONF_RFC:
72079- if (olen == sizeof(rfc))
72080- memcpy(&rfc, (void *)val, olen);
72081+ if (olen != sizeof(rfc))
72082+ break;
72083+
72084+ memcpy(&rfc, (void *)val, olen);
72085
72086 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72087 rfc.mode != chan->mode)
72088@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72089
72090 switch (type) {
72091 case L2CAP_CONF_RFC:
72092- if (olen == sizeof(rfc))
72093- memcpy(&rfc, (void *)val, olen);
72094+ if (olen != sizeof(rfc))
72095+ break;
72096+
72097+ memcpy(&rfc, (void *)val, olen);
72098 goto done;
72099 }
72100 }
72101diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72102index a5f4e57..910ee6d 100644
72103--- a/net/bridge/br_multicast.c
72104+++ b/net/bridge/br_multicast.c
72105@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72106 nexthdr = ip6h->nexthdr;
72107 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72108
72109- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72110+ if (nexthdr != IPPROTO_ICMPV6)
72111 return 0;
72112
72113 /* Okay, we found ICMPv6 header */
72114diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72115index 5864cc4..121f3a3 100644
72116--- a/net/bridge/netfilter/ebtables.c
72117+++ b/net/bridge/netfilter/ebtables.c
72118@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72119 tmp.valid_hooks = t->table->valid_hooks;
72120 }
72121 mutex_unlock(&ebt_mutex);
72122- if (copy_to_user(user, &tmp, *len) != 0){
72123+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72124 BUGPRINT("c2u Didn't work\n");
72125 ret = -EFAULT;
72126 break;
72127diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72128index a986280..13444a1 100644
72129--- a/net/caif/caif_socket.c
72130+++ b/net/caif/caif_socket.c
72131@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72132 #ifdef CONFIG_DEBUG_FS
72133 struct debug_fs_counter {
72134 atomic_t caif_nr_socks;
72135- atomic_t caif_sock_create;
72136- atomic_t num_connect_req;
72137- atomic_t num_connect_resp;
72138- atomic_t num_connect_fail_resp;
72139- atomic_t num_disconnect;
72140- atomic_t num_remote_shutdown_ind;
72141- atomic_t num_tx_flow_off_ind;
72142- atomic_t num_tx_flow_on_ind;
72143- atomic_t num_rx_flow_off;
72144- atomic_t num_rx_flow_on;
72145+ atomic_unchecked_t caif_sock_create;
72146+ atomic_unchecked_t num_connect_req;
72147+ atomic_unchecked_t num_connect_resp;
72148+ atomic_unchecked_t num_connect_fail_resp;
72149+ atomic_unchecked_t num_disconnect;
72150+ atomic_unchecked_t num_remote_shutdown_ind;
72151+ atomic_unchecked_t num_tx_flow_off_ind;
72152+ atomic_unchecked_t num_tx_flow_on_ind;
72153+ atomic_unchecked_t num_rx_flow_off;
72154+ atomic_unchecked_t num_rx_flow_on;
72155 };
72156 static struct debug_fs_counter cnt;
72157 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72158+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72159 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72160 #else
72161 #define dbfs_atomic_inc(v) 0
72162@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72163 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72164 sk_rcvbuf_lowwater(cf_sk));
72165 set_rx_flow_off(cf_sk);
72166- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72167+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72168 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72169 }
72170
72171@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72172 set_rx_flow_off(cf_sk);
72173 if (net_ratelimit())
72174 pr_debug("sending flow OFF due to rmem_schedule\n");
72175- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72176+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72177 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72178 }
72179 skb->dev = NULL;
72180@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72181 switch (flow) {
72182 case CAIF_CTRLCMD_FLOW_ON_IND:
72183 /* OK from modem to start sending again */
72184- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72185+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72186 set_tx_flow_on(cf_sk);
72187 cf_sk->sk.sk_state_change(&cf_sk->sk);
72188 break;
72189
72190 case CAIF_CTRLCMD_FLOW_OFF_IND:
72191 /* Modem asks us to shut up */
72192- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72193+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72194 set_tx_flow_off(cf_sk);
72195 cf_sk->sk.sk_state_change(&cf_sk->sk);
72196 break;
72197@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72198 /* We're now connected */
72199 caif_client_register_refcnt(&cf_sk->layer,
72200 cfsk_hold, cfsk_put);
72201- dbfs_atomic_inc(&cnt.num_connect_resp);
72202+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72203 cf_sk->sk.sk_state = CAIF_CONNECTED;
72204 set_tx_flow_on(cf_sk);
72205 cf_sk->sk.sk_state_change(&cf_sk->sk);
72206@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72207
72208 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72209 /* Connect request failed */
72210- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72211+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72212 cf_sk->sk.sk_err = ECONNREFUSED;
72213 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72214 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72215@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72216
72217 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72218 /* Modem has closed this connection, or device is down. */
72219- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72220+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72221 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72222 cf_sk->sk.sk_err = ECONNRESET;
72223 set_rx_flow_on(cf_sk);
72224@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72225 return;
72226
72227 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72228- dbfs_atomic_inc(&cnt.num_rx_flow_on);
72229+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72230 set_rx_flow_on(cf_sk);
72231 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72232 }
72233@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72234 /*ifindex = id of the interface.*/
72235 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72236
72237- dbfs_atomic_inc(&cnt.num_connect_req);
72238+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72239 cf_sk->layer.receive = caif_sktrecv_cb;
72240
72241 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72242@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72243 spin_unlock_bh(&sk->sk_receive_queue.lock);
72244 sock->sk = NULL;
72245
72246- dbfs_atomic_inc(&cnt.num_disconnect);
72247+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72248
72249 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72250 if (cf_sk->debugfs_socket_dir != NULL)
72251@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72252 cf_sk->conn_req.protocol = protocol;
72253 /* Increase the number of sockets created. */
72254 dbfs_atomic_inc(&cnt.caif_nr_socks);
72255- num = dbfs_atomic_inc(&cnt.caif_sock_create);
72256+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72257 #ifdef CONFIG_DEBUG_FS
72258 if (!IS_ERR(debugfsdir)) {
72259
72260diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72261index 5cf5222..6f704ad 100644
72262--- a/net/caif/cfctrl.c
72263+++ b/net/caif/cfctrl.c
72264@@ -9,6 +9,7 @@
72265 #include <linux/stddef.h>
72266 #include <linux/spinlock.h>
72267 #include <linux/slab.h>
72268+#include <linux/sched.h>
72269 #include <net/caif/caif_layer.h>
72270 #include <net/caif/cfpkt.h>
72271 #include <net/caif/cfctrl.h>
72272@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72273 memset(&dev_info, 0, sizeof(dev_info));
72274 dev_info.id = 0xff;
72275 cfsrvl_init(&this->serv, 0, &dev_info, false);
72276- atomic_set(&this->req_seq_no, 1);
72277- atomic_set(&this->rsp_seq_no, 1);
72278+ atomic_set_unchecked(&this->req_seq_no, 1);
72279+ atomic_set_unchecked(&this->rsp_seq_no, 1);
72280 this->serv.layer.receive = cfctrl_recv;
72281 sprintf(this->serv.layer.name, "ctrl");
72282 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72283@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72284 struct cfctrl_request_info *req)
72285 {
72286 spin_lock_bh(&ctrl->info_list_lock);
72287- atomic_inc(&ctrl->req_seq_no);
72288- req->sequence_no = atomic_read(&ctrl->req_seq_no);
72289+ atomic_inc_unchecked(&ctrl->req_seq_no);
72290+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72291 list_add_tail(&req->list, &ctrl->list);
72292 spin_unlock_bh(&ctrl->info_list_lock);
72293 }
72294@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72295 if (p != first)
72296 pr_warn("Requests are not received in order\n");
72297
72298- atomic_set(&ctrl->rsp_seq_no,
72299+ atomic_set_unchecked(&ctrl->rsp_seq_no,
72300 p->sequence_no);
72301 list_del(&p->list);
72302 goto out;
72303diff --git a/net/can/gw.c b/net/can/gw.c
72304index 3d79b12..8de85fa 100644
72305--- a/net/can/gw.c
72306+++ b/net/can/gw.c
72307@@ -96,7 +96,7 @@ struct cf_mod {
72308 struct {
72309 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72310 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72311- } csumfunc;
72312+ } __no_const csumfunc;
72313 };
72314
72315
72316diff --git a/net/compat.c b/net/compat.c
72317index 6def90e..c6992fa 100644
72318--- a/net/compat.c
72319+++ b/net/compat.c
72320@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72321 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72322 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72323 return -EFAULT;
72324- kmsg->msg_name = compat_ptr(tmp1);
72325- kmsg->msg_iov = compat_ptr(tmp2);
72326- kmsg->msg_control = compat_ptr(tmp3);
72327+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72328+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72329+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72330 return 0;
72331 }
72332
72333@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72334
72335 if (kern_msg->msg_namelen) {
72336 if (mode == VERIFY_READ) {
72337- int err = move_addr_to_kernel(kern_msg->msg_name,
72338+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72339 kern_msg->msg_namelen,
72340 kern_address);
72341 if (err < 0)
72342@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72343 kern_msg->msg_name = NULL;
72344
72345 tot_len = iov_from_user_compat_to_kern(kern_iov,
72346- (struct compat_iovec __user *)kern_msg->msg_iov,
72347+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
72348 kern_msg->msg_iovlen);
72349 if (tot_len >= 0)
72350 kern_msg->msg_iov = kern_iov;
72351@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72352
72353 #define CMSG_COMPAT_FIRSTHDR(msg) \
72354 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72355- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72356+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72357 (struct compat_cmsghdr __user *)NULL)
72358
72359 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72360 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72361 (ucmlen) <= (unsigned long) \
72362 ((mhdr)->msg_controllen - \
72363- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72364+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72365
72366 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72367 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72368 {
72369 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72370- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72371+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72372 msg->msg_controllen)
72373 return NULL;
72374 return (struct compat_cmsghdr __user *)ptr;
72375@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72376 {
72377 struct compat_timeval ctv;
72378 struct compat_timespec cts[3];
72379- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72380+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72381 struct compat_cmsghdr cmhdr;
72382 int cmlen;
72383
72384@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72385
72386 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72387 {
72388- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72389+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72390 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72391 int fdnum = scm->fp->count;
72392 struct file **fp = scm->fp->fp;
72393@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72394 return -EFAULT;
72395 old_fs = get_fs();
72396 set_fs(KERNEL_DS);
72397- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72398+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72399 set_fs(old_fs);
72400
72401 return err;
72402@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72403 len = sizeof(ktime);
72404 old_fs = get_fs();
72405 set_fs(KERNEL_DS);
72406- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72407+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72408 set_fs(old_fs);
72409
72410 if (!err) {
72411@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72412 case MCAST_JOIN_GROUP:
72413 case MCAST_LEAVE_GROUP:
72414 {
72415- struct compat_group_req __user *gr32 = (void *)optval;
72416+ struct compat_group_req __user *gr32 = (void __user *)optval;
72417 struct group_req __user *kgr =
72418 compat_alloc_user_space(sizeof(struct group_req));
72419 u32 interface;
72420@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72421 case MCAST_BLOCK_SOURCE:
72422 case MCAST_UNBLOCK_SOURCE:
72423 {
72424- struct compat_group_source_req __user *gsr32 = (void *)optval;
72425+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72426 struct group_source_req __user *kgsr = compat_alloc_user_space(
72427 sizeof(struct group_source_req));
72428 u32 interface;
72429@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72430 }
72431 case MCAST_MSFILTER:
72432 {
72433- struct compat_group_filter __user *gf32 = (void *)optval;
72434+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72435 struct group_filter __user *kgf;
72436 u32 interface, fmode, numsrc;
72437
72438@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72439 char __user *optval, int __user *optlen,
72440 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72441 {
72442- struct compat_group_filter __user *gf32 = (void *)optval;
72443+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72444 struct group_filter __user *kgf;
72445 int __user *koptlen;
72446 u32 interface, fmode, numsrc;
72447diff --git a/net/core/datagram.c b/net/core/datagram.c
72448index 68bbf9f..5ef0d12 100644
72449--- a/net/core/datagram.c
72450+++ b/net/core/datagram.c
72451@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72452 }
72453
72454 kfree_skb(skb);
72455- atomic_inc(&sk->sk_drops);
72456+ atomic_inc_unchecked(&sk->sk_drops);
72457 sk_mem_reclaim_partial(sk);
72458
72459 return err;
72460diff --git a/net/core/dev.c b/net/core/dev.c
72461index 5a13edf..a6f2bd2 100644
72462--- a/net/core/dev.c
72463+++ b/net/core/dev.c
72464@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72465 if (no_module && capable(CAP_NET_ADMIN))
72466 no_module = request_module("netdev-%s", name);
72467 if (no_module && capable(CAP_SYS_MODULE)) {
72468+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72469+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72470+#else
72471 if (!request_module("%s", name))
72472 pr_err("Loading kernel module for a network device "
72473 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72474 "instead\n", name);
72475+#endif
72476 }
72477 }
72478 EXPORT_SYMBOL(dev_load);
72479@@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72480 {
72481 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72482 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72483- atomic_long_inc(&dev->rx_dropped);
72484+ atomic_long_inc_unchecked(&dev->rx_dropped);
72485 kfree_skb(skb);
72486 return NET_RX_DROP;
72487 }
72488@@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72489 nf_reset(skb);
72490
72491 if (unlikely(!is_skb_forwardable(dev, skb))) {
72492- atomic_long_inc(&dev->rx_dropped);
72493+ atomic_long_inc_unchecked(&dev->rx_dropped);
72494 kfree_skb(skb);
72495 return NET_RX_DROP;
72496 }
72497@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72498
72499 struct dev_gso_cb {
72500 void (*destructor)(struct sk_buff *skb);
72501-};
72502+} __no_const;
72503
72504 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72505
72506@@ -2970,7 +2974,7 @@ enqueue:
72507
72508 local_irq_restore(flags);
72509
72510- atomic_long_inc(&skb->dev->rx_dropped);
72511+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72512 kfree_skb(skb);
72513 return NET_RX_DROP;
72514 }
72515@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72516 }
72517 EXPORT_SYMBOL(netif_rx_ni);
72518
72519-static void net_tx_action(struct softirq_action *h)
72520+static void net_tx_action(void)
72521 {
72522 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72523
72524@@ -3333,7 +3337,7 @@ ncls:
72525 if (pt_prev) {
72526 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72527 } else {
72528- atomic_long_inc(&skb->dev->rx_dropped);
72529+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72530 kfree_skb(skb);
72531 /* Jamal, now you will not able to escape explaining
72532 * me how you were going to use this. :-)
72533@@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72534 }
72535 EXPORT_SYMBOL(netif_napi_del);
72536
72537-static void net_rx_action(struct softirq_action *h)
72538+static void net_rx_action(void)
72539 {
72540 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72541 unsigned long time_limit = jiffies + 2;
72542@@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72543 } else {
72544 netdev_stats_to_stats64(storage, &dev->stats);
72545 }
72546- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72547+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72548 return storage;
72549 }
72550 EXPORT_SYMBOL(dev_get_stats);
72551diff --git a/net/core/flow.c b/net/core/flow.c
72552index e318c7e..168b1d0 100644
72553--- a/net/core/flow.c
72554+++ b/net/core/flow.c
72555@@ -61,7 +61,7 @@ struct flow_cache {
72556 struct timer_list rnd_timer;
72557 };
72558
72559-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72560+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72561 EXPORT_SYMBOL(flow_cache_genid);
72562 static struct flow_cache flow_cache_global;
72563 static struct kmem_cache *flow_cachep __read_mostly;
72564@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72565
72566 static int flow_entry_valid(struct flow_cache_entry *fle)
72567 {
72568- if (atomic_read(&flow_cache_genid) != fle->genid)
72569+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72570 return 0;
72571 if (fle->object && !fle->object->ops->check(fle->object))
72572 return 0;
72573@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72574 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72575 fcp->hash_count++;
72576 }
72577- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72578+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72579 flo = fle->object;
72580 if (!flo)
72581 goto ret_object;
72582@@ -280,7 +280,7 @@ nocache:
72583 }
72584 flo = resolver(net, key, family, dir, flo, ctx);
72585 if (fle) {
72586- fle->genid = atomic_read(&flow_cache_genid);
72587+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72588 if (!IS_ERR(flo))
72589 fle->object = flo;
72590 else
72591diff --git a/net/core/iovec.c b/net/core/iovec.c
72592index c40f27e..7f49254 100644
72593--- a/net/core/iovec.c
72594+++ b/net/core/iovec.c
72595@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72596 if (m->msg_namelen) {
72597 if (mode == VERIFY_READ) {
72598 void __user *namep;
72599- namep = (void __user __force *) m->msg_name;
72600+ namep = (void __force_user *) m->msg_name;
72601 err = move_addr_to_kernel(namep, m->msg_namelen,
72602 address);
72603 if (err < 0)
72604@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72605 }
72606
72607 size = m->msg_iovlen * sizeof(struct iovec);
72608- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72609+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72610 return -EFAULT;
72611
72612 m->msg_iov = iov;
72613diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72614index 9083e82..1673203 100644
72615--- a/net/core/rtnetlink.c
72616+++ b/net/core/rtnetlink.c
72617@@ -57,7 +57,7 @@ struct rtnl_link {
72618 rtnl_doit_func doit;
72619 rtnl_dumpit_func dumpit;
72620 rtnl_calcit_func calcit;
72621-};
72622+} __no_const;
72623
72624 static DEFINE_MUTEX(rtnl_mutex);
72625 static u16 min_ifinfo_dump_size;
72626diff --git a/net/core/scm.c b/net/core/scm.c
72627index ff52ad0..aff1c0f 100644
72628--- a/net/core/scm.c
72629+++ b/net/core/scm.c
72630@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72631 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72632 {
72633 struct cmsghdr __user *cm
72634- = (__force struct cmsghdr __user *)msg->msg_control;
72635+ = (struct cmsghdr __force_user *)msg->msg_control;
72636 struct cmsghdr cmhdr;
72637 int cmlen = CMSG_LEN(len);
72638 int err;
72639@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72640 err = -EFAULT;
72641 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72642 goto out;
72643- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72644+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72645 goto out;
72646 cmlen = CMSG_SPACE(len);
72647 if (msg->msg_controllen < cmlen)
72648@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72649 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72650 {
72651 struct cmsghdr __user *cm
72652- = (__force struct cmsghdr __user*)msg->msg_control;
72653+ = (struct cmsghdr __force_user *)msg->msg_control;
72654
72655 int fdmax = 0;
72656 int fdnum = scm->fp->count;
72657@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72658 if (fdnum < fdmax)
72659 fdmax = fdnum;
72660
72661- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72662+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72663 i++, cmfptr++)
72664 {
72665 int new_fd;
72666diff --git a/net/core/sock.c b/net/core/sock.c
72667index b23f174..b9a0d26 100644
72668--- a/net/core/sock.c
72669+++ b/net/core/sock.c
72670@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72671 struct sk_buff_head *list = &sk->sk_receive_queue;
72672
72673 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72674- atomic_inc(&sk->sk_drops);
72675+ atomic_inc_unchecked(&sk->sk_drops);
72676 trace_sock_rcvqueue_full(sk, skb);
72677 return -ENOMEM;
72678 }
72679@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72680 return err;
72681
72682 if (!sk_rmem_schedule(sk, skb->truesize)) {
72683- atomic_inc(&sk->sk_drops);
72684+ atomic_inc_unchecked(&sk->sk_drops);
72685 return -ENOBUFS;
72686 }
72687
72688@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72689 skb_dst_force(skb);
72690
72691 spin_lock_irqsave(&list->lock, flags);
72692- skb->dropcount = atomic_read(&sk->sk_drops);
72693+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72694 __skb_queue_tail(list, skb);
72695 spin_unlock_irqrestore(&list->lock, flags);
72696
72697@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72698 skb->dev = NULL;
72699
72700 if (sk_rcvqueues_full(sk, skb)) {
72701- atomic_inc(&sk->sk_drops);
72702+ atomic_inc_unchecked(&sk->sk_drops);
72703 goto discard_and_relse;
72704 }
72705 if (nested)
72706@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72707 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72708 } else if (sk_add_backlog(sk, skb)) {
72709 bh_unlock_sock(sk);
72710- atomic_inc(&sk->sk_drops);
72711+ atomic_inc_unchecked(&sk->sk_drops);
72712 goto discard_and_relse;
72713 }
72714
72715@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72716 if (len > sizeof(peercred))
72717 len = sizeof(peercred);
72718 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72719- if (copy_to_user(optval, &peercred, len))
72720+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72721 return -EFAULT;
72722 goto lenout;
72723 }
72724@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72725 return -ENOTCONN;
72726 if (lv < len)
72727 return -EINVAL;
72728- if (copy_to_user(optval, address, len))
72729+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72730 return -EFAULT;
72731 goto lenout;
72732 }
72733@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72734
72735 if (len > lv)
72736 len = lv;
72737- if (copy_to_user(optval, &v, len))
72738+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72739 return -EFAULT;
72740 lenout:
72741 if (put_user(len, optlen))
72742@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72743 */
72744 smp_wmb();
72745 atomic_set(&sk->sk_refcnt, 1);
72746- atomic_set(&sk->sk_drops, 0);
72747+ atomic_set_unchecked(&sk->sk_drops, 0);
72748 }
72749 EXPORT_SYMBOL(sock_init_data);
72750
72751diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72752index 02e75d1..9a57a7c 100644
72753--- a/net/decnet/sysctl_net_decnet.c
72754+++ b/net/decnet/sysctl_net_decnet.c
72755@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72756
72757 if (len > *lenp) len = *lenp;
72758
72759- if (copy_to_user(buffer, addr, len))
72760+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72761 return -EFAULT;
72762
72763 *lenp = len;
72764@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72765
72766 if (len > *lenp) len = *lenp;
72767
72768- if (copy_to_user(buffer, devname, len))
72769+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72770 return -EFAULT;
72771
72772 *lenp = len;
72773diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72774index 39a2d29..f39c0fe 100644
72775--- a/net/econet/Kconfig
72776+++ b/net/econet/Kconfig
72777@@ -4,7 +4,7 @@
72778
72779 config ECONET
72780 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72781- depends on EXPERIMENTAL && INET
72782+ depends on EXPERIMENTAL && INET && BROKEN
72783 ---help---
72784 Econet is a fairly old and slow networking protocol mainly used by
72785 Acorn computers to access file and print servers. It uses native
72786diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72787index 92fc5f6..b790d91 100644
72788--- a/net/ipv4/fib_frontend.c
72789+++ b/net/ipv4/fib_frontend.c
72790@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72791 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72792 fib_sync_up(dev);
72793 #endif
72794- atomic_inc(&net->ipv4.dev_addr_genid);
72795+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72796 rt_cache_flush(dev_net(dev), -1);
72797 break;
72798 case NETDEV_DOWN:
72799 fib_del_ifaddr(ifa, NULL);
72800- atomic_inc(&net->ipv4.dev_addr_genid);
72801+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72802 if (ifa->ifa_dev->ifa_list == NULL) {
72803 /* Last address was deleted from this interface.
72804 * Disable IP.
72805@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72806 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72807 fib_sync_up(dev);
72808 #endif
72809- atomic_inc(&net->ipv4.dev_addr_genid);
72810+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72811 rt_cache_flush(dev_net(dev), -1);
72812 break;
72813 case NETDEV_DOWN:
72814diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72815index 80106d8..232e898 100644
72816--- a/net/ipv4/fib_semantics.c
72817+++ b/net/ipv4/fib_semantics.c
72818@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72819 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72820 nh->nh_gw,
72821 nh->nh_parent->fib_scope);
72822- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72823+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72824
72825 return nh->nh_saddr;
72826 }
72827diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72828index ccee270..db23c3c 100644
72829--- a/net/ipv4/inet_diag.c
72830+++ b/net/ipv4/inet_diag.c
72831@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72832 r->idiag_retrans = 0;
72833
72834 r->id.idiag_if = sk->sk_bound_dev_if;
72835+
72836+#ifdef CONFIG_GRKERNSEC_HIDESYM
72837+ r->id.idiag_cookie[0] = 0;
72838+ r->id.idiag_cookie[1] = 0;
72839+#else
72840 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72841 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72842+#endif
72843
72844 r->id.idiag_sport = inet->inet_sport;
72845 r->id.idiag_dport = inet->inet_dport;
72846@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72847 r->idiag_family = tw->tw_family;
72848 r->idiag_retrans = 0;
72849 r->id.idiag_if = tw->tw_bound_dev_if;
72850+
72851+#ifdef CONFIG_GRKERNSEC_HIDESYM
72852+ r->id.idiag_cookie[0] = 0;
72853+ r->id.idiag_cookie[1] = 0;
72854+#else
72855 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72856 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72857+#endif
72858+
72859 r->id.idiag_sport = tw->tw_sport;
72860 r->id.idiag_dport = tw->tw_dport;
72861 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72862@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72863 if (sk == NULL)
72864 goto unlock;
72865
72866+#ifndef CONFIG_GRKERNSEC_HIDESYM
72867 err = -ESTALE;
72868 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72869 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72870 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72871 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72872 goto out;
72873+#endif
72874
72875 err = -ENOMEM;
72876 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72877@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72878 r->idiag_retrans = req->retrans;
72879
72880 r->id.idiag_if = sk->sk_bound_dev_if;
72881+
72882+#ifdef CONFIG_GRKERNSEC_HIDESYM
72883+ r->id.idiag_cookie[0] = 0;
72884+ r->id.idiag_cookie[1] = 0;
72885+#else
72886 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72887 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72888+#endif
72889
72890 tmo = req->expires - jiffies;
72891 if (tmo < 0)
72892diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72893index 984ec65..97ac518 100644
72894--- a/net/ipv4/inet_hashtables.c
72895+++ b/net/ipv4/inet_hashtables.c
72896@@ -18,12 +18,15 @@
72897 #include <linux/sched.h>
72898 #include <linux/slab.h>
72899 #include <linux/wait.h>
72900+#include <linux/security.h>
72901
72902 #include <net/inet_connection_sock.h>
72903 #include <net/inet_hashtables.h>
72904 #include <net/secure_seq.h>
72905 #include <net/ip.h>
72906
72907+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72908+
72909 /*
72910 * Allocate and initialize a new local port bind bucket.
72911 * The bindhash mutex for snum's hash chain must be held here.
72912@@ -530,6 +533,8 @@ ok:
72913 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72914 spin_unlock(&head->lock);
72915
72916+ gr_update_task_in_ip_table(current, inet_sk(sk));
72917+
72918 if (tw) {
72919 inet_twsk_deschedule(tw, death_row);
72920 while (twrefcnt) {
72921diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72922index 86f13c67..59a35b5 100644
72923--- a/net/ipv4/inetpeer.c
72924+++ b/net/ipv4/inetpeer.c
72925@@ -436,8 +436,8 @@ relookup:
72926 if (p) {
72927 p->daddr = *daddr;
72928 atomic_set(&p->refcnt, 1);
72929- atomic_set(&p->rid, 0);
72930- atomic_set(&p->ip_id_count,
72931+ atomic_set_unchecked(&p->rid, 0);
72932+ atomic_set_unchecked(&p->ip_id_count,
72933 (daddr->family == AF_INET) ?
72934 secure_ip_id(daddr->addr.a4) :
72935 secure_ipv6_id(daddr->addr.a6));
72936diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72937index fdaabf2..0ec3205 100644
72938--- a/net/ipv4/ip_fragment.c
72939+++ b/net/ipv4/ip_fragment.c
72940@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72941 return 0;
72942
72943 start = qp->rid;
72944- end = atomic_inc_return(&peer->rid);
72945+ end = atomic_inc_return_unchecked(&peer->rid);
72946 qp->rid = end;
72947
72948 rc = qp->q.fragments && (end - start) > max;
72949diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72950index 09ff51b..d3968eb 100644
72951--- a/net/ipv4/ip_sockglue.c
72952+++ b/net/ipv4/ip_sockglue.c
72953@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72954 len = min_t(unsigned int, len, opt->optlen);
72955 if (put_user(len, optlen))
72956 return -EFAULT;
72957- if (copy_to_user(optval, opt->__data, len))
72958+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72959+ copy_to_user(optval, opt->__data, len))
72960 return -EFAULT;
72961 return 0;
72962 }
72963@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72964 if (sk->sk_type != SOCK_STREAM)
72965 return -ENOPROTOOPT;
72966
72967- msg.msg_control = optval;
72968+ msg.msg_control = (void __force_kernel *)optval;
72969 msg.msg_controllen = len;
72970 msg.msg_flags = flags;
72971
72972diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72973index 99ec116..c5628fe 100644
72974--- a/net/ipv4/ipconfig.c
72975+++ b/net/ipv4/ipconfig.c
72976@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72977
72978 mm_segment_t oldfs = get_fs();
72979 set_fs(get_ds());
72980- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72981+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72982 set_fs(oldfs);
72983 return res;
72984 }
72985@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72986
72987 mm_segment_t oldfs = get_fs();
72988 set_fs(get_ds());
72989- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72990+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72991 set_fs(oldfs);
72992 return res;
72993 }
72994@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72995
72996 mm_segment_t oldfs = get_fs();
72997 set_fs(get_ds());
72998- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72999+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73000 set_fs(oldfs);
73001 return res;
73002 }
73003diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73004index 2133c30..5c4b40b 100644
73005--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73006+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73007@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73008
73009 *len = 0;
73010
73011- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73012+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73013 if (*octets == NULL)
73014 return 0;
73015
73016diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73017index 43d4c3b..1914409 100644
73018--- a/net/ipv4/ping.c
73019+++ b/net/ipv4/ping.c
73020@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73021 sk_rmem_alloc_get(sp),
73022 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73023 atomic_read(&sp->sk_refcnt), sp,
73024- atomic_read(&sp->sk_drops), len);
73025+ atomic_read_unchecked(&sp->sk_drops), len);
73026 }
73027
73028 static int ping_seq_show(struct seq_file *seq, void *v)
73029diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73030index 007e2eb..85a18a0 100644
73031--- a/net/ipv4/raw.c
73032+++ b/net/ipv4/raw.c
73033@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73034 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73035 {
73036 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73037- atomic_inc(&sk->sk_drops);
73038+ atomic_inc_unchecked(&sk->sk_drops);
73039 kfree_skb(skb);
73040 return NET_RX_DROP;
73041 }
73042@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73043
73044 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73045 {
73046+ struct icmp_filter filter;
73047+
73048 if (optlen > sizeof(struct icmp_filter))
73049 optlen = sizeof(struct icmp_filter);
73050- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73051+ if (copy_from_user(&filter, optval, optlen))
73052 return -EFAULT;
73053+ raw_sk(sk)->filter = filter;
73054 return 0;
73055 }
73056
73057 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73058 {
73059 int len, ret = -EFAULT;
73060+ struct icmp_filter filter;
73061
73062 if (get_user(len, optlen))
73063 goto out;
73064@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73065 if (len > sizeof(struct icmp_filter))
73066 len = sizeof(struct icmp_filter);
73067 ret = -EFAULT;
73068- if (put_user(len, optlen) ||
73069- copy_to_user(optval, &raw_sk(sk)->filter, len))
73070+ filter = raw_sk(sk)->filter;
73071+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73072 goto out;
73073 ret = 0;
73074 out: return ret;
73075@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73076 sk_wmem_alloc_get(sp),
73077 sk_rmem_alloc_get(sp),
73078 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73079- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73080+ atomic_read(&sp->sk_refcnt),
73081+#ifdef CONFIG_GRKERNSEC_HIDESYM
73082+ NULL,
73083+#else
73084+ sp,
73085+#endif
73086+ atomic_read_unchecked(&sp->sk_drops));
73087 }
73088
73089 static int raw_seq_show(struct seq_file *seq, void *v)
73090diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73091index 94cdbc5..0cb0063 100644
73092--- a/net/ipv4/route.c
73093+++ b/net/ipv4/route.c
73094@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73095
73096 static inline int rt_genid(struct net *net)
73097 {
73098- return atomic_read(&net->ipv4.rt_genid);
73099+ return atomic_read_unchecked(&net->ipv4.rt_genid);
73100 }
73101
73102 #ifdef CONFIG_PROC_FS
73103@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73104 unsigned char shuffle;
73105
73106 get_random_bytes(&shuffle, sizeof(shuffle));
73107- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73108+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73109 redirect_genid++;
73110 }
73111
73112@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73113 error = rt->dst.error;
73114 if (peer) {
73115 inet_peer_refcheck(rt->peer);
73116- id = atomic_read(&peer->ip_id_count) & 0xffff;
73117+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73118 if (peer->tcp_ts_stamp) {
73119 ts = peer->tcp_ts;
73120 tsage = get_seconds() - peer->tcp_ts_stamp;
73121diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73122index c89e354..8bd55c8 100644
73123--- a/net/ipv4/tcp_ipv4.c
73124+++ b/net/ipv4/tcp_ipv4.c
73125@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73126 int sysctl_tcp_low_latency __read_mostly;
73127 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73128
73129+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73130+extern int grsec_enable_blackhole;
73131+#endif
73132
73133 #ifdef CONFIG_TCP_MD5SIG
73134 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73135@@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73136 return 0;
73137
73138 reset:
73139+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73140+ if (!grsec_enable_blackhole)
73141+#endif
73142 tcp_v4_send_reset(rsk, skb);
73143 discard:
73144 kfree_skb(skb);
73145@@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73146 TCP_SKB_CB(skb)->sacked = 0;
73147
73148 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73149- if (!sk)
73150+ if (!sk) {
73151+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73152+ ret = 1;
73153+#endif
73154 goto no_tcp_socket;
73155-
73156+ }
73157 process:
73158- if (sk->sk_state == TCP_TIME_WAIT)
73159+ if (sk->sk_state == TCP_TIME_WAIT) {
73160+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73161+ ret = 2;
73162+#endif
73163 goto do_time_wait;
73164+ }
73165
73166 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73167 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73168@@ -1744,6 +1757,10 @@ no_tcp_socket:
73169 bad_packet:
73170 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73171 } else {
73172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73173+ if (!grsec_enable_blackhole || (ret == 1 &&
73174+ (skb->dev->flags & IFF_LOOPBACK)))
73175+#endif
73176 tcp_v4_send_reset(NULL, skb);
73177 }
73178
73179@@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73180 0, /* non standard timer */
73181 0, /* open_requests have no inode */
73182 atomic_read(&sk->sk_refcnt),
73183+#ifdef CONFIG_GRKERNSEC_HIDESYM
73184+ NULL,
73185+#else
73186 req,
73187+#endif
73188 len);
73189 }
73190
73191@@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73192 sock_i_uid(sk),
73193 icsk->icsk_probes_out,
73194 sock_i_ino(sk),
73195- atomic_read(&sk->sk_refcnt), sk,
73196+ atomic_read(&sk->sk_refcnt),
73197+#ifdef CONFIG_GRKERNSEC_HIDESYM
73198+ NULL,
73199+#else
73200+ sk,
73201+#endif
73202 jiffies_to_clock_t(icsk->icsk_rto),
73203 jiffies_to_clock_t(icsk->icsk_ack.ato),
73204 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73205@@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73206 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73207 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73208 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73209- atomic_read(&tw->tw_refcnt), tw, len);
73210+ atomic_read(&tw->tw_refcnt),
73211+#ifdef CONFIG_GRKERNSEC_HIDESYM
73212+ NULL,
73213+#else
73214+ tw,
73215+#endif
73216+ len);
73217 }
73218
73219 #define TMPSZ 150
73220diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73221index 66363b6..b0654a3 100644
73222--- a/net/ipv4/tcp_minisocks.c
73223+++ b/net/ipv4/tcp_minisocks.c
73224@@ -27,6 +27,10 @@
73225 #include <net/inet_common.h>
73226 #include <net/xfrm.h>
73227
73228+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73229+extern int grsec_enable_blackhole;
73230+#endif
73231+
73232 int sysctl_tcp_syncookies __read_mostly = 1;
73233 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73234
73235@@ -751,6 +755,10 @@ listen_overflow:
73236
73237 embryonic_reset:
73238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73239+
73240+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73241+ if (!grsec_enable_blackhole)
73242+#endif
73243 if (!(flg & TCP_FLAG_RST))
73244 req->rsk_ops->send_reset(sk, skb);
73245
73246diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73247index 85ee7eb..53277ab 100644
73248--- a/net/ipv4/tcp_probe.c
73249+++ b/net/ipv4/tcp_probe.c
73250@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73251 if (cnt + width >= len)
73252 break;
73253
73254- if (copy_to_user(buf + cnt, tbuf, width))
73255+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73256 return -EFAULT;
73257 cnt += width;
73258 }
73259diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73260index 2e0f0af..e2948bf 100644
73261--- a/net/ipv4/tcp_timer.c
73262+++ b/net/ipv4/tcp_timer.c
73263@@ -22,6 +22,10 @@
73264 #include <linux/gfp.h>
73265 #include <net/tcp.h>
73266
73267+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73268+extern int grsec_lastack_retries;
73269+#endif
73270+
73271 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73272 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73273 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73274@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73275 }
73276 }
73277
73278+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73279+ if ((sk->sk_state == TCP_LAST_ACK) &&
73280+ (grsec_lastack_retries > 0) &&
73281+ (grsec_lastack_retries < retry_until))
73282+ retry_until = grsec_lastack_retries;
73283+#endif
73284+
73285 if (retransmits_timed_out(sk, retry_until,
73286 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73287 /* Has it gone just too far? */
73288diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73289index 5a65eea..bd913a1 100644
73290--- a/net/ipv4/udp.c
73291+++ b/net/ipv4/udp.c
73292@@ -86,6 +86,7 @@
73293 #include <linux/types.h>
73294 #include <linux/fcntl.h>
73295 #include <linux/module.h>
73296+#include <linux/security.h>
73297 #include <linux/socket.h>
73298 #include <linux/sockios.h>
73299 #include <linux/igmp.h>
73300@@ -108,6 +109,10 @@
73301 #include <trace/events/udp.h>
73302 #include "udp_impl.h"
73303
73304+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73305+extern int grsec_enable_blackhole;
73306+#endif
73307+
73308 struct udp_table udp_table __read_mostly;
73309 EXPORT_SYMBOL(udp_table);
73310
73311@@ -565,6 +570,9 @@ found:
73312 return s;
73313 }
73314
73315+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73316+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73317+
73318 /*
73319 * This routine is called by the ICMP module when it gets some
73320 * sort of error condition. If err < 0 then the socket should
73321@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73322 dport = usin->sin_port;
73323 if (dport == 0)
73324 return -EINVAL;
73325+
73326+ err = gr_search_udp_sendmsg(sk, usin);
73327+ if (err)
73328+ return err;
73329 } else {
73330 if (sk->sk_state != TCP_ESTABLISHED)
73331 return -EDESTADDRREQ;
73332+
73333+ err = gr_search_udp_sendmsg(sk, NULL);
73334+ if (err)
73335+ return err;
73336+
73337 daddr = inet->inet_daddr;
73338 dport = inet->inet_dport;
73339 /* Open fast path for connected socket.
73340@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73341 udp_lib_checksum_complete(skb)) {
73342 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73343 IS_UDPLITE(sk));
73344- atomic_inc(&sk->sk_drops);
73345+ atomic_inc_unchecked(&sk->sk_drops);
73346 __skb_unlink(skb, rcvq);
73347 __skb_queue_tail(&list_kill, skb);
73348 }
73349@@ -1185,6 +1202,10 @@ try_again:
73350 if (!skb)
73351 goto out;
73352
73353+ err = gr_search_udp_recvmsg(sk, skb);
73354+ if (err)
73355+ goto out_free;
73356+
73357 ulen = skb->len - sizeof(struct udphdr);
73358 copied = len;
73359 if (copied > ulen)
73360@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73361
73362 drop:
73363 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73364- atomic_inc(&sk->sk_drops);
73365+ atomic_inc_unchecked(&sk->sk_drops);
73366 kfree_skb(skb);
73367 return -1;
73368 }
73369@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73370 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73371
73372 if (!skb1) {
73373- atomic_inc(&sk->sk_drops);
73374+ atomic_inc_unchecked(&sk->sk_drops);
73375 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73376 IS_UDPLITE(sk));
73377 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73378@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73379 goto csum_error;
73380
73381 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73382+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73383+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73384+#endif
73385 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73386
73387 /*
73388@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73389 sk_wmem_alloc_get(sp),
73390 sk_rmem_alloc_get(sp),
73391 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73392- atomic_read(&sp->sk_refcnt), sp,
73393- atomic_read(&sp->sk_drops), len);
73394+ atomic_read(&sp->sk_refcnt),
73395+#ifdef CONFIG_GRKERNSEC_HIDESYM
73396+ NULL,
73397+#else
73398+ sp,
73399+#endif
73400+ atomic_read_unchecked(&sp->sk_drops), len);
73401 }
73402
73403 int udp4_seq_show(struct seq_file *seq, void *v)
73404diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73405index 836c4ea..cbb74dc 100644
73406--- a/net/ipv6/addrconf.c
73407+++ b/net/ipv6/addrconf.c
73408@@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73409 p.iph.ihl = 5;
73410 p.iph.protocol = IPPROTO_IPV6;
73411 p.iph.ttl = 64;
73412- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73413+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73414
73415 if (ops->ndo_do_ioctl) {
73416 mm_segment_t oldfs = get_fs();
73417diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73418index 1567fb1..29af910 100644
73419--- a/net/ipv6/inet6_connection_sock.c
73420+++ b/net/ipv6/inet6_connection_sock.c
73421@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73422 #ifdef CONFIG_XFRM
73423 {
73424 struct rt6_info *rt = (struct rt6_info *)dst;
73425- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73426+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73427 }
73428 #endif
73429 }
73430@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73431 #ifdef CONFIG_XFRM
73432 if (dst) {
73433 struct rt6_info *rt = (struct rt6_info *)dst;
73434- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73435+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73436 __sk_dst_reset(sk);
73437 dst = NULL;
73438 }
73439diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73440index 26cb08c..8af9877 100644
73441--- a/net/ipv6/ipv6_sockglue.c
73442+++ b/net/ipv6/ipv6_sockglue.c
73443@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73444 if (sk->sk_type != SOCK_STREAM)
73445 return -ENOPROTOOPT;
73446
73447- msg.msg_control = optval;
73448+ msg.msg_control = (void __force_kernel *)optval;
73449 msg.msg_controllen = len;
73450 msg.msg_flags = flags;
73451
73452diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73453index 361ebf3..d5628fb 100644
73454--- a/net/ipv6/raw.c
73455+++ b/net/ipv6/raw.c
73456@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73457 {
73458 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73459 skb_checksum_complete(skb)) {
73460- atomic_inc(&sk->sk_drops);
73461+ atomic_inc_unchecked(&sk->sk_drops);
73462 kfree_skb(skb);
73463 return NET_RX_DROP;
73464 }
73465@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73466 struct raw6_sock *rp = raw6_sk(sk);
73467
73468 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73469- atomic_inc(&sk->sk_drops);
73470+ atomic_inc_unchecked(&sk->sk_drops);
73471 kfree_skb(skb);
73472 return NET_RX_DROP;
73473 }
73474@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73475
73476 if (inet->hdrincl) {
73477 if (skb_checksum_complete(skb)) {
73478- atomic_inc(&sk->sk_drops);
73479+ atomic_inc_unchecked(&sk->sk_drops);
73480 kfree_skb(skb);
73481 return NET_RX_DROP;
73482 }
73483@@ -601,7 +601,7 @@ out:
73484 return err;
73485 }
73486
73487-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73488+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73489 struct flowi6 *fl6, struct dst_entry **dstp,
73490 unsigned int flags)
73491 {
73492@@ -909,12 +909,15 @@ do_confirm:
73493 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73494 char __user *optval, int optlen)
73495 {
73496+ struct icmp6_filter filter;
73497+
73498 switch (optname) {
73499 case ICMPV6_FILTER:
73500 if (optlen > sizeof(struct icmp6_filter))
73501 optlen = sizeof(struct icmp6_filter);
73502- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73503+ if (copy_from_user(&filter, optval, optlen))
73504 return -EFAULT;
73505+ raw6_sk(sk)->filter = filter;
73506 return 0;
73507 default:
73508 return -ENOPROTOOPT;
73509@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73510 char __user *optval, int __user *optlen)
73511 {
73512 int len;
73513+ struct icmp6_filter filter;
73514
73515 switch (optname) {
73516 case ICMPV6_FILTER:
73517@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73518 len = sizeof(struct icmp6_filter);
73519 if (put_user(len, optlen))
73520 return -EFAULT;
73521- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73522+ filter = raw6_sk(sk)->filter;
73523+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73524 return -EFAULT;
73525 return 0;
73526 default:
73527@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73528 0, 0L, 0,
73529 sock_i_uid(sp), 0,
73530 sock_i_ino(sp),
73531- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73532+ atomic_read(&sp->sk_refcnt),
73533+#ifdef CONFIG_GRKERNSEC_HIDESYM
73534+ NULL,
73535+#else
73536+ sp,
73537+#endif
73538+ atomic_read_unchecked(&sp->sk_drops));
73539 }
73540
73541 static int raw6_seq_show(struct seq_file *seq, void *v)
73542diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73543index b859e4a..f9d1589 100644
73544--- a/net/ipv6/tcp_ipv6.c
73545+++ b/net/ipv6/tcp_ipv6.c
73546@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73547 }
73548 #endif
73549
73550+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73551+extern int grsec_enable_blackhole;
73552+#endif
73553+
73554 static void tcp_v6_hash(struct sock *sk)
73555 {
73556 if (sk->sk_state != TCP_CLOSE) {
73557@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73558 return 0;
73559
73560 reset:
73561+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73562+ if (!grsec_enable_blackhole)
73563+#endif
73564 tcp_v6_send_reset(sk, skb);
73565 discard:
73566 if (opt_skb)
73567@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73568 TCP_SKB_CB(skb)->sacked = 0;
73569
73570 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73571- if (!sk)
73572+ if (!sk) {
73573+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73574+ ret = 1;
73575+#endif
73576 goto no_tcp_socket;
73577+ }
73578
73579 process:
73580- if (sk->sk_state == TCP_TIME_WAIT)
73581+ if (sk->sk_state == TCP_TIME_WAIT) {
73582+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73583+ ret = 2;
73584+#endif
73585 goto do_time_wait;
73586+ }
73587
73588 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73589 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73590@@ -1783,6 +1798,10 @@ no_tcp_socket:
73591 bad_packet:
73592 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73593 } else {
73594+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73595+ if (!grsec_enable_blackhole || (ret == 1 &&
73596+ (skb->dev->flags & IFF_LOOPBACK)))
73597+#endif
73598 tcp_v6_send_reset(NULL, skb);
73599 }
73600
73601@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73602 uid,
73603 0, /* non standard timer */
73604 0, /* open_requests have no inode */
73605- 0, req);
73606+ 0,
73607+#ifdef CONFIG_GRKERNSEC_HIDESYM
73608+ NULL
73609+#else
73610+ req
73611+#endif
73612+ );
73613 }
73614
73615 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73616@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73617 sock_i_uid(sp),
73618 icsk->icsk_probes_out,
73619 sock_i_ino(sp),
73620- atomic_read(&sp->sk_refcnt), sp,
73621+ atomic_read(&sp->sk_refcnt),
73622+#ifdef CONFIG_GRKERNSEC_HIDESYM
73623+ NULL,
73624+#else
73625+ sp,
73626+#endif
73627 jiffies_to_clock_t(icsk->icsk_rto),
73628 jiffies_to_clock_t(icsk->icsk_ack.ato),
73629 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73630@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73631 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73632 tw->tw_substate, 0, 0,
73633 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73634- atomic_read(&tw->tw_refcnt), tw);
73635+ atomic_read(&tw->tw_refcnt),
73636+#ifdef CONFIG_GRKERNSEC_HIDESYM
73637+ NULL
73638+#else
73639+ tw
73640+#endif
73641+ );
73642 }
73643
73644 static int tcp6_seq_show(struct seq_file *seq, void *v)
73645diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73646index 8c25419..47a51ae 100644
73647--- a/net/ipv6/udp.c
73648+++ b/net/ipv6/udp.c
73649@@ -50,6 +50,10 @@
73650 #include <linux/seq_file.h>
73651 #include "udp_impl.h"
73652
73653+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73654+extern int grsec_enable_blackhole;
73655+#endif
73656+
73657 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73658 {
73659 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73660@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73661
73662 return 0;
73663 drop:
73664- atomic_inc(&sk->sk_drops);
73665+ atomic_inc_unchecked(&sk->sk_drops);
73666 drop_no_sk_drops_inc:
73667 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73668 kfree_skb(skb);
73669@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73670 continue;
73671 }
73672 drop:
73673- atomic_inc(&sk->sk_drops);
73674+ atomic_inc_unchecked(&sk->sk_drops);
73675 UDP6_INC_STATS_BH(sock_net(sk),
73676 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73677 UDP6_INC_STATS_BH(sock_net(sk),
73678@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73679 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73680 proto == IPPROTO_UDPLITE);
73681
73682+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73683+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73684+#endif
73685 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73686
73687 kfree_skb(skb);
73688@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73689 if (!sock_owned_by_user(sk))
73690 udpv6_queue_rcv_skb(sk, skb);
73691 else if (sk_add_backlog(sk, skb)) {
73692- atomic_inc(&sk->sk_drops);
73693+ atomic_inc_unchecked(&sk->sk_drops);
73694 bh_unlock_sock(sk);
73695 sock_put(sk);
73696 goto discard;
73697@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73698 0, 0L, 0,
73699 sock_i_uid(sp), 0,
73700 sock_i_ino(sp),
73701- atomic_read(&sp->sk_refcnt), sp,
73702- atomic_read(&sp->sk_drops));
73703+ atomic_read(&sp->sk_refcnt),
73704+#ifdef CONFIG_GRKERNSEC_HIDESYM
73705+ NULL,
73706+#else
73707+ sp,
73708+#endif
73709+ atomic_read_unchecked(&sp->sk_drops));
73710 }
73711
73712 int udp6_seq_show(struct seq_file *seq, void *v)
73713diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73714index 253695d..9481ce8 100644
73715--- a/net/irda/ircomm/ircomm_tty.c
73716+++ b/net/irda/ircomm/ircomm_tty.c
73717@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73718 add_wait_queue(&self->open_wait, &wait);
73719
73720 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73721- __FILE__,__LINE__, tty->driver->name, self->open_count );
73722+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73723
73724 /* As far as I can see, we protect open_count - Jean II */
73725 spin_lock_irqsave(&self->spinlock, flags);
73726 if (!tty_hung_up_p(filp)) {
73727 extra_count = 1;
73728- self->open_count--;
73729+ local_dec(&self->open_count);
73730 }
73731 spin_unlock_irqrestore(&self->spinlock, flags);
73732- self->blocked_open++;
73733+ local_inc(&self->blocked_open);
73734
73735 while (1) {
73736 if (tty->termios->c_cflag & CBAUD) {
73737@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73738 }
73739
73740 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73741- __FILE__,__LINE__, tty->driver->name, self->open_count );
73742+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73743
73744 schedule();
73745 }
73746@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73747 if (extra_count) {
73748 /* ++ is not atomic, so this should be protected - Jean II */
73749 spin_lock_irqsave(&self->spinlock, flags);
73750- self->open_count++;
73751+ local_inc(&self->open_count);
73752 spin_unlock_irqrestore(&self->spinlock, flags);
73753 }
73754- self->blocked_open--;
73755+ local_dec(&self->blocked_open);
73756
73757 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73758- __FILE__,__LINE__, tty->driver->name, self->open_count);
73759+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73760
73761 if (!retval)
73762 self->flags |= ASYNC_NORMAL_ACTIVE;
73763@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73764 }
73765 /* ++ is not atomic, so this should be protected - Jean II */
73766 spin_lock_irqsave(&self->spinlock, flags);
73767- self->open_count++;
73768+ local_inc(&self->open_count);
73769
73770 tty->driver_data = self;
73771 self->tty = tty;
73772 spin_unlock_irqrestore(&self->spinlock, flags);
73773
73774 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73775- self->line, self->open_count);
73776+ self->line, local_read(&self->open_count));
73777
73778 /* Not really used by us, but lets do it anyway */
73779 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73780@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73781 return;
73782 }
73783
73784- if ((tty->count == 1) && (self->open_count != 1)) {
73785+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73786 /*
73787 * Uh, oh. tty->count is 1, which means that the tty
73788 * structure will be freed. state->count should always
73789@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73790 */
73791 IRDA_DEBUG(0, "%s(), bad serial port count; "
73792 "tty->count is 1, state->count is %d\n", __func__ ,
73793- self->open_count);
73794- self->open_count = 1;
73795+ local_read(&self->open_count));
73796+ local_set(&self->open_count, 1);
73797 }
73798
73799- if (--self->open_count < 0) {
73800+ if (local_dec_return(&self->open_count) < 0) {
73801 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73802- __func__, self->line, self->open_count);
73803- self->open_count = 0;
73804+ __func__, self->line, local_read(&self->open_count));
73805+ local_set(&self->open_count, 0);
73806 }
73807- if (self->open_count) {
73808+ if (local_read(&self->open_count)) {
73809 spin_unlock_irqrestore(&self->spinlock, flags);
73810
73811 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73812@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73813 tty->closing = 0;
73814 self->tty = NULL;
73815
73816- if (self->blocked_open) {
73817+ if (local_read(&self->blocked_open)) {
73818 if (self->close_delay)
73819 schedule_timeout_interruptible(self->close_delay);
73820 wake_up_interruptible(&self->open_wait);
73821@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73822 spin_lock_irqsave(&self->spinlock, flags);
73823 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73824 self->tty = NULL;
73825- self->open_count = 0;
73826+ local_set(&self->open_count, 0);
73827 spin_unlock_irqrestore(&self->spinlock, flags);
73828
73829 wake_up_interruptible(&self->open_wait);
73830@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73831 seq_putc(m, '\n');
73832
73833 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73834- seq_printf(m, "Open count: %d\n", self->open_count);
73835+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73836 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73837 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73838
73839diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73840index 274d150..656a144 100644
73841--- a/net/iucv/af_iucv.c
73842+++ b/net/iucv/af_iucv.c
73843@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73844
73845 write_lock_bh(&iucv_sk_list.lock);
73846
73847- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73848+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73849 while (__iucv_get_sock_by_name(name)) {
73850 sprintf(name, "%08x",
73851- atomic_inc_return(&iucv_sk_list.autobind_name));
73852+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73853 }
73854
73855 write_unlock_bh(&iucv_sk_list.lock);
73856diff --git a/net/key/af_key.c b/net/key/af_key.c
73857index 1e733e9..3d73c9f 100644
73858--- a/net/key/af_key.c
73859+++ b/net/key/af_key.c
73860@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73861 static u32 get_acqseq(void)
73862 {
73863 u32 res;
73864- static atomic_t acqseq;
73865+ static atomic_unchecked_t acqseq;
73866
73867 do {
73868- res = atomic_inc_return(&acqseq);
73869+ res = atomic_inc_return_unchecked(&acqseq);
73870 } while (!res);
73871 return res;
73872 }
73873diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73874index 73495f1..ad51356 100644
73875--- a/net/mac80211/ieee80211_i.h
73876+++ b/net/mac80211/ieee80211_i.h
73877@@ -27,6 +27,7 @@
73878 #include <net/ieee80211_radiotap.h>
73879 #include <net/cfg80211.h>
73880 #include <net/mac80211.h>
73881+#include <asm/local.h>
73882 #include "key.h"
73883 #include "sta_info.h"
73884
73885@@ -764,7 +765,7 @@ struct ieee80211_local {
73886 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73887 spinlock_t queue_stop_reason_lock;
73888
73889- int open_count;
73890+ local_t open_count;
73891 int monitors, cooked_mntrs;
73892 /* number of interfaces with corresponding FIF_ flags */
73893 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73894diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73895index 30d7355..e260095 100644
73896--- a/net/mac80211/iface.c
73897+++ b/net/mac80211/iface.c
73898@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73899 break;
73900 }
73901
73902- if (local->open_count == 0) {
73903+ if (local_read(&local->open_count) == 0) {
73904 res = drv_start(local);
73905 if (res)
73906 goto err_del_bss;
73907@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73908 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73909
73910 if (!is_valid_ether_addr(dev->dev_addr)) {
73911- if (!local->open_count)
73912+ if (!local_read(&local->open_count))
73913 drv_stop(local);
73914 return -EADDRNOTAVAIL;
73915 }
73916@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73917 mutex_unlock(&local->mtx);
73918
73919 if (coming_up)
73920- local->open_count++;
73921+ local_inc(&local->open_count);
73922
73923 if (hw_reconf_flags) {
73924 ieee80211_hw_config(local, hw_reconf_flags);
73925@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73926 err_del_interface:
73927 drv_remove_interface(local, &sdata->vif);
73928 err_stop:
73929- if (!local->open_count)
73930+ if (!local_read(&local->open_count))
73931 drv_stop(local);
73932 err_del_bss:
73933 sdata->bss = NULL;
73934@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73935 }
73936
73937 if (going_down)
73938- local->open_count--;
73939+ local_dec(&local->open_count);
73940
73941 switch (sdata->vif.type) {
73942 case NL80211_IFTYPE_AP_VLAN:
73943@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73944
73945 ieee80211_recalc_ps(local, -1);
73946
73947- if (local->open_count == 0) {
73948+ if (local_read(&local->open_count) == 0) {
73949 if (local->ops->napi_poll)
73950 napi_disable(&local->napi);
73951 ieee80211_clear_tx_pending(local);
73952diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73953index a7536fd..4039cc0 100644
73954--- a/net/mac80211/main.c
73955+++ b/net/mac80211/main.c
73956@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73957 local->hw.conf.power_level = power;
73958 }
73959
73960- if (changed && local->open_count) {
73961+ if (changed && local_read(&local->open_count)) {
73962 ret = drv_config(local, changed);
73963 /*
73964 * Goal:
73965diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73966index 9ee7164..56c5061 100644
73967--- a/net/mac80211/pm.c
73968+++ b/net/mac80211/pm.c
73969@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73970 struct ieee80211_sub_if_data *sdata;
73971 struct sta_info *sta;
73972
73973- if (!local->open_count)
73974+ if (!local_read(&local->open_count))
73975 goto suspend;
73976
73977 ieee80211_scan_cancel(local);
73978@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73979 cancel_work_sync(&local->dynamic_ps_enable_work);
73980 del_timer_sync(&local->dynamic_ps_timer);
73981
73982- local->wowlan = wowlan && local->open_count;
73983+ local->wowlan = wowlan && local_read(&local->open_count);
73984 if (local->wowlan) {
73985 int err = drv_suspend(local, wowlan);
73986 if (err < 0) {
73987@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73988 }
73989
73990 /* stop hardware - this must stop RX */
73991- if (local->open_count)
73992+ if (local_read(&local->open_count))
73993 ieee80211_stop_device(local);
73994
73995 suspend:
73996diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73997index 5a5a776..9600b11 100644
73998--- a/net/mac80211/rate.c
73999+++ b/net/mac80211/rate.c
74000@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74001
74002 ASSERT_RTNL();
74003
74004- if (local->open_count)
74005+ if (local_read(&local->open_count))
74006 return -EBUSY;
74007
74008 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74009diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74010index c97a065..ff61928 100644
74011--- a/net/mac80211/rc80211_pid_debugfs.c
74012+++ b/net/mac80211/rc80211_pid_debugfs.c
74013@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74014
74015 spin_unlock_irqrestore(&events->lock, status);
74016
74017- if (copy_to_user(buf, pb, p))
74018+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74019 return -EFAULT;
74020
74021 return p;
74022diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74023index d5230ec..c604b21 100644
74024--- a/net/mac80211/util.c
74025+++ b/net/mac80211/util.c
74026@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74027 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74028
74029 /* everything else happens only if HW was up & running */
74030- if (!local->open_count)
74031+ if (!local_read(&local->open_count))
74032 goto wake_up;
74033
74034 /*
74035diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74036index d5597b7..ab6d39c 100644
74037--- a/net/netfilter/Kconfig
74038+++ b/net/netfilter/Kconfig
74039@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74040
74041 To compile it as a module, choose M here. If unsure, say N.
74042
74043+config NETFILTER_XT_MATCH_GRADM
74044+ tristate '"gradm" match support'
74045+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74046+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74047+ ---help---
74048+ The gradm match allows to match on grsecurity RBAC being enabled.
74049+ It is useful when iptables rules are applied early on bootup to
74050+ prevent connections to the machine (except from a trusted host)
74051+ while the RBAC system is disabled.
74052+
74053 config NETFILTER_XT_MATCH_HASHLIMIT
74054 tristate '"hashlimit" match support'
74055 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74056diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74057index 1a02853..5d8c22e 100644
74058--- a/net/netfilter/Makefile
74059+++ b/net/netfilter/Makefile
74060@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74061 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74062 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74063 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74064+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74065 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74066 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74067 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74068diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74069index 29fa5ba..8debc79 100644
74070--- a/net/netfilter/ipvs/ip_vs_conn.c
74071+++ b/net/netfilter/ipvs/ip_vs_conn.c
74072@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74073 /* Increase the refcnt counter of the dest */
74074 atomic_inc(&dest->refcnt);
74075
74076- conn_flags = atomic_read(&dest->conn_flags);
74077+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
74078 if (cp->protocol != IPPROTO_UDP)
74079 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74080 /* Bind with the destination and its corresponding transmitter */
74081@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74082 atomic_set(&cp->refcnt, 1);
74083
74084 atomic_set(&cp->n_control, 0);
74085- atomic_set(&cp->in_pkts, 0);
74086+ atomic_set_unchecked(&cp->in_pkts, 0);
74087
74088 atomic_inc(&ipvs->conn_count);
74089 if (flags & IP_VS_CONN_F_NO_CPORT)
74090@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74091
74092 /* Don't drop the entry if its number of incoming packets is not
74093 located in [0, 8] */
74094- i = atomic_read(&cp->in_pkts);
74095+ i = atomic_read_unchecked(&cp->in_pkts);
74096 if (i > 8 || i < 0) return 0;
74097
74098 if (!todrop_rate[i]) return 0;
74099diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74100index 093cc32..9209ae1 100644
74101--- a/net/netfilter/ipvs/ip_vs_core.c
74102+++ b/net/netfilter/ipvs/ip_vs_core.c
74103@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74104 ret = cp->packet_xmit(skb, cp, pd->pp);
74105 /* do not touch skb anymore */
74106
74107- atomic_inc(&cp->in_pkts);
74108+ atomic_inc_unchecked(&cp->in_pkts);
74109 ip_vs_conn_put(cp);
74110 return ret;
74111 }
74112@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74113 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74114 pkts = sysctl_sync_threshold(ipvs);
74115 else
74116- pkts = atomic_add_return(1, &cp->in_pkts);
74117+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74118
74119 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74120 cp->protocol == IPPROTO_SCTP) {
74121diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74122index e1a66cf..0910076 100644
74123--- a/net/netfilter/ipvs/ip_vs_ctl.c
74124+++ b/net/netfilter/ipvs/ip_vs_ctl.c
74125@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74126 ip_vs_rs_hash(ipvs, dest);
74127 write_unlock_bh(&ipvs->rs_lock);
74128 }
74129- atomic_set(&dest->conn_flags, conn_flags);
74130+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
74131
74132 /* bind the service */
74133 if (!dest->svc) {
74134@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74135 " %-7s %-6d %-10d %-10d\n",
74136 &dest->addr.in6,
74137 ntohs(dest->port),
74138- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74139+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74140 atomic_read(&dest->weight),
74141 atomic_read(&dest->activeconns),
74142 atomic_read(&dest->inactconns));
74143@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74144 "%-7s %-6d %-10d %-10d\n",
74145 ntohl(dest->addr.ip),
74146 ntohs(dest->port),
74147- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74148+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74149 atomic_read(&dest->weight),
74150 atomic_read(&dest->activeconns),
74151 atomic_read(&dest->inactconns));
74152@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74153
74154 entry.addr = dest->addr.ip;
74155 entry.port = dest->port;
74156- entry.conn_flags = atomic_read(&dest->conn_flags);
74157+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74158 entry.weight = atomic_read(&dest->weight);
74159 entry.u_threshold = dest->u_threshold;
74160 entry.l_threshold = dest->l_threshold;
74161@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74162 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74163
74164 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74165- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74166+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74167 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74168 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74169 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74170diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74171index 2b6678c0..aaa41fc 100644
74172--- a/net/netfilter/ipvs/ip_vs_sync.c
74173+++ b/net/netfilter/ipvs/ip_vs_sync.c
74174@@ -649,7 +649,7 @@ control:
74175 * i.e only increment in_pkts for Templates.
74176 */
74177 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74178- int pkts = atomic_add_return(1, &cp->in_pkts);
74179+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74180
74181 if (pkts % sysctl_sync_period(ipvs) != 1)
74182 return;
74183@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74184
74185 if (opt)
74186 memcpy(&cp->in_seq, opt, sizeof(*opt));
74187- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74188+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74189 cp->state = state;
74190 cp->old_state = cp->state;
74191 /*
74192diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74193index aa2d720..d8aa111 100644
74194--- a/net/netfilter/ipvs/ip_vs_xmit.c
74195+++ b/net/netfilter/ipvs/ip_vs_xmit.c
74196@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74197 else
74198 rc = NF_ACCEPT;
74199 /* do not touch skb anymore */
74200- atomic_inc(&cp->in_pkts);
74201+ atomic_inc_unchecked(&cp->in_pkts);
74202 goto out;
74203 }
74204
74205@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74206 else
74207 rc = NF_ACCEPT;
74208 /* do not touch skb anymore */
74209- atomic_inc(&cp->in_pkts);
74210+ atomic_inc_unchecked(&cp->in_pkts);
74211 goto out;
74212 }
74213
74214diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74215index 66b2c54..c7884e3 100644
74216--- a/net/netfilter/nfnetlink_log.c
74217+++ b/net/netfilter/nfnetlink_log.c
74218@@ -70,7 +70,7 @@ struct nfulnl_instance {
74219 };
74220
74221 static DEFINE_SPINLOCK(instances_lock);
74222-static atomic_t global_seq;
74223+static atomic_unchecked_t global_seq;
74224
74225 #define INSTANCE_BUCKETS 16
74226 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74227@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74228 /* global sequence number */
74229 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74230 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74231- htonl(atomic_inc_return(&global_seq)));
74232+ htonl(atomic_inc_return_unchecked(&global_seq)));
74233
74234 if (data_len) {
74235 struct nlattr *nla;
74236diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74237new file mode 100644
74238index 0000000..6905327
74239--- /dev/null
74240+++ b/net/netfilter/xt_gradm.c
74241@@ -0,0 +1,51 @@
74242+/*
74243+ * gradm match for netfilter
74244